// SPDX-License-Identifier: GPL-2.0-only
/*
 *	xt_u32 - kernel module to match u32 packet content
 *
 *	Original author: Don Cohen <don@isis.cs3-inc.com>
 *	(C) CC Computer Consultants GmbH, 2007
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_u32.h>

static bool u32_match_it(const struct xt_u32 *data,
			 const struct sk_buff *skb)
{
	const struct xt_u32_test *ct;
	unsigned int testind;
	unsigned int nnums;
	unsigned int nvals;
	unsigned int i;
	__be32 n;
	u_int32_t pos;
	u_int32_t val;
	u_int32_t at;

	/*
	 * Small example: "0 >> 28 == 4 && 8 & 0xFF0000 >> 16 = 6, 17"
	 * (=IPv4 and (TCP or UDP)). Outer loop runs over the "&&" operands.
	 */
	for (testind = 0; testind < data->ntests; ++testind) {
		ct  = &data->tests[testind];
		at  = 0;
		pos = ct->location[0].number;

		if (skb->len < 4 || pos > skb->len - 4)
			return false;

		if (skb_copy_bits(skb, pos, &n, sizeof(n)) < 0)
			BUG();
		val   = ntohl(n);
		nnums = ct->nnums;

		/* Inner loop runs over "&", "<<", ">>" and "@" operands */
		for (i = 1; i < nnums; ++i) {
			u_int32_t number = ct->location[i].number;
			switch (ct->location[i].nextop) {
			case XT_U32_AND:
				val &= number;
				break;
			case XT_U32_LEFTSH:
				val <<= number;
				break;
			case XT_U32_RIGHTSH:
				val >>= number;
				break;
			case XT_U32_AT:
				if (at + val < at)
					return false;
				at += val;
				pos = number;
				if (at + 4 < at || skb->len < at + 4 ||
				    pos > skb->len - at - 4)
					return false;

				if (skb_copy_bits(skb, at + pos, &n,
						  sizeof(n)) < 0)
					BUG();
				val = ntohl(n);
				break;
			}
		}

		/* Run over the "," and ":" operands */
		nvals = ct->nvalues;
		for (i = 0; i < nvals; ++i)
			if (ct->value[i].min <= val && val <= ct->value[i].max)
				break;

		if (i >= ct->nvalues)
			return false;
	}

	return true;
}

static bool u32_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_u32 *data = par->matchinfo;
	bool ret;

	ret = u32_match_it(data, skb);
	return ret ^ data->invert;
}

static int u32_mt_checkentry(const struct xt_mtchk_param *par)
{
	const struct xt_u32 *data = par->matchinfo;
	const struct xt_u32_test *ct;
	unsigned int i;

	if (data->ntests > ARRAY_SIZE(data->tests))
		return -EINVAL;

	for (i = 0; i < data->ntests; ++i) {
		ct = &data->tests[i];

		if (ct->nnums > ARRAY_SIZE(ct->location) ||
		    ct->nvalues > ARRAY_SIZE(ct->value))
			return -EINVAL;
	}

	return 0;
}

static struct xt_match xt_u32_mt_reg __read_mostly = {
	.name       = "u32",
	.revision   = 0,
	.family     = NFPROTO_UNSPEC,
	.match      = u32_mt,
	.checkentry = u32_mt_checkentry,
	.matchsize  = sizeof(struct xt_u32),
	.me         = THIS_MODULE,
};

static int __init u32_mt_init(void)
{
	return xt_register_match(&xt_u32_mt_reg);
}

static void __exit u32_mt_exit(void)
{
	xt_unregister_match(&xt_u32_mt_reg);
}

module_init(u32_mt_init);
module_exit(u32_mt_exit);
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: arbitrary byte matching");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_u32");
MODULE_ALIAS("ip6t_u32");
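/*
 * Illustrative sketch, not part of the module above: how the first
 * clause of the in-code example, "0 >> 28 == 4" (IP version field is
 * 4), maps onto the uapi structures from <linux/netfilter/xt_u32.h>.
 * iptables normally compiles this from the --u32 expression string;
 * the helper name below is hypothetical.
 */
#include <linux/netfilter/xt_u32.h>

static void example_encode_ipv4_clause(struct xt_u32 *m)
{
	struct xt_u32_test *t = &m->tests[0];

	t->location[0].number = 0;		/* load u32 at offset 0 */
	t->location[1].nextop = XT_U32_RIGHTSH;	/* ">> 28" */
	t->location[1].number = 28;
	t->nnums = 2;				/* location[0] + one operator */

	t->value[0].min = 4;			/* "== 4" is the range [4, 4] */
	t->value[0].max = 4;
	t->nvalues = 1;

	m->ntests = 1;
	m->invert = 0;
}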
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/slab.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>

static __printf(2, 3)
void osdmap_info(const struct ceph_osdmap *map, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	printk(KERN_INFO "%s (%pU e%u): %pV", KBUILD_MODNAME, &map->fsid,
	       map->epoch, &vaf);

	va_end(args);
}

char *ceph_osdmap_state_str(char *str, int len, u32 state)
{
	if (!len)
		return str;

	if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
		snprintf(str, len, "exists, up");
	else if (state & CEPH_OSD_EXISTS)
		snprintf(str, len, "exists");
	else if (state & CEPH_OSD_UP)
		snprintf(str, len, "up");
	else
		snprintf(str, len, "doesn't exist");

	return str;
}

/* maps */

static int calc_bits_of(unsigned int t)
{
	int b = 0;

	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}

/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
	pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
}
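/*
 * Worked example: a pool with pg_num = 12 gives calc_bits_of(11) = 4,
 * so pg_num_mask = (1 << 4) - 1 = 15 - the smallest 2^n-1 value that
 * is >= 12, covering every pg id in [0, 12).
 */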
/*
 * decode crush map
 */
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	b->item_weight = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;

	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;

	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_8_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;

	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}

static int crush_decode_straw2_bucket(void **p, void *end,
				      struct crush_bucket_straw2 *b)
{
	int j;

	dout("crush_decode_straw2_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++)
		b->item_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}
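/*
 * The bucket decoders above only fill in the per-algorithm arrays; the
 * common header (b->h) is decoded by crush_decode() before it
 * dispatches here.  They may return -ENOMEM with some arrays already
 * allocated - crush_decode() then takes its fail path and
 * crush_destroy() reclaims the partially built bucket.
 */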
struct crush_name_node {
	struct rb_node cn_node;
	int cn_id;
	char cn_name[];
};

static struct crush_name_node *alloc_crush_name(size_t name_len)
{
	struct crush_name_node *cn;

	cn = kmalloc(sizeof(*cn) + name_len + 1, GFP_NOIO);
	if (!cn)
		return NULL;

	RB_CLEAR_NODE(&cn->cn_node);
	return cn;
}

static void free_crush_name(struct crush_name_node *cn)
{
	WARN_ON(!RB_EMPTY_NODE(&cn->cn_node));

	kfree(cn);
}

DEFINE_RB_FUNCS(crush_name, struct crush_name_node, cn_id, cn_node)

static int decode_crush_names(void **p, void *end, struct rb_root *root)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct crush_name_node *cn;
		int id;
		u32 name_len;

		ceph_decode_32_safe(p, end, id, e_inval);
		ceph_decode_32_safe(p, end, name_len, e_inval);
		ceph_decode_need(p, end, name_len, e_inval);

		cn = alloc_crush_name(name_len);
		if (!cn)
			return -ENOMEM;

		cn->cn_id = id;
		memcpy(cn->cn_name, *p, name_len);
		cn->cn_name[name_len] = '\0';
		*p += name_len;

		if (!__insert_crush_name(root, cn)) {
			free_crush_name(cn);
			return -EEXIST;
		}
	}

	return 0;

e_inval:
	return -EINVAL;
}

void clear_crush_names(struct rb_root *root)
{
	while (!RB_EMPTY_ROOT(root)) {
		struct crush_name_node *cn =
		    rb_entry(rb_first(root), struct crush_name_node, cn_node);

		erase_crush_name(root, cn);
		free_crush_name(cn);
	}
}

static struct crush_choose_arg_map *alloc_choose_arg_map(void)
{
	struct crush_choose_arg_map *arg_map;

	arg_map = kzalloc(sizeof(*arg_map), GFP_NOIO);
	if (!arg_map)
		return NULL;

	RB_CLEAR_NODE(&arg_map->node);
	return arg_map;
}

static void free_choose_arg_map(struct crush_choose_arg_map *arg_map)
{
	if (arg_map) {
		int i, j;

		WARN_ON(!RB_EMPTY_NODE(&arg_map->node));

		for (i = 0; i < arg_map->size; i++) {
			struct crush_choose_arg *arg = &arg_map->args[i];

			for (j = 0; j < arg->weight_set_size; j++)
				kfree(arg->weight_set[j].weights);
			kfree(arg->weight_set);
			kfree(arg->ids);
		}
		kfree(arg_map->args);
		kfree(arg_map);
	}
}

DEFINE_RB_FUNCS(choose_arg_map, struct crush_choose_arg_map,
		choose_args_index, node);

void clear_choose_args(struct crush_map *c)
{
	while (!RB_EMPTY_ROOT(&c->choose_args)) {
		struct crush_choose_arg_map *arg_map =
		    rb_entry(rb_first(&c->choose_args),
			     struct crush_choose_arg_map, node);

		erase_choose_arg_map(&c->choose_args, arg_map);
		free_choose_arg_map(arg_map);
	}
}

static u32 *decode_array_32_alloc(void **p, void *end, u32 *plen)
{
	u32 *a = NULL;
	u32 len;
	int ret;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len) {
		u32 i;

		a = kmalloc_array(len, sizeof(u32), GFP_NOIO);
		if (!a) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_need(p, end, len * sizeof(u32), e_inval);
		for (i = 0; i < len; i++)
			a[i] = ceph_decode_32(p);
	}

	*plen = len;
	return a;

e_inval:
	ret = -EINVAL;
fail:
	kfree(a);
	return ERR_PTR(ret);
}

/*
 * Assumes @arg is zero-initialized.
 */
static int decode_choose_arg(void **p, void *end, struct crush_choose_arg *arg)
{
	int ret;

	ceph_decode_32_safe(p, end, arg->weight_set_size, e_inval);
	if (arg->weight_set_size) {
		u32 i;

		arg->weight_set = kmalloc_array(arg->weight_set_size,
						sizeof(*arg->weight_set),
						GFP_NOIO);
		if (!arg->weight_set)
			return -ENOMEM;

		for (i = 0; i < arg->weight_set_size; i++) {
			struct crush_weight_set *w = &arg->weight_set[i];

			w->weights = decode_array_32_alloc(p, end, &w->size);
			if (IS_ERR(w->weights)) {
				ret = PTR_ERR(w->weights);
				w->weights = NULL;
				return ret;
			}
		}
	}

	arg->ids = decode_array_32_alloc(p, end, &arg->ids_size);
	if (IS_ERR(arg->ids)) {
		ret = PTR_ERR(arg->ids);
		arg->ids = NULL;
		return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_choose_args(void **p, void *end, struct crush_map *c)
{
	struct crush_choose_arg_map *arg_map = NULL;
	u32 num_choose_arg_maps, num_buckets;
	int ret;

	ceph_decode_32_safe(p, end, num_choose_arg_maps, e_inval);
	while (num_choose_arg_maps--) {
		arg_map = alloc_choose_arg_map();
		if (!arg_map) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_64_safe(p, end, arg_map->choose_args_index,
				    e_inval);
		arg_map->size = c->max_buckets;
		arg_map->args = kcalloc(arg_map->size, sizeof(*arg_map->args),
					GFP_NOIO);
		if (!arg_map->args) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_32_safe(p, end, num_buckets, e_inval);
		while (num_buckets--) {
			struct crush_choose_arg *arg;
			u32 bucket_index;

			ceph_decode_32_safe(p, end, bucket_index, e_inval);
			if (bucket_index >= arg_map->size)
				goto e_inval;

			arg = &arg_map->args[bucket_index];
			ret = decode_choose_arg(p, end, arg);
			if (ret)
				goto fail;

			if (arg->ids_size &&
			    arg->ids_size != c->buckets[bucket_index]->size)
				goto e_inval;
		}

		insert_choose_arg_map(&c->choose_args, arg_map);
	}

	return 0;

e_inval:
	ret = -EINVAL;
fail:
	free_choose_arg_map(arg_map);
	return ret;
}

static void crush_finalize(struct crush_map *c)
{
	__s32 b;

	/* Space for the array of pointers to per-bucket workspace */
	c->working_size = sizeof(struct crush_work) +
	    c->max_buckets * sizeof(struct crush_work_bucket *);

	for (b = 0; b < c->max_buckets; b++) {
		if (!c->buckets[b])
			continue;

		switch (c->buckets[b]->alg) {
		default:
			/*
			 * The base case, permutation variables and
			 * the pointer to the permutation array.
			 */
			c->working_size += sizeof(struct crush_work_bucket);
			break;
		}

		/* Every bucket has a permutation array. */
		c->working_size += c->buckets[b]->size * sizeof(__u32);
	}
}
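/*
 * Illustrative sizing, assuming the crush_work_size() helper from
 * <linux/crush/crush.h>: a map with max_buckets = 2 holding a single
 * bucket of size 3 gets
 *
 *	working_size = sizeof(struct crush_work)
 *		     + 2 * sizeof(struct crush_work_bucket *)
 *		     + 1 * sizeof(struct crush_work_bucket)
 *		     + 3 * sizeof(__u32);
 *
 * alloc_workspace() below then adds room for the result vector on top
 * of this via crush_work_size().
 */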
static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	c->type_names = RB_ROOT;
	c->names = RB_ROOT;
	c->choose_args = RB_ROOT;

	/* set tunables to default values */
	c->choose_local_tries = 2;
	c->choose_local_fallback_tries = 5;
	c->choose_total_tries = 19;
	c->chooseleaf_descend_once = 0;

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		case CRUSH_BUCKET_STRAW2:
			size = sizeof(struct crush_bucket_straw2);
			break;
		default:
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);

		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				(struct crush_bucket_uniform *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
				(struct crush_bucket_list *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				(struct crush_bucket_straw *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_STRAW2:
			err = crush_decode_straw2_bucket(p, end,
				(struct crush_bucket_straw2 *)b);
			if (err < 0)
				goto fail;
			break;
		}
	}

	/* rules */
	dout("rule vec is %p\n", c->rules);
	for (i = 0; i < c->max_rules; i++) {
		u32 yes;
		struct crush_rule *r;

		ceph_decode_32_safe(p, end, yes, bad);
		if (!yes) {
			dout("crush_decode NO rule %d off %x %p to %p\n",
			     i, (int)(*p-start), *p, end);
			c->rules[i] = NULL;
			continue;
		}

		dout("crush_decode rule %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);

		/* len */
		ceph_decode_32_safe(p, end, yes, bad);
#if BITS_PER_LONG == 32
		if (yes > (ULONG_MAX - sizeof(*r))
			  / sizeof(struct crush_rule_step))
			goto bad;
#endif
		r = kmalloc(struct_size(r, steps, yes), GFP_NOFS);
		if (r == NULL)
			goto badmem;
		dout(" rule %d is at %p\n", i, r);
		c->rules[i] = r;
		r->len = yes;
		ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
		ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
		for (j = 0; j < r->len; j++) {
			r->steps[j].op = ceph_decode_32(p);
			r->steps[j].arg1 = ceph_decode_32(p);
			r->steps[j].arg2 = ceph_decode_32(p);
		}
	}

	err = decode_crush_names(p, end, &c->type_names);
	if (err)
		goto fail;

	err = decode_crush_names(p, end, &c->names);
	if (err)
		goto fail;

	ceph_decode_skip_map(p, end, 32, string, bad); /* rule_name_map */

	/* tunables */
	ceph_decode_need(p, end, 3*sizeof(u32), done);
	c->choose_local_tries = ceph_decode_32(p);
	c->choose_local_fallback_tries = ceph_decode_32(p);
	c->choose_total_tries = ceph_decode_32(p);
	dout("crush decode tunable choose_local_tries = %d\n",
	     c->choose_local_tries);
	dout("crush decode tunable choose_local_fallback_tries = %d\n",
	     c->choose_local_fallback_tries);
	dout("crush decode tunable choose_total_tries = %d\n",
	     c->choose_total_tries);

	ceph_decode_need(p, end, sizeof(u32), done);
	c->chooseleaf_descend_once = ceph_decode_32(p);
	dout("crush decode tunable chooseleaf_descend_once = %d\n",
	     c->chooseleaf_descend_once);

	ceph_decode_need(p, end, sizeof(u8), done);
	c->chooseleaf_vary_r = ceph_decode_8(p);
	dout("crush decode tunable chooseleaf_vary_r = %d\n",
	     c->chooseleaf_vary_r);

	/* skip straw_calc_version, allowed_bucket_algs */
	ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done);
	*p += sizeof(u8) + sizeof(u32);

	ceph_decode_need(p, end, sizeof(u8), done);
	c->chooseleaf_stable = ceph_decode_8(p);
	dout("crush decode tunable chooseleaf_stable = %d\n",
	     c->chooseleaf_stable);

	if (*p != end) {
		/* class_map */
		ceph_decode_skip_map(p, end, 32, 32, bad);
		/* class_name */
		ceph_decode_skip_map(p, end, 32, string, bad);
		/* class_bucket */
		ceph_decode_skip_map_of_map(p, end, 32, 32, 32, bad);
	}

	if (*p != end) {
		err = decode_choose_args(p, end, c);
		if (err)
			goto fail;
	}

done:
	crush_finalize(c);
	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
fail:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);

bad:
	err = -EINVAL;
	goto fail;
}
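/*
 * To recap, the on-wire order consumed by crush_decode() is: magic,
 * max_buckets/max_rules/max_devices, the bucket array, the rule array,
 * type names, item names, the skipped rule_name_map, then the optional
 * trailers (tunables, device classes, choose_args).  Each trailer read
 * bails out to done: when the input runs short, so maps from older
 * encoders simply stop early with the defaults left in place.
 */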
int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs)
{
	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;
	if (lhs->seed < rhs->seed)
		return -1;
	if (lhs->seed > rhs->seed)
		return 1;

	return 0;
}

int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs)
{
	int ret;

	ret = ceph_pg_compare(&lhs->pgid, &rhs->pgid);
	if (ret)
		return ret;

	if (lhs->shard < rhs->shard)
		return -1;
	if (lhs->shard > rhs->shard)
		return 1;

	return 0;
}

static struct ceph_pg_mapping *alloc_pg_mapping(size_t payload_len)
{
	struct ceph_pg_mapping *pg;

	pg = kmalloc(sizeof(*pg) + payload_len, GFP_NOIO);
	if (!pg)
		return NULL;

	RB_CLEAR_NODE(&pg->node);
	return pg;
}

static void free_pg_mapping(struct ceph_pg_mapping *pg)
{
	WARN_ON(!RB_EMPTY_NODE(&pg->node));

	kfree(pg);
}

/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds) and primary_temp (explicit primary setting)
 */
DEFINE_RB_FUNCS2(pg_mapping, struct ceph_pg_mapping, pgid, ceph_pg_compare,
		 RB_BYPTR, const struct ceph_pg *, node)

/*
 * rbtree of pg pool info
 */
DEFINE_RB_FUNCS(pg_pool, struct ceph_pg_pool_info, id, node)

struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id)
{
	return lookup_pg_pool(&map->pg_pools, id);
}

const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
{
	struct ceph_pg_pool_info *pi;

	if (id == CEPH_NOPOOL)
		return NULL;

	if (WARN_ON_ONCE(id > (u64) INT_MAX))
		return NULL;

	pi = lookup_pg_pool(&map->pg_pools, id);
	return pi ? pi->name : NULL;
}
EXPORT_SYMBOL(ceph_pg_pool_name_by_id);

int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
	struct rb_node *rbp;

	for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rbp, struct ceph_pg_pool_info, node);

		if (pi->name && strcmp(pi->name, name) == 0)
			return pi->id;
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ceph_pg_poolid_by_name);

u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id)
{
	struct ceph_pg_pool_info *pi;

	pi = lookup_pg_pool(&map->pg_pools, id);
	return pi ? pi->flags : 0;
}
EXPORT_SYMBOL(ceph_pg_pool_flags);

static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
	erase_pg_pool(root, pi);
	kfree(pi->name);
	kfree(pi);
}
static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
	u8 ev, cv;
	unsigned len, num;
	void *pool_end;

	ceph_decode_need(p, end, 2 + 4, bad);
	ev = ceph_decode_8(p);  /* encoding version */
	cv = ceph_decode_8(p);  /* compat version */
	if (ev < 5) {
		pr_warn("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	if (cv > 9) {
		pr_warn("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, bad);
	pool_end = *p + len;

	pi->type = ceph_decode_8(p);
	pi->size = ceph_decode_8(p);
	pi->crush_ruleset = ceph_decode_8(p);
	pi->object_hash = ceph_decode_8(p);

	pi->pg_num = ceph_decode_32(p);
	pi->pgp_num = ceph_decode_32(p);

	*p += 4 + 4;  /* skip lpg* */
	*p += 4;      /* skip last_change */
	*p += 8 + 4;  /* skip snap_seq, snap_epoch */

	/* skip snaps */
	num = ceph_decode_32(p);
	while (num--) {
		*p += 8;      /* snapid key */
		*p += 1 + 1;  /* versions */
		len = ceph_decode_32(p);
		*p += len;
	}

	/* skip removed_snaps */
	num = ceph_decode_32(p);
	*p += num * (8 + 8);

	*p += 8;  /* skip auid */
	pi->flags = ceph_decode_64(p);
	*p += 4;  /* skip crash_replay_interval */

	if (ev >= 7)
		pi->min_size = ceph_decode_8(p);
	else
		pi->min_size = pi->size - pi->size / 2;

	if (ev >= 8)
		*p += 8 + 8;  /* skip quota_max_* */

	if (ev >= 9) {
		/* skip tiers */
		num = ceph_decode_32(p);
		*p += num * 8;

		*p += 8;  /* skip tier_of */
		*p += 1;  /* skip cache_mode */

		pi->read_tier = ceph_decode_64(p);
		pi->write_tier = ceph_decode_64(p);
	} else {
		pi->read_tier = -1;
		pi->write_tier = -1;
	}

	if (ev >= 10) {
		/* skip properties */
		num = ceph_decode_32(p);
		while (num--) {
			len = ceph_decode_32(p);
			*p += len;  /* key */
			len = ceph_decode_32(p);
			*p += len;  /* val */
		}
	}

	if (ev >= 11) {
		/* skip hit_set_params */
		*p += 1 + 1;  /* versions */
		len = ceph_decode_32(p);
		*p += len;

		*p += 4;  /* skip hit_set_period */
		*p += 4;  /* skip hit_set_count */
	}

	if (ev >= 12)
		*p += 4;  /* skip stripe_width */

	if (ev >= 13) {
		*p += 8;  /* skip target_max_bytes */
		*p += 8;  /* skip target_max_objects */
		*p += 4;  /* skip cache_target_dirty_ratio_micro */
		*p += 4;  /* skip cache_target_full_ratio_micro */
		*p += 4;  /* skip cache_min_flush_age */
		*p += 4;  /* skip cache_min_evict_age */
	}

	if (ev >= 14) {
		/* skip erasure_code_profile */
		len = ceph_decode_32(p);
		*p += len;
	}

	/*
	 * last_force_op_resend_preluminous, will be overridden if the
	 * map was encoded with RESEND_ON_SPLIT
	 */
	if (ev >= 15)
		pi->last_force_request_resend = ceph_decode_32(p);
	else
		pi->last_force_request_resend = 0;

	if (ev >= 16)
		*p += 4;  /* skip min_read_recency_for_promote */

	if (ev >= 17)
		*p += 8;  /* skip expected_num_objects */

	if (ev >= 19)
		*p += 4;  /* skip cache_target_dirty_high_ratio_micro */

	if (ev >= 20)
		*p += 4;  /* skip min_write_recency_for_promote */

	if (ev >= 21)
		*p += 1;  /* skip use_gmt_hitset */

	if (ev >= 22)
		*p += 1;  /* skip fast_read */

	if (ev >= 23) {
		*p += 4;  /* skip hit_set_grade_decay_rate */
		*p += 4;  /* skip hit_set_search_last_n */
	}

	if (ev >= 24) {
		/* skip opts */
		*p += 1 + 1;  /* versions */
		len = ceph_decode_32(p);
		*p += len;
	}

	if (ev >= 25)
		pi->last_force_request_resend = ceph_decode_32(p);

	/* ignore the rest */
	*p = pool_end;
	calc_pg_masks(pi);
	return 0;

bad:
	return -EINVAL;
}
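/*
 * The pre-ev-7 fallback above computes min_size as ceil(size / 2) with
 * integer math: a size 3 pool gets 3 - 3/2 = 2.  Also note the length
 * prefix decoded into pool_end: fields newer than this function knows
 * about are skipped wholesale by the final "*p = pool_end".
 */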
static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
	struct ceph_pg_pool_info *pi;
	u32 num, len;
	u64 pool;

	ceph_decode_32_safe(p, end, num, bad);
	dout(" %d pool names\n", num);
	while (num--) {
		ceph_decode_64_safe(p, end, pool, bad);
		ceph_decode_32_safe(p, end, len, bad);
		dout(" pool %llu len %d\n", pool, len);
		ceph_decode_need(p, end, len, bad);
		pi = lookup_pg_pool(&map->pg_pools, pool);
		if (pi) {
			char *name = kstrndup(*p, len, GFP_NOFS);

			if (!name)
				return -ENOMEM;
			kfree(pi->name);
			pi->name = name;
			dout(" name is %s\n", pi->name);
		}
		*p += len;
	}
	return 0;

bad:
	return -EINVAL;
}

/*
 * CRUSH workspaces
 *
 * workspace_manager framework borrowed from fs/btrfs/compression.c.
 * Two simplifications: there is only one type of workspace and there
 * is always at least one workspace.
 */
static struct crush_work *alloc_workspace(const struct crush_map *c)
{
	struct crush_work *work;
	size_t work_size;

	WARN_ON(!c->working_size);
	work_size = crush_work_size(c, CEPH_PG_MAX_SIZE);
	dout("%s work_size %zu bytes\n", __func__, work_size);

	work = kvmalloc(work_size, GFP_NOIO);
	if (!work)
		return NULL;

	INIT_LIST_HEAD(&work->item);
	crush_init_workspace(c, work);
	return work;
}

static void free_workspace(struct crush_work *work)
{
	WARN_ON(!list_empty(&work->item));
	kvfree(work);
}

static void init_workspace_manager(struct workspace_manager *wsm)
{
	INIT_LIST_HEAD(&wsm->idle_ws);
	spin_lock_init(&wsm->ws_lock);
	atomic_set(&wsm->total_ws, 0);
	wsm->free_ws = 0;
	init_waitqueue_head(&wsm->ws_wait);
}

static void add_initial_workspace(struct workspace_manager *wsm,
				  struct crush_work *work)
{
	WARN_ON(!list_empty(&wsm->idle_ws));

	list_add(&work->item, &wsm->idle_ws);
	atomic_set(&wsm->total_ws, 1);
	wsm->free_ws = 1;
}

static void cleanup_workspace_manager(struct workspace_manager *wsm)
{
	struct crush_work *work;

	while (!list_empty(&wsm->idle_ws)) {
		work = list_first_entry(&wsm->idle_ws, struct crush_work,
					item);
		list_del_init(&work->item);
		free_workspace(work);
	}
	atomic_set(&wsm->total_ws, 0);
	wsm->free_ws = 0;
}
/*
 * Finds an available workspace or allocates a new one.  If it's not
 * possible to allocate a new one, waits until there is one.
 */
static struct crush_work *get_workspace(struct workspace_manager *wsm,
					const struct crush_map *c)
{
	struct crush_work *work;
	int cpus = num_online_cpus();

again:
	spin_lock(&wsm->ws_lock);
	if (!list_empty(&wsm->idle_ws)) {
		work = list_first_entry(&wsm->idle_ws, struct crush_work,
					item);
		list_del_init(&work->item);
		wsm->free_ws--;
		spin_unlock(&wsm->ws_lock);
		return work;
	}
	if (atomic_read(&wsm->total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(&wsm->ws_lock);
		prepare_to_wait(&wsm->ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&wsm->total_ws) > cpus && !wsm->free_ws)
			schedule();
		finish_wait(&wsm->ws_wait, &wait);
		goto again;
	}
	atomic_inc(&wsm->total_ws);
	spin_unlock(&wsm->ws_lock);

	work = alloc_workspace(c);
	if (!work) {
		atomic_dec(&wsm->total_ws);
		wake_up(&wsm->ws_wait);

		/*
		 * Do not return the error but go back to waiting.  We
		 * have the initial workspace and the CRUSH computation
		 * time is bounded so we will get it eventually.
		 */
		WARN_ON(atomic_read(&wsm->total_ws) < 1);
		goto again;
	}
	return work;
}

/*
 * Puts a workspace back on the list or frees it if we have enough
 * idle ones sitting around.
 */
static void put_workspace(struct workspace_manager *wsm,
			  struct crush_work *work)
{
	spin_lock(&wsm->ws_lock);
	if (wsm->free_ws <= num_online_cpus()) {
		list_add(&work->item, &wsm->idle_ws);
		wsm->free_ws++;
		spin_unlock(&wsm->ws_lock);
		goto wake;
	}
	spin_unlock(&wsm->ws_lock);

	free_workspace(work);
	atomic_dec(&wsm->total_ws);
wake:
	if (wq_has_sleeper(&wsm->ws_wait))
		wake_up(&wsm->ws_wait);
}
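/*
 * Typical pairing, sketched (the real caller lives further down in
 * this file, in the CRUSH mapping code):
 *
 *	struct crush_work *work;
 *
 *	work = get_workspace(&map->crush_wsm, map->crush);
 *	... run crush_do_rule() with @work as scratch space ...
 *	put_workspace(&map->crush_wsm, work);
 *
 * get_workspace() can sleep but never fails: at worst it waits for one
 * of the roughly num_online_cpus() + 1 workspaces to be put back.
 */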
/*
 * osd map
 */
struct ceph_osdmap *ceph_osdmap_alloc(void)
{
	struct ceph_osdmap *map;

	map = kzalloc(sizeof(*map), GFP_NOIO);
	if (!map)
		return NULL;

	map->pg_pools = RB_ROOT;
	map->pool_max = -1;
	map->pg_temp = RB_ROOT;
	map->primary_temp = RB_ROOT;
	map->pg_upmap = RB_ROOT;
	map->pg_upmap_items = RB_ROOT;
	init_workspace_manager(&map->crush_wsm);

	return map;
}

void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);

	if (map->crush)
		crush_destroy(map->crush);
	cleanup_workspace_manager(&map->crush_wsm);

	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_temp),
				 struct ceph_pg_mapping, node);

		erase_pg_mapping(&map->pg_temp, pg);
		free_pg_mapping(pg);
	}
	while (!RB_EMPTY_ROOT(&map->primary_temp)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->primary_temp),
				 struct ceph_pg_mapping, node);

		erase_pg_mapping(&map->primary_temp, pg);
		free_pg_mapping(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_upmap)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_upmap),
				 struct ceph_pg_mapping, node);

		rb_erase(&pg->node, &map->pg_upmap);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_upmap_items)) {
		struct ceph_pg_mapping *pg =
			rb_entry(rb_first(&map->pg_upmap_items),
				 struct ceph_pg_mapping, node);

		rb_erase(&pg->node, &map->pg_upmap_items);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rb_first(&map->pg_pools),
				 struct ceph_pg_pool_info, node);

		__remove_pg_pool(&map->pg_pools, pi);
	}
	kvfree(map->osd_state);
	kvfree(map->osd_weight);
	kvfree(map->osd_addr);
	kvfree(map->osd_primary_affinity);
	kfree(map);
}

/*
 * Adjust max_osd value, (re)allocate arrays.
 *
 * The new elements are properly initialized.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, u32 max)
{
	u32 *state;
	u32 *weight;
	struct ceph_entity_addr *addr;
	u32 to_copy;
	int i;

	dout("%s old %u new %u\n", __func__, map->max_osd, max);
	if (max == map->max_osd)
		return 0;

	state = kvmalloc(array_size(max, sizeof(*state)), GFP_NOFS);
	weight = kvmalloc(array_size(max, sizeof(*weight)), GFP_NOFS);
	addr = kvmalloc(array_size(max, sizeof(*addr)), GFP_NOFS);
	if (!state || !weight || !addr) {
		kvfree(state);
		kvfree(weight);
		kvfree(addr);
		return -ENOMEM;
	}

	to_copy = min(map->max_osd, max);
	if (map->osd_state) {
		memcpy(state, map->osd_state, to_copy * sizeof(*state));
		memcpy(weight, map->osd_weight, to_copy * sizeof(*weight));
		memcpy(addr, map->osd_addr, to_copy * sizeof(*addr));
		kvfree(map->osd_state);
		kvfree(map->osd_weight);
		kvfree(map->osd_addr);
	}

	map->osd_state = state;
	map->osd_weight = weight;
	map->osd_addr = addr;
	for (i = map->max_osd; i < max; i++) {
		map->osd_state[i] = 0;
		map->osd_weight[i] = CEPH_OSD_OUT;
		memset(map->osd_addr + i, 0, sizeof(*map->osd_addr));
	}

	if (map->osd_primary_affinity) {
		u32 *affinity;

		affinity = kvmalloc(array_size(max, sizeof(*affinity)),
				    GFP_NOFS);
		if (!affinity)
			return -ENOMEM;

		memcpy(affinity, map->osd_primary_affinity,
		       to_copy * sizeof(*affinity));
		kvfree(map->osd_primary_affinity);

		map->osd_primary_affinity = affinity;
		for (i = map->max_osd; i < max; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->max_osd = max;

	return 0;
}

static int osdmap_set_crush(struct ceph_osdmap *map, struct crush_map *crush)
{
	struct crush_work *work;

	if (IS_ERR(crush))
		return PTR_ERR(crush);

	work = alloc_workspace(crush);
	if (!work) {
		crush_destroy(crush);
		return -ENOMEM;
	}

	if (map->crush)
		crush_destroy(map->crush);
	cleanup_workspace_manager(&map->crush_wsm);
	map->crush = crush;
	add_initial_workspace(&map->crush_wsm, work);
	return 0;
}
#define OSDMAP_WRAPPER_COMPAT_VER	7
#define OSDMAP_CLIENT_DATA_COMPAT_VER	1

/*
 * Return 0 or error.  On success, *v is set to 0 for old (v6) osdmaps,
 * to struct_v of the client_data section for new (v7 and above)
 * osdmaps.
 */
static int get_osdmap_client_data_v(void **p, void *end,
				    const char *prefix, u8 *v)
{
	u8 struct_v;

	ceph_decode_8_safe(p, end, struct_v, e_inval);
	if (struct_v >= 7) {
		u8 struct_compat;

		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap\n",
				struct_v, struct_compat,
				OSDMAP_WRAPPER_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore wrapper struct_len */

		ceph_decode_8_safe(p, end, struct_v, e_inval);
		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap client data\n",
				struct_v, struct_compat,
				OSDMAP_CLIENT_DATA_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore client data struct_len */
	} else {
		u16 version;

		*p -= 1;
		ceph_decode_16_safe(p, end, version, e_inval);
		if (version < 6) {
			pr_warn("got v %d < 6 of %s ceph_osdmap\n",
				version, prefix);
			return -EINVAL;
		}

		/* old osdmap encoding */
		struct_v = 0;
	}

	*v = struct_v;
	return 0;

e_inval:
	return -EINVAL;
}
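/*
 * The "*p -= 1" above is what makes the legacy branch work: pre-v7
 * osdmaps start with a little-endian u16 version where v7+ maps have a
 * u8 struct_v, so after peeking at the first byte the function backs up
 * and re-reads both bytes as the 16-bit version.
 */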
static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
			  bool incremental)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_pool_info *pi;
		u64 pool;
		int ret;

		ceph_decode_64_safe(p, end, pool, e_inval);

		pi = lookup_pg_pool(&map->pg_pools, pool);
		if (!incremental || !pi) {
			pi = kzalloc(sizeof(*pi), GFP_NOFS);
			if (!pi)
				return -ENOMEM;

			RB_CLEAR_NODE(&pi->node);
			pi->id = pool;

			if (!__insert_pg_pool(&map->pg_pools, pi)) {
				kfree(pi);
				return -EEXIST;
			}
		}

		ret = decode_pool(p, end, pi);
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, false);
}

static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, true);
}

typedef struct ceph_pg_mapping *(*decode_mapping_fn_t)(void **, void *, bool);

static int decode_pg_mapping(void **p, void *end, struct rb_root *mapping_root,
			     decode_mapping_fn_t fn, bool incremental)
{
	u32 n;

	WARN_ON(!incremental && !fn);

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_mapping *pg;
		struct ceph_pg pgid;
		int ret;

		ret = ceph_decode_pgid(p, end, &pgid);
		if (ret)
			return ret;

		pg = lookup_pg_mapping(mapping_root, &pgid);
		if (pg) {
			WARN_ON(!incremental);
			erase_pg_mapping(mapping_root, pg);
			free_pg_mapping(pg);
		}

		if (fn) {
			pg = fn(p, end, incremental);
			if (IS_ERR(pg))
				return PTR_ERR(pg);

			if (pg) {
				pg->pgid = pgid; /* struct */
				insert_pg_mapping(mapping_root, pg);
			}
		}
	}

	return 0;

e_inval:
	return -EINVAL;
}

static struct ceph_pg_mapping *__decode_pg_temp(void **p, void *end,
						bool incremental)
{
	struct ceph_pg_mapping *pg;
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len == 0 && incremental)
		return NULL;	/* new_pg_temp: [] to remove */
	if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32))
		return ERR_PTR(-EINVAL);

	ceph_decode_need(p, end, len * sizeof(u32), e_inval);
	pg = alloc_pg_mapping(len * sizeof(u32));
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->pg_temp.len = len;
	for (i = 0; i < len; i++)
		pg->pg_temp.osds[i] = ceph_decode_32(p);

	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
				 false);
}

static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
				 true);
}

static struct ceph_pg_mapping *__decode_primary_temp(void **p, void *end,
						     bool incremental)
{
	struct ceph_pg_mapping *pg;
	u32 osd;

	ceph_decode_32_safe(p, end, osd, e_inval);
	if (osd == (u32)-1 && incremental)
		return NULL;	/* new_primary_temp: -1 to remove */

	pg = alloc_pg_mapping(0);
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->primary_temp.osd = osd;
	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->primary_temp,
				 __decode_primary_temp, false);
}

static int decode_new_primary_temp(void **p, void *end,
				   struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->primary_temp,
				 __decode_primary_temp, true);
}

u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity)
		return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;

	return map->osd_primary_affinity[osd];
}

static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity) {
		int i;

		map->osd_primary_affinity = kvmalloc(
		    array_size(map->max_osd,
			       sizeof(*map->osd_primary_affinity)),
		    GFP_NOFS);
		if (!map->osd_primary_affinity)
			return -ENOMEM;

		for (i = 0; i < map->max_osd; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->osd_primary_affinity[osd] = aff;

	return 0;
}

static int decode_primary_affinity(void **p, void *end,
				   struct ceph_osdmap *map)
{
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len == 0) {
		kvfree(map->osd_primary_affinity);
		map->osd_primary_affinity = NULL;
		return 0;
	}
	if (len != map->max_osd)
		goto e_inval;

	ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval);

	for (i = 0; i < map->max_osd; i++) {
		int ret;

		ret = set_primary_affinity(map, i, ceph_decode_32(p));
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_new_primary_affinity(void **p, void *end,
				       struct ceph_osdmap *map)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		u32 osd, aff;
		int ret;

		ceph_decode_32_safe(p, end, osd, e_inval);
		ceph_decode_32_safe(p, end, aff, e_inval);

		ret = set_primary_affinity(map, osd, aff);
		if (ret)
			return ret;

		osdmap_info(map, "osd%d primary-affinity 0x%x\n", osd, aff);
	}

	return 0;

e_inval:
	return -EINVAL;
}

static struct ceph_pg_mapping *__decode_pg_upmap(void **p, void *end,
						 bool __unused)
{
	return __decode_pg_temp(p, end, false);
}

static int decode_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
				 false);
}

static int decode_new_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
				 true);
}

static int decode_old_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, NULL, true);
}

static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end,
						       bool __unused)
{
	struct ceph_pg_mapping *pg;
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32)))
		return ERR_PTR(-EINVAL);

	ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval);
	pg = alloc_pg_mapping(2 * len * sizeof(u32));
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->pg_upmap_items.len = len;
	for (i = 0; i < len; i++) {
		pg->pg_upmap_items.from_to[i][0] = ceph_decode_32(p);
		pg->pg_upmap_items.from_to[i][1] = ceph_decode_32(p);
	}

	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_pg_upmap_items(void **p, void *end,
				 struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items,
				 __decode_pg_upmap_items, false);
}

static int decode_new_pg_upmap_items(void **p, void *end,
				     struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items,
				 __decode_pg_upmap_items, true);
}

static int decode_old_pg_upmap_items(void **p, void *end,
				     struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items, NULL, true);
}
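/*
 * All of the pg_temp/primary_temp/pg_upmap(_items) variants above
 * funnel through decode_pg_mapping(): any existing entry for the pgid
 * is dropped first, then the per-type callback decides whether a
 * replacement is inserted.  Removal is encoded in-band - an empty osd
 * list for pg_temp, osd (u32)-1 for primary_temp - and the "old_*"
 * variants pass fn == NULL so they only ever remove.
 */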
/*
 * decode a full map.
 */
static int osdmap_decode(void **p, void *end, bool msgr2,
			 struct ceph_osdmap *map)
{
	u8 struct_v;
	u32 epoch = 0;
	void *start = *p;
	u32 max;
	u32 len, i;
	int err;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "full", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, created, modified */
	ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) +
			 sizeof(map->created) + sizeof(map->modified), e_inval);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	epoch = map->epoch = ceph_decode_32(p);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));

	/* pools */
	err = decode_pools(p, end, map);
	if (err)
		goto bad;

	/* pool_name */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	ceph_decode_32_safe(p, end, map->pool_max, e_inval);

	ceph_decode_32_safe(p, end, map->flags, e_inval);

	/* max_osd */
	ceph_decode_32_safe(p, end, max, e_inval);

	/* (re)alloc osd arrays */
	err = osdmap_set_max_osd(map, max);
	if (err)
		goto bad;

	/* osd_state, osd_weight, osd_addrs->client_addr */
	ceph_decode_need(p, end, 3*sizeof(u32) +
			 map->max_osd*(struct_v >= 5 ? sizeof(u32) :
						       sizeof(u8)) +
			 sizeof(*map->osd_weight), e_inval);
	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	if (struct_v >= 5) {
		for (i = 0; i < map->max_osd; i++)
			map->osd_state[i] = ceph_decode_32(p);
	} else {
		for (i = 0; i < map->max_osd; i++)
			map->osd_state[i] = ceph_decode_8(p);
	}

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	for (i = 0; i < map->max_osd; i++)
		map->osd_weight[i] = ceph_decode_32(p);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	for (i = 0; i < map->max_osd; i++) {
		struct ceph_entity_addr *addr = &map->osd_addr[i];

		if (struct_v >= 8)
			err = ceph_decode_entity_addrvec(p, end, msgr2, addr);
		else
			err = ceph_decode_entity_addr(p, end, addr);
		if (err)
			goto bad;

		dout("%s osd%d addr %s\n", __func__, i, ceph_pr_addr(addr));
	}

	/* pg_temp */
	err = decode_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* primary_temp */
	if (struct_v >= 1) {
		err = decode_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* primary_affinity */
	if (struct_v >= 2) {
		err = decode_primary_affinity(p, end, map);
		if (err)
			goto bad;
	} else {
		WARN_ON(map->osd_primary_affinity);
	}

	/* crush */
	ceph_decode_32_safe(p, end, len, e_inval);
	err = osdmap_set_crush(map, crush_decode(*p, min(*p + len, end)));
	if (err)
		goto bad;

	*p += len;
	if (struct_v >= 3) {
		/* erasure_code_profiles */
		ceph_decode_skip_map_of_map(p, end, string, string, string,
					    e_inval);
	}

	if (struct_v >= 4) {
		err = decode_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_pg_upmap_items(p, end, map);
		if (err)
			goto bad;
	} else {
		WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap));
		WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap_items));
	}

	/* ignore the rest */
	*p = end;

	dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return 0;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	return err;
}

/*
 * Allocate and decode a full map.
 */
struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end, bool msgr2)
{
	struct ceph_osdmap *map;
	int ret;

	map = ceph_osdmap_alloc();
	if (!map)
		return ERR_PTR(-ENOMEM);

	ret = osdmap_decode(p, end, msgr2, map);
	if (ret) {
		ceph_osdmap_destroy(map);
		return ERR_PTR(ret);
	}

	return map;
}
/*
 * Encoding order is (new_up_client, new_state, new_weight).  Need to
 * apply in the (new_weight, new_state, new_up_client) order, because
 * an incremental map may look like e.g.
 *
 * new_up_client: { osd=6, addr=... } # set osd_state and addr
 * new_state: { osd=6, xorstate=EXISTS } # clear osd_state
 */
static int decode_new_up_state_weight(void **p, void *end, u8 struct_v,
				      bool msgr2, struct ceph_osdmap *map)
{
	void *new_up_client;
	void *new_state;
	void *new_weight_end;
	u32 len;
	int ret;
	int i;

	new_up_client = *p;
	ceph_decode_32_safe(p, end, len, e_inval);
	for (i = 0; i < len; ++i) {
		struct ceph_entity_addr addr;

		ceph_decode_skip_32(p, end, e_inval);
		if (struct_v >= 7)
			ret = ceph_decode_entity_addrvec(p, end, msgr2, &addr);
		else
			ret = ceph_decode_entity_addr(p, end, &addr);
		if (ret)
			return ret;
	}

	new_state = *p;
	ceph_decode_32_safe(p, end, len, e_inval);
	len *= sizeof(u32) + (struct_v >= 5 ? sizeof(u32) : sizeof(u8));
	ceph_decode_need(p, end, len, e_inval);
	*p += len;

	/* new_weight */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		s32 osd;
		u32 w;

		ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
		osd = ceph_decode_32(p);
		w = ceph_decode_32(p);
		BUG_ON(osd >= map->max_osd);
		osdmap_info(map, "osd%d weight 0x%x %s\n", osd, w,
			    w == CEPH_OSD_IN ? "(in)" :
			    (w == CEPH_OSD_OUT ? "(out)" : ""));
		map->osd_weight[osd] = w;

		/*
		 * If we are marking in, set the EXISTS, and clear the
		 * AUTOOUT and NEW bits.
		 */
		if (w) {
			map->osd_state[osd] |= CEPH_OSD_EXISTS;
			map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
						 CEPH_OSD_NEW);
		}
	}
	new_weight_end = *p;

	/* new_state (up/down) */
	*p = new_state;
	len = ceph_decode_32(p);
	while (len--) {
		s32 osd;
		u32 xorstate;

		osd = ceph_decode_32(p);
		if (struct_v >= 5)
			xorstate = ceph_decode_32(p);
		else
			xorstate = ceph_decode_8(p);
		if (xorstate == 0)
			xorstate = CEPH_OSD_UP;
		BUG_ON(osd >= map->max_osd);
		if ((map->osd_state[osd] & CEPH_OSD_UP) &&
		    (xorstate & CEPH_OSD_UP))
			osdmap_info(map, "osd%d down\n", osd);
		if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
		    (xorstate & CEPH_OSD_EXISTS)) {
			osdmap_info(map, "osd%d does not exist\n", osd);
			ret = set_primary_affinity(map, osd,
						   CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
			if (ret)
				return ret;
			memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
			map->osd_state[osd] = 0;
		} else {
			map->osd_state[osd] ^= xorstate;
		}
	}

	/* new_up_client */
	*p = new_up_client;
	len = ceph_decode_32(p);
	while (len--) {
		s32 osd;
		struct ceph_entity_addr addr;

		osd = ceph_decode_32(p);
		BUG_ON(osd >= map->max_osd);
		if (struct_v >= 7)
			ret = ceph_decode_entity_addrvec(p, end, msgr2, &addr);
		else
			ret = ceph_decode_entity_addr(p, end, &addr);
		if (ret)
			return ret;

		dout("%s osd%d addr %s\n", __func__, osd, ceph_pr_addr(&addr));

		osdmap_info(map, "osd%d up\n", osd);
		map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
		map->osd_addr[osd] = addr;
	}

	*p = new_weight_end;
	return 0;

e_inval:
	return -EINVAL;
}
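/*
 * xorstate worked example: an osd whose state is EXISTS | UP and which
 * receives xorstate = UP is logged as "osd%d down" and is left with
 * just EXISTS; if the xor would clear EXISTS, the slot is fully reset
 * instead - state 0, address zeroed, primary affinity back to the
 * default.
 */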
*/ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, bool msgr2, struct ceph_osdmap *map) { struct ceph_fsid fsid; u32 epoch = 0; struct ceph_timespec modified; s32 len; u64 pool; __s64 new_pool_max; __s32 new_flags, max; void *start = *p; int err; u8 struct_v; dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p)); err = get_osdmap_client_data_v(p, end, "inc", &struct_v); if (err) goto bad; /* fsid, epoch, modified, new_pool_max, new_flags */ ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) + sizeof(u64) + sizeof(u32), e_inval); ceph_decode_copy(p, &fsid, sizeof(fsid)); epoch = ceph_decode_32(p); BUG_ON(epoch != map->epoch+1); ceph_decode_copy(p, &modified, sizeof(modified)); new_pool_max = ceph_decode_64(p); new_flags = ceph_decode_32(p); /* full map? */ ceph_decode_32_safe(p, end, len, e_inval); if (len > 0) { dout("apply_incremental full map len %d, %p to %p\n", len, *p, end); return ceph_osdmap_decode(p, min(*p+len, end), msgr2); } /* new crush? */ ceph_decode_32_safe(p, end, len, e_inval); if (len > 0) { err = osdmap_set_crush(map, crush_decode(*p, min(*p + len, end))); if (err) goto bad; *p += len; } /* new flags? */ if (new_flags >= 0) map->flags = new_flags; if (new_pool_max >= 0) map->pool_max = new_pool_max; /* new max? */ ceph_decode_32_safe(p, end, max, e_inval); if (max >= 0) { err = osdmap_set_max_osd(map, max); if (err) goto bad; } map->epoch++; map->modified = modified; /* new_pools */ err = decode_new_pools(p, end, map); if (err) goto bad; /* new_pool_names */ err = decode_pool_names(p, end, map); if (err) goto bad; /* old_pool */ ceph_decode_32_safe(p, end, len, e_inval); while (len--) { struct ceph_pg_pool_info *pi; ceph_decode_64_safe(p, end, pool, e_inval); pi = lookup_pg_pool(&map->pg_pools, pool); if (pi) __remove_pg_pool(&map->pg_pools, pi); } /* new_up_client, new_state, new_weight */ err = decode_new_up_state_weight(p, end, struct_v, msgr2, map); if (err) goto bad; /* new_pg_temp */ err = decode_new_pg_temp(p, end, map); if (err) goto bad; /* new_primary_temp */ if (struct_v >= 1) { err = decode_new_primary_temp(p, end, map); if (err) goto bad; } /* new_primary_affinity */ if (struct_v >= 2) { err = decode_new_primary_affinity(p, end, map); if (err) goto bad; } if (struct_v >= 3) { /* new_erasure_code_profiles */ ceph_decode_skip_map_of_map(p, end, string, string, string, e_inval); /* old_erasure_code_profiles */ ceph_decode_skip_set(p, end, string, e_inval); } if (struct_v >= 4) { err = decode_new_pg_upmap(p, end, map); if (err) goto bad; err = decode_old_pg_upmap(p, end, map); if (err) goto bad; err = decode_new_pg_upmap_items(p, end, map); if (err) goto bad; err = decode_old_pg_upmap_items(p, end, map); if (err) goto bad; } /* ignore the rest */ *p = end; dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd); return map; e_inval: err = -EINVAL; bad: pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n", err, epoch, (int)(*p - start), *p, start, end); print_hex_dump(KERN_DEBUG, "osdmap: ", DUMP_PREFIX_OFFSET, 16, 1, start, end - start, true); return ERR_PTR(err); } void ceph_oloc_copy(struct ceph_object_locator *dest, const struct ceph_object_locator *src) { ceph_oloc_destroy(dest); dest->pool = src->pool; if (src->pool_ns) dest->pool_ns = ceph_get_string(src->pool_ns); else dest->pool_ns = NULL; } EXPORT_SYMBOL(ceph_oloc_copy); void ceph_oloc_destroy(struct ceph_object_locator *oloc) { ceph_put_string(oloc->pool_ns); } EXPORT_SYMBOL(ceph_oloc_destroy); void ceph_oid_copy(struct 
ceph_object_id *dest, const struct ceph_object_id *src) { ceph_oid_destroy(dest); if (src->name != src->inline_name) { /* very rare, see ceph_object_id definition */ dest->name = kmalloc(src->name_len + 1, GFP_NOIO | __GFP_NOFAIL); } else { dest->name = dest->inline_name; } memcpy(dest->name, src->name, src->name_len + 1); dest->name_len = src->name_len; } EXPORT_SYMBOL(ceph_oid_copy); static __printf(2, 0) int oid_printf_vargs(struct ceph_object_id *oid, const char *fmt, va_list ap) { int len; WARN_ON(!ceph_oid_empty(oid)); len = vsnprintf(oid->inline_name, sizeof(oid->inline_name), fmt, ap); if (len >= sizeof(oid->inline_name)) return len; oid->name_len = len; return 0; } /* * If oid doesn't fit into inline buffer, BUG. */ void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...) { va_list ap; va_start(ap, fmt); BUG_ON(oid_printf_vargs(oid, fmt, ap)); va_end(ap); } EXPORT_SYMBOL(ceph_oid_printf); static __printf(3, 0) int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp, const char *fmt, va_list ap) { va_list aq; int len; va_copy(aq, ap); len = oid_printf_vargs(oid, fmt, aq); va_end(aq); if (len) { char *external_name; external_name = kmalloc(len + 1, gfp); if (!external_name) return -ENOMEM; oid->name = external_name; WARN_ON(vsnprintf(oid->name, len + 1, fmt, ap) != len); oid->name_len = len; } return 0; } /* * If oid doesn't fit into inline buffer, allocate. */ int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp, const char *fmt, ...) { va_list ap; int ret; va_start(ap, fmt); ret = oid_aprintf_vargs(oid, gfp, fmt, ap); va_end(ap); return ret; } EXPORT_SYMBOL(ceph_oid_aprintf); void ceph_oid_destroy(struct ceph_object_id *oid) { if (oid->name != oid->inline_name) kfree(oid->name); } EXPORT_SYMBOL(ceph_oid_destroy); /* * osds only */ static bool __osds_equal(const struct ceph_osds *lhs, const struct ceph_osds *rhs) { if (lhs->size == rhs->size && !memcmp(lhs->osds, rhs->osds, rhs->size * sizeof(rhs->osds[0]))) return true; return false; } /* * osds + primary */ static bool osds_equal(const struct ceph_osds *lhs, const struct ceph_osds *rhs) { if (__osds_equal(lhs, rhs) && lhs->primary == rhs->primary) return true; return false; } static bool osds_valid(const struct ceph_osds *set) { /* non-empty set */ if (set->size > 0 && set->primary >= 0) return true; /* empty can_shift_osds set */ if (!set->size && set->primary == -1) return true; /* empty !can_shift_osds set - all NONE */ if (set->size > 0 && set->primary == -1) { int i; for (i = 0; i < set->size; i++) { if (set->osds[i] != CRUSH_ITEM_NONE) break; } if (i == set->size) return true; } return false; } void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src) { memcpy(dest->osds, src->osds, src->size * sizeof(src->osds[0])); dest->size = src->size; dest->primary = src->primary; } bool ceph_pg_is_split(const struct ceph_pg *pgid, u32 old_pg_num, u32 new_pg_num) { int old_bits = calc_bits_of(old_pg_num); int old_mask = (1 << old_bits) - 1; int n; WARN_ON(pgid->seed >= old_pg_num); if (new_pg_num <= old_pg_num) return false; for (n = 1; ; n++) { int next_bit = n << (old_bits - 1); u32 s = next_bit | pgid->seed; if (s < old_pg_num || s == pgid->seed) continue; if (s >= new_pg_num) break; s = ceph_stable_mod(s, old_pg_num, old_mask); if (s == pgid->seed) return true; } return false; } bool ceph_is_new_interval(const struct ceph_osds *old_acting, const struct ceph_osds *new_acting, const struct ceph_osds *old_up, const struct ceph_osds *new_up, int old_size, int new_size, int old_min_size, int 
new_min_size, u32 old_pg_num, u32 new_pg_num, bool old_sort_bitwise, bool new_sort_bitwise, bool old_recovery_deletes, bool new_recovery_deletes, const struct ceph_pg *pgid) { return !osds_equal(old_acting, new_acting) || !osds_equal(old_up, new_up) || old_size != new_size || old_min_size != new_min_size || ceph_pg_is_split(pgid, old_pg_num, new_pg_num) || old_sort_bitwise != new_sort_bitwise || old_recovery_deletes != new_recovery_deletes; } static int calc_pg_rank(int osd, const struct ceph_osds *acting) { int i; for (i = 0; i < acting->size; i++) { if (acting->osds[i] == osd) return i; } return -1; } static bool primary_changed(const struct ceph_osds *old_acting, const struct ceph_osds *new_acting) { if (!old_acting->size && !new_acting->size) return false; /* both still empty */ if (!old_acting->size ^ !new_acting->size) return true; /* was empty, now not, or vice versa */ if (old_acting->primary != new_acting->primary) return true; /* primary changed */ if (calc_pg_rank(old_acting->primary, old_acting) != calc_pg_rank(new_acting->primary, new_acting)) return true; return false; /* same primary (tho replicas may have changed) */ } bool ceph_osds_changed(const struct ceph_osds *old_acting, const struct ceph_osds *new_acting, bool any_change) { if (primary_changed(old_acting, new_acting)) return true; if (any_change && !__osds_equal(old_acting, new_acting)) return true; return false; } /* * Map an object into a PG. * * Should only be called with target_oid and target_oloc (as opposed to * base_oid and base_oloc), since tiering isn't taken into account. */ void __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi, const struct ceph_object_id *oid, const struct ceph_object_locator *oloc, struct ceph_pg *raw_pgid) { WARN_ON(pi->id != oloc->pool); if (!oloc->pool_ns) { raw_pgid->pool = oloc->pool; raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name, oid->name_len); dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name, raw_pgid->pool, raw_pgid->seed); } else { char stack_buf[256]; char *buf = stack_buf; int nsl = oloc->pool_ns->len; size_t total = nsl + 1 + oid->name_len; if (total > sizeof(stack_buf)) buf = kmalloc(total, GFP_NOIO | __GFP_NOFAIL); memcpy(buf, oloc->pool_ns->str, nsl); buf[nsl] = '\037'; memcpy(buf + nsl + 1, oid->name, oid->name_len); raw_pgid->pool = oloc->pool; raw_pgid->seed = ceph_str_hash(pi->object_hash, buf, total); if (buf != stack_buf) kfree(buf); dout("%s %s ns %.*s -> raw_pgid %llu.%x\n", __func__, oid->name, nsl, oloc->pool_ns->str, raw_pgid->pool, raw_pgid->seed); } } int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap, const struct ceph_object_id *oid, const struct ceph_object_locator *oloc, struct ceph_pg *raw_pgid) { struct ceph_pg_pool_info *pi; pi = ceph_pg_pool_by_id(osdmap, oloc->pool); if (!pi) return -ENOENT; __ceph_object_locator_to_pg(pi, oid, oloc, raw_pgid); return 0; } EXPORT_SYMBOL(ceph_object_locator_to_pg); /* * Map a raw PG (full precision ps) into an actual PG. */ static void raw_pg_to_pg(struct ceph_pg_pool_info *pi, const struct ceph_pg *raw_pgid, struct ceph_pg *pgid) { pgid->pool = raw_pgid->pool; pgid->seed = ceph_stable_mod(raw_pgid->seed, pi->pg_num, pi->pg_num_mask); } /* * Map a raw PG (full precision ps) into a placement ps (placement * seed). Include pool id in that value so that different pools don't * use the same seeds. 
*/ static u32 raw_pg_to_pps(struct ceph_pg_pool_info *pi, const struct ceph_pg *raw_pgid) { if (pi->flags & CEPH_POOL_FLAG_HASHPSPOOL) { /* hash pool id and seed so that pool PGs do not overlap */ return crush_hash32_2(CRUSH_HASH_RJENKINS1, ceph_stable_mod(raw_pgid->seed, pi->pgp_num, pi->pgp_num_mask), raw_pgid->pool); } else { /* * legacy behavior: add ps and pool together. this is * not a great approach because the PGs from each pool * will overlap on top of each other: 0.5 == 1.4 == * 2.3 == ... */ return ceph_stable_mod(raw_pgid->seed, pi->pgp_num, pi->pgp_num_mask) + (unsigned)raw_pgid->pool; } } /* * Magic value used for a "default" fallback choose_args, used if the * crush_choose_arg_map passed to do_crush() does not exist. If this * also doesn't exist, fall back to canonical weights. */ #define CEPH_DEFAULT_CHOOSE_ARGS -1 static int do_crush(struct ceph_osdmap *map, int ruleno, int x, int *result, int result_max, const __u32 *weight, int weight_max, s64 choose_args_index) { struct crush_choose_arg_map *arg_map; struct crush_work *work; int r; BUG_ON(result_max > CEPH_PG_MAX_SIZE); arg_map = lookup_choose_arg_map(&map->crush->choose_args, choose_args_index); if (!arg_map) arg_map = lookup_choose_arg_map(&map->crush->choose_args, CEPH_DEFAULT_CHOOSE_ARGS); work = get_workspace(&map->crush_wsm, map->crush); r = crush_do_rule(map->crush, ruleno, x, result, result_max, weight, weight_max, work, arg_map ? arg_map->args : NULL); put_workspace(&map->crush_wsm, work); return r; } static void remove_nonexistent_osds(struct ceph_osdmap *osdmap, struct ceph_pg_pool_info *pi, struct ceph_osds *set) { int i; if (ceph_can_shift_osds(pi)) { int removed = 0; /* shift left */ for (i = 0; i < set->size; i++) { if (!ceph_osd_exists(osdmap, set->osds[i])) { removed++; continue; } if (removed) set->osds[i - removed] = set->osds[i]; } set->size -= removed; } else { /* set dne devices to NONE */ for (i = 0; i < set->size; i++) { if (!ceph_osd_exists(osdmap, set->osds[i])) set->osds[i] = CRUSH_ITEM_NONE; } } } /* * Calculate raw set (CRUSH output) for given PG and filter out * nonexistent OSDs. ->primary is undefined for a raw set. * * Placement seed (CRUSH input) is returned through @ppps. 
*/ static void pg_to_raw_osds(struct ceph_osdmap *osdmap, struct ceph_pg_pool_info *pi, const struct ceph_pg *raw_pgid, struct ceph_osds *raw, u32 *ppps) { u32 pps = raw_pg_to_pps(pi, raw_pgid); int ruleno; int len; ceph_osds_init(raw); if (ppps) *ppps = pps; ruleno = crush_find_rule(osdmap->crush, pi->crush_ruleset, pi->type, pi->size); if (ruleno < 0) { pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n", pi->id, pi->crush_ruleset, pi->type, pi->size); return; } if (pi->size > ARRAY_SIZE(raw->osds)) { pr_err_ratelimited("pool %lld ruleset %d type %d too wide: size %d > %zu\n", pi->id, pi->crush_ruleset, pi->type, pi->size, ARRAY_SIZE(raw->osds)); return; } len = do_crush(osdmap, ruleno, pps, raw->osds, pi->size, osdmap->osd_weight, osdmap->max_osd, pi->id); if (len < 0) { pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n", len, ruleno, pi->id, pi->crush_ruleset, pi->type, pi->size); return; } raw->size = len; remove_nonexistent_osds(osdmap, pi, raw); } /* apply pg_upmap[_items] mappings */ static void apply_upmap(struct ceph_osdmap *osdmap, const struct ceph_pg *pgid, struct ceph_osds *raw) { struct ceph_pg_mapping *pg; int i, j; pg = lookup_pg_mapping(&osdmap->pg_upmap, pgid); if (pg) { /* make sure targets aren't marked out */ for (i = 0; i < pg->pg_upmap.len; i++) { int osd = pg->pg_upmap.osds[i]; if (osd != CRUSH_ITEM_NONE && osd < osdmap->max_osd && osdmap->osd_weight[osd] == 0) { /* reject/ignore explicit mapping */ return; } } for (i = 0; i < pg->pg_upmap.len; i++) raw->osds[i] = pg->pg_upmap.osds[i]; raw->size = pg->pg_upmap.len; /* check and apply pg_upmap_items, if any */ } pg = lookup_pg_mapping(&osdmap->pg_upmap_items, pgid); if (pg) { /* * Note: this approach does not allow a bidirectional swap, * e.g., [[1,2],[2,1]] applied to [0,1,2] -> [0,2,1]. */ for (i = 0; i < pg->pg_upmap_items.len; i++) { int from = pg->pg_upmap_items.from_to[i][0]; int to = pg->pg_upmap_items.from_to[i][1]; int pos = -1; bool exists = false; /* make sure replacement doesn't already appear */ for (j = 0; j < raw->size; j++) { int osd = raw->osds[j]; if (osd == to) { exists = true; break; } /* ignore mapping if target is marked out */ if (osd == from && pos < 0 && !(to != CRUSH_ITEM_NONE && to < osdmap->max_osd && osdmap->osd_weight[to] == 0)) { pos = j; } } if (!exists && pos >= 0) raw->osds[pos] = to; } } } /* * Given raw set, calculate up set and up primary. By definition of an * up set, the result won't contain nonexistent or down OSDs. * * This is done in-place - on return @set is the up set. If it's * empty, ->primary will remain undefined. */ static void raw_to_up_osds(struct ceph_osdmap *osdmap, struct ceph_pg_pool_info *pi, struct ceph_osds *set) { int i; /* ->primary is undefined for a raw set */ BUG_ON(set->primary != -1); if (ceph_can_shift_osds(pi)) { int removed = 0; /* shift left */ for (i = 0; i < set->size; i++) { if (ceph_osd_is_down(osdmap, set->osds[i])) { removed++; continue; } if (removed) set->osds[i - removed] = set->osds[i]; } set->size -= removed; if (set->size > 0) set->primary = set->osds[0]; } else { /* set down/dne devices to NONE */ for (i = set->size - 1; i >= 0; i--) { if (ceph_osd_is_down(osdmap, set->osds[i])) set->osds[i] = CRUSH_ITEM_NONE; else set->primary = set->osds[i]; } } } static void apply_primary_affinity(struct ceph_osdmap *osdmap, struct ceph_pg_pool_info *pi, u32 pps, struct ceph_osds *up) { int i; int pos = -1; /* * Do we have any non-default primary_affinity values for these * osds? 
*/ if (!osdmap->osd_primary_affinity) return; for (i = 0; i < up->size; i++) { int osd = up->osds[i]; if (osd != CRUSH_ITEM_NONE && osdmap->osd_primary_affinity[osd] != CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) { break; } } if (i == up->size) return; /* * Pick the primary. Feed both the seed (for the pg) and the * osd into the hash/rng so that a proportional fraction of an * osd's pgs get rejected as primary. */ for (i = 0; i < up->size; i++) { int osd = up->osds[i]; u32 aff; if (osd == CRUSH_ITEM_NONE) continue; aff = osdmap->osd_primary_affinity[osd]; if (aff < CEPH_OSD_MAX_PRIMARY_AFFINITY && (crush_hash32_2(CRUSH_HASH_RJENKINS1, pps, osd) >> 16) >= aff) { /* * We chose not to use this primary. Note it * anyway as a fallback in case we don't pick * anyone else, but keep looking. */ if (pos < 0) pos = i; } else { pos = i; break; } } if (pos < 0) return; up->primary = up->osds[pos]; if (ceph_can_shift_osds(pi) && pos > 0) { /* move the new primary to the front */ for (i = pos; i > 0; i--) up->osds[i] = up->osds[i - 1]; up->osds[0] = up->primary; } } /* * Get pg_temp and primary_temp mappings for given PG. * * Note that a PG may have none, only pg_temp, only primary_temp or * both pg_temp and primary_temp mappings. This means @temp isn't * always a valid OSD set on return: in the "only primary_temp" case, * @temp will have its ->primary >= 0 but ->size == 0. */ static void get_temp_osds(struct ceph_osdmap *osdmap, struct ceph_pg_pool_info *pi, const struct ceph_pg *pgid, struct ceph_osds *temp) { struct ceph_pg_mapping *pg; int i; ceph_osds_init(temp); /* pg_temp? */ pg = lookup_pg_mapping(&osdmap->pg_temp, pgid); if (pg) { for (i = 0; i < pg->pg_temp.len; i++) { if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) { if (ceph_can_shift_osds(pi)) continue; temp->osds[temp->size++] = CRUSH_ITEM_NONE; } else { temp->osds[temp->size++] = pg->pg_temp.osds[i]; } } /* apply pg_temp's primary */ for (i = 0; i < temp->size; i++) { if (temp->osds[i] != CRUSH_ITEM_NONE) { temp->primary = temp->osds[i]; break; } } } /* primary_temp? */ pg = lookup_pg_mapping(&osdmap->primary_temp, pgid); if (pg) temp->primary = pg->primary_temp.osd; } /* * Map a PG to its acting set as well as its up set. * * Acting set is used for data mapping purposes, while up set can be * recorded for detecting interval changes and deciding whether to * resend a request. 
*/ void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap, struct ceph_pg_pool_info *pi, const struct ceph_pg *raw_pgid, struct ceph_osds *up, struct ceph_osds *acting) { struct ceph_pg pgid; u32 pps; WARN_ON(pi->id != raw_pgid->pool); raw_pg_to_pg(pi, raw_pgid, &pgid); pg_to_raw_osds(osdmap, pi, raw_pgid, up, &pps); apply_upmap(osdmap, &pgid, up); raw_to_up_osds(osdmap, pi, up); apply_primary_affinity(osdmap, pi, pps, up); get_temp_osds(osdmap, pi, &pgid, acting); if (!acting->size) { memcpy(acting->osds, up->osds, up->size * sizeof(up->osds[0])); acting->size = up->size; if (acting->primary == -1) acting->primary = up->primary; } WARN_ON(!osds_valid(up) || !osds_valid(acting)); } bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap, struct ceph_pg_pool_info *pi, const struct ceph_pg *raw_pgid, struct ceph_spg *spgid) { struct ceph_pg pgid; struct ceph_osds up, acting; int i; WARN_ON(pi->id != raw_pgid->pool); raw_pg_to_pg(pi, raw_pgid, &pgid); if (ceph_can_shift_osds(pi)) { spgid->pgid = pgid; /* struct */ spgid->shard = CEPH_SPG_NOSHARD; return true; } ceph_pg_to_up_acting_osds(osdmap, pi, &pgid, &up, &acting); for (i = 0; i < acting.size; i++) { if (acting.osds[i] == acting.primary) { spgid->pgid = pgid; /* struct */ spgid->shard = i; return true; } } return false; } /* * Return acting primary for given PG, or -1 if none. */ int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap, const struct ceph_pg *raw_pgid) { struct ceph_pg_pool_info *pi; struct ceph_osds up, acting; pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool); if (!pi) return -1; ceph_pg_to_up_acting_osds(osdmap, pi, raw_pgid, &up, &acting); return acting.primary; } EXPORT_SYMBOL(ceph_pg_to_acting_primary); static struct crush_loc_node *alloc_crush_loc(size_t type_name_len, size_t name_len) { struct crush_loc_node *loc; loc = kmalloc(sizeof(*loc) + type_name_len + name_len + 2, GFP_NOIO); if (!loc) return NULL; RB_CLEAR_NODE(&loc->cl_node); return loc; } static void free_crush_loc(struct crush_loc_node *loc) { WARN_ON(!RB_EMPTY_NODE(&loc->cl_node)); kfree(loc); } static int crush_loc_compare(const struct crush_loc *loc1, const struct crush_loc *loc2) { return strcmp(loc1->cl_type_name, loc2->cl_type_name) ?: strcmp(loc1->cl_name, loc2->cl_name); } DEFINE_RB_FUNCS2(crush_loc, struct crush_loc_node, cl_loc, crush_loc_compare, RB_BYPTR, const struct crush_loc *, cl_node) /* * Parses a set of <bucket type name>':'<bucket name> pairs separated * by '|', e.g. "rack:foo1|rack:foo2|datacenter:bar". * * Note that @crush_location is modified by strsep(). 
*/ int ceph_parse_crush_location(char *crush_location, struct rb_root *locs) { struct crush_loc_node *loc; const char *type_name, *name, *colon; size_t type_name_len, name_len; dout("%s '%s'\n", __func__, crush_location); while ((type_name = strsep(&crush_location, "|"))) { colon = strchr(type_name, ':'); if (!colon) return -EINVAL; type_name_len = colon - type_name; if (type_name_len == 0) return -EINVAL; name = colon + 1; name_len = strlen(name); if (name_len == 0) return -EINVAL; loc = alloc_crush_loc(type_name_len, name_len); if (!loc) return -ENOMEM; loc->cl_loc.cl_type_name = loc->cl_data; memcpy(loc->cl_loc.cl_type_name, type_name, type_name_len); loc->cl_loc.cl_type_name[type_name_len] = '\0'; loc->cl_loc.cl_name = loc->cl_data + type_name_len + 1; memcpy(loc->cl_loc.cl_name, name, name_len); loc->cl_loc.cl_name[name_len] = '\0'; if (!__insert_crush_loc(locs, loc)) { free_crush_loc(loc); return -EEXIST; } dout("%s type_name '%s' name '%s'\n", __func__, loc->cl_loc.cl_type_name, loc->cl_loc.cl_name); } return 0; } int ceph_compare_crush_locs(struct rb_root *locs1, struct rb_root *locs2) { struct rb_node *n1 = rb_first(locs1); struct rb_node *n2 = rb_first(locs2); int ret; for ( ; n1 && n2; n1 = rb_next(n1), n2 = rb_next(n2)) { struct crush_loc_node *loc1 = rb_entry(n1, struct crush_loc_node, cl_node); struct crush_loc_node *loc2 = rb_entry(n2, struct crush_loc_node, cl_node); ret = crush_loc_compare(&loc1->cl_loc, &loc2->cl_loc); if (ret) return ret; } if (!n1 && n2) return -1; if (n1 && !n2) return 1; return 0; } void ceph_clear_crush_locs(struct rb_root *locs) { while (!RB_EMPTY_ROOT(locs)) { struct crush_loc_node *loc = rb_entry(rb_first(locs), struct crush_loc_node, cl_node); erase_crush_loc(locs, loc); free_crush_loc(loc); } } /* * [a-zA-Z0-9-_.]+ */ static bool is_valid_crush_name(const char *name) { do { if (!('a' <= *name && *name <= 'z') && !('A' <= *name && *name <= 'Z') && !('0' <= *name && *name <= '9') && *name != '-' && *name != '_' && *name != '.') return false; } while (*++name != '\0'); return true; } /* * Gets the parent of an item. Returns its id (<0 because the * parent is always a bucket), type id (>0 for the same reason, * via @parent_type_id) and location (via @parent_loc). If no * parent, returns 0. * * Does a linear search, as there are no parent pointers of any * kind. Note that the result is ambiguous for items that occur * multiple times in the map. */ static int get_immediate_parent(struct crush_map *c, int id, u16 *parent_type_id, struct crush_loc *parent_loc) { struct crush_bucket *b; struct crush_name_node *type_cn, *cn; int i, j; for (i = 0; i < c->max_buckets; i++) { b = c->buckets[i]; if (!b) continue; /* ignore per-class shadow hierarchy */ cn = lookup_crush_name(&c->names, b->id); if (!cn || !is_valid_crush_name(cn->cn_name)) continue; for (j = 0; j < b->size; j++) { if (b->items[j] != id) continue; *parent_type_id = b->type; type_cn = lookup_crush_name(&c->type_names, b->type); parent_loc->cl_type_name = type_cn->cn_name; parent_loc->cl_name = cn->cn_name; return b->id; } } return 0; /* no parent */ } /* * Calculates the locality/distance from an item to a client * location expressed in terms of CRUSH hierarchy as a set of * (bucket type name, bucket name) pairs. 
Specifically, looks * for the lowest-valued bucket type for which the location of * @id matches one of the locations in @locs, so for standard * bucket types (host = 1, rack = 3, datacenter = 8, zone = 9) * a matching host is closer than a matching rack and a matching * data center is closer than a matching zone. * * Specifying multiple locations (a "multipath" location) such * as "rack=foo1 rack=foo2 datacenter=bar" is allowed -- @locs * is a multimap. The locality will be: * * - 3 for OSDs in racks foo1 and foo2 * - 8 for OSDs in data center bar * - -1 for all other OSDs * * The lowest possible bucket type is 1, so the best locality * for an OSD is 1 (i.e. a matching host). Locality 0 would be * the OSD itself. */ int ceph_get_crush_locality(struct ceph_osdmap *osdmap, int id, struct rb_root *locs) { struct crush_loc loc; u16 type_id; /* * Instead of repeated get_immediate_parent() calls, * the location of @id could be obtained with a single * depth-first traversal. */ for (;;) { id = get_immediate_parent(osdmap->crush, id, &type_id, &loc); if (id >= 0) return -1; /* not local */ if (lookup_crush_loc(locs, &loc)) return type_id; } }
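/*
 * Usage sketch (illustrative only, not part of net/ceph/osdmap.c): how the
 * crush location helpers above fit together. The option string and the
 * example_* function name are hypothetical. Note the kstrdup() -- as the
 * comment above ceph_parse_crush_location() says, the string is modified
 * in place by strsep().
 */
static int example_crush_locality(struct ceph_osdmap *osdmap, int osd_id)
{
	struct rb_root locs = RB_ROOT;
	char *str;
	int locality;
	int ret;

	/* hypothetical source: a "crush_location=..." style option */
	str = kstrdup("rack:foo1|rack:foo2|datacenter:bar", GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	ret = ceph_parse_crush_location(str, &locs);
	kfree(str);
	if (ret) {
		ceph_clear_crush_locs(&locs);
		return ret;
	}

	/*
	 * Per the comment above: 3 (rack) for OSDs in foo1/foo2,
	 * 8 (datacenter) for OSDs in bar, -1 for everything else.
	 */
	locality = ceph_get_crush_locality(osdmap, osd_id, &locs);

	ceph_clear_crush_locs(&locs);
	return locality;
}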
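/*
 * Worked example (illustrative only): the legacy placement-seed collision
 * described in raw_pg_to_pps() above. With pgp_num = 8 (pgp_num_mask = 7),
 * ceph_stable_mod() leaves seeds below 8 unchanged, so adding the pool id
 * makes distinct PGs produce the same CRUSH input:
 *
 *   pool 0, seed 5  ->  5 + 0 = 5
 *   pool 1, seed 4  ->  4 + 1 = 5
 *   pool 2, seed 3  ->  3 + 2 = 5
 *
 * i.e. 0.5 == 1.4 == 2.3 all hash to the same OSDs, which is why
 * CEPH_POOL_FLAG_HASHPSPOOL mixes the pool id into the hash instead.
 */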
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM f2fs #if !defined(_TRACE_F2FS_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_F2FS_H #include <linux/tracepoint.h> #include <uapi/linux/f2fs.h> #define show_dev(dev) MAJOR(dev), MINOR(dev) #define show_dev_ino(entry) show_dev(entry->dev), (unsigned long)entry->ino TRACE_DEFINE_ENUM(NODE); TRACE_DEFINE_ENUM(DATA); TRACE_DEFINE_ENUM(META); TRACE_DEFINE_ENUM(META_FLUSH); TRACE_DEFINE_ENUM(IPU); TRACE_DEFINE_ENUM(OPU); TRACE_DEFINE_ENUM(HOT); TRACE_DEFINE_ENUM(WARM); TRACE_DEFINE_ENUM(COLD); TRACE_DEFINE_ENUM(CURSEG_HOT_DATA); TRACE_DEFINE_ENUM(CURSEG_WARM_DATA); TRACE_DEFINE_ENUM(CURSEG_COLD_DATA); TRACE_DEFINE_ENUM(CURSEG_HOT_NODE); TRACE_DEFINE_ENUM(CURSEG_WARM_NODE); TRACE_DEFINE_ENUM(CURSEG_COLD_NODE); TRACE_DEFINE_ENUM(NO_CHECK_TYPE); TRACE_DEFINE_ENUM(GC_GREEDY); TRACE_DEFINE_ENUM(GC_CB); TRACE_DEFINE_ENUM(FG_GC); TRACE_DEFINE_ENUM(BG_GC); TRACE_DEFINE_ENUM(LFS); TRACE_DEFINE_ENUM(SSR); TRACE_DEFINE_ENUM(__REQ_RAHEAD); TRACE_DEFINE_ENUM(__REQ_SYNC); TRACE_DEFINE_ENUM(__REQ_IDLE); TRACE_DEFINE_ENUM(__REQ_PREFLUSH); TRACE_DEFINE_ENUM(__REQ_FUA); TRACE_DEFINE_ENUM(__REQ_PRIO); TRACE_DEFINE_ENUM(__REQ_META); TRACE_DEFINE_ENUM(CP_UMOUNT); TRACE_DEFINE_ENUM(CP_FASTBOOT); TRACE_DEFINE_ENUM(CP_SYNC); TRACE_DEFINE_ENUM(CP_RECOVERY); TRACE_DEFINE_ENUM(CP_DISCARD); TRACE_DEFINE_ENUM(CP_TRIMMED); TRACE_DEFINE_ENUM(CP_PAUSE); TRACE_DEFINE_ENUM(CP_RESIZE); TRACE_DEFINE_ENUM(EX_READ); TRACE_DEFINE_ENUM(EX_BLOCK_AGE); #define show_block_type(type) \ __print_symbolic(type, \ { NODE, "NODE" }, \ { DATA, "DATA" }, \ { META, "META" }, \ { META_FLUSH, "META_FLUSH" }, \ { IPU, "IN-PLACE" }, \ { OPU, "OUT-OF-PLACE" }) #define show_block_temp(temp) \ __print_symbolic(temp, \ { HOT, "HOT" }, \ { WARM, "WARM" }, \ { COLD, "COLD" }) #define F2FS_OP_FLAGS (REQ_RAHEAD | REQ_SYNC | REQ_META | REQ_PRIO | \ REQ_PREFLUSH | REQ_FUA) #define F2FS_BIO_FLAG_MASK(t) (__force u32)((t) & F2FS_OP_FLAGS) #define show_bio_type(op,op_flags) show_bio_op(op), \ show_bio_op_flags(op_flags) #define show_bio_op(op) blk_op_str(op) #define show_bio_op_flags(flags) \ __print_flags(F2FS_BIO_FLAG_MASK(flags), "|", \ { (__force u32)REQ_RAHEAD, "R" }, \ { (__force u32)REQ_SYNC, "S" }, \ { (__force u32)REQ_META, "M" }, \ { (__force u32)REQ_PRIO, "P" }, \ { (__force u32)REQ_PREFLUSH, "PF" }, \ { (__force u32)REQ_FUA, "FUA" }) #define show_data_type(type) \ __print_symbolic(type, \ { CURSEG_HOT_DATA, "Hot DATA" }, \ { CURSEG_WARM_DATA, "Warm DATA" }, \ { CURSEG_COLD_DATA, "Cold DATA" }, \ { CURSEG_HOT_NODE, "Hot NODE" }, \ { CURSEG_WARM_NODE, "Warm NODE" }, \ { CURSEG_COLD_NODE, "Cold NODE" }, \ { NO_CHECK_TYPE, "No TYPE" }) #define show_file_type(type) \ __print_symbolic(type, \ { 0, "FILE" }, \ { 1, "DIR" }) #define show_gc_type(type) \ __print_symbolic(type, \ { FG_GC, "Foreground GC" }, \ { BG_GC, "Background GC" }) #define show_alloc_mode(type) \ __print_symbolic(type, \ { LFS, "LFS-mode" }, \ { SSR, "SSR-mode" }, \ { AT_SSR, "AT_SSR-mode" }) #define
show_victim_policy(type) \ __print_symbolic(type, \ { GC_GREEDY, "Greedy" }, \ { GC_CB, "Cost-Benefit" }, \ { GC_AT, "Age-threshold" }) #define show_cpreason(type) \ __print_flags(type, "|", \ { CP_UMOUNT, "Umount" }, \ { CP_FASTBOOT, "Fastboot" }, \ { CP_SYNC, "Sync" }, \ { CP_RECOVERY, "Recovery" }, \ { CP_DISCARD, "Discard" }, \ { CP_PAUSE, "Pause" }, \ { CP_TRIMMED, "Trimmed" }, \ { CP_RESIZE, "Resize" }) #define show_fsync_cpreason(type) \ __print_symbolic(type, \ { CP_NO_NEEDED, "no needed" }, \ { CP_NON_REGULAR, "non regular" }, \ { CP_COMPRESSED, "compressed" }, \ { CP_HARDLINK, "hardlink" }, \ { CP_SB_NEED_CP, "sb needs cp" }, \ { CP_WRONG_PINO, "wrong pino" }, \ { CP_NO_SPC_ROLL, "no space roll forward" }, \ { CP_NODE_NEED_CP, "node needs cp" }, \ { CP_FASTBOOT_MODE, "fastboot mode" }, \ { CP_SPEC_LOG_NUM, "log type is 2" }, \ { CP_RECOVER_DIR, "dir needs recovery" }) #define show_shutdown_mode(type) \ __print_symbolic(type, \ { F2FS_GOING_DOWN_FULLSYNC, "full sync" }, \ { F2FS_GOING_DOWN_METASYNC, "meta sync" }, \ { F2FS_GOING_DOWN_NOSYNC, "no sync" }, \ { F2FS_GOING_DOWN_METAFLUSH, "meta flush" }, \ { F2FS_GOING_DOWN_NEED_FSCK, "need fsck" }) #define show_compress_algorithm(type) \ __print_symbolic(type, \ { COMPRESS_LZO, "LZO" }, \ { COMPRESS_LZ4, "LZ4" }, \ { COMPRESS_ZSTD, "ZSTD" }, \ { COMPRESS_LZORLE, "LZO-RLE" }) #define show_extent_type(type) \ __print_symbolic(type, \ { EX_READ, "Read" }, \ { EX_BLOCK_AGE, "Block Age" }) #define show_inode_type(x) \ __print_symbolic(x, \ { S_IFLNK, "symbolic" }, \ { S_IFREG, "regular" }, \ { S_IFDIR, "directory" }, \ { S_IFCHR, "character" }, \ { S_IFBLK, "block" }, \ { S_IFIFO, "fifo" }, \ { S_IFSOCK, "sock" }) #define S_ALL_PERM (S_ISUID | S_ISGID | S_ISVTX | \ S_IRWXU | S_IRWXG | S_IRWXO) struct f2fs_sb_info; struct f2fs_io_info; struct extent_info; struct victim_sel_policy; struct f2fs_map_blocks; DECLARE_EVENT_CLASS(f2fs__inode, TP_PROTO(struct inode *inode), TP_ARGS(inode), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(ino_t, pino) __field(umode_t, mode) __field(loff_t, size) __field(unsigned int, nlink) __field(blkcnt_t, blocks) __field(__u8, advise) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->pino = F2FS_I(inode)->i_pino; __entry->mode = inode->i_mode; __entry->nlink = inode->i_nlink; __entry->size = inode->i_size; __entry->blocks = inode->i_blocks; __entry->advise = F2FS_I(inode)->i_advise; ), TP_printk("dev = (%d,%d), ino = %lu, pino = %lu, i_mode = 0x%hx, " "i_size = %lld, i_nlink = %u, i_blocks = %llu, i_advise = 0x%x", show_dev_ino(__entry), (unsigned long)__entry->pino, __entry->mode, __entry->size, (unsigned int)__entry->nlink, (unsigned long long)__entry->blocks, (unsigned char)__entry->advise) ); DECLARE_EVENT_CLASS(f2fs__inode_exit, TP_PROTO(struct inode *inode, int ret), TP_ARGS(inode, ret), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(umode_t, mode) __field(int, ret) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->mode = inode->i_mode; __entry->ret = ret; ), TP_printk("dev = (%d,%d), ino = %lu, type: %s, mode = 0%o, ret = %d", show_dev_ino(__entry), show_inode_type(__entry->mode & S_IFMT), __entry->mode & S_ALL_PERM, __entry->ret) ); DEFINE_EVENT(f2fs__inode, f2fs_sync_file_enter, TP_PROTO(struct inode *inode), TP_ARGS(inode) ); TRACE_EVENT(f2fs_sync_file_exit, TP_PROTO(struct inode *inode, int cp_reason, int datasync, int ret), TP_ARGS(inode, cp_reason, datasync, ret), 
TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(int, cp_reason) __field(int, datasync) __field(int, ret) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->cp_reason = cp_reason; __entry->datasync = datasync; __entry->ret = ret; ), TP_printk("dev = (%d,%d), ino = %lu, cp_reason: %s, " "datasync = %d, ret = %d", show_dev_ino(__entry), show_fsync_cpreason(__entry->cp_reason), __entry->datasync, __entry->ret) ); TRACE_EVENT(f2fs_sync_fs, TP_PROTO(struct super_block *sb, int wait), TP_ARGS(sb, wait), TP_STRUCT__entry( __field(dev_t, dev) __field(int, dirty) __field(int, wait) ), TP_fast_assign( __entry->dev = sb->s_dev; __entry->dirty = is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY); __entry->wait = wait; ), TP_printk("dev = (%d,%d), superblock is %s, wait = %d", show_dev(__entry->dev), __entry->dirty ? "dirty" : "not dirty", __entry->wait) ); DEFINE_EVENT(f2fs__inode, f2fs_iget, TP_PROTO(struct inode *inode), TP_ARGS(inode) ); DEFINE_EVENT(f2fs__inode_exit, f2fs_iget_exit, TP_PROTO(struct inode *inode, int ret), TP_ARGS(inode, ret) ); DEFINE_EVENT(f2fs__inode, f2fs_evict_inode, TP_PROTO(struct inode *inode), TP_ARGS(inode) ); DEFINE_EVENT(f2fs__inode_exit, f2fs_new_inode, TP_PROTO(struct inode *inode, int ret), TP_ARGS(inode, ret) ); TRACE_EVENT(f2fs_unlink_enter, TP_PROTO(struct inode *dir, struct dentry *dentry), TP_ARGS(dir, dentry), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(loff_t, size) __field(blkcnt_t, blocks) __string(name, dentry->d_name.name) ), TP_fast_assign( __entry->dev = dir->i_sb->s_dev; __entry->ino = dir->i_ino; __entry->size = dir->i_size; __entry->blocks = dir->i_blocks; __assign_str(name); ), TP_printk("dev = (%d,%d), dir ino = %lu, i_size = %lld, " "i_blocks = %llu, name = %s", show_dev_ino(__entry), __entry->size, (unsigned long long)__entry->blocks, __get_str(name)) ); DEFINE_EVENT(f2fs__inode_exit, f2fs_unlink_exit, TP_PROTO(struct inode *inode, int ret), TP_ARGS(inode, ret) ); DEFINE_EVENT(f2fs__inode_exit, f2fs_drop_inode, TP_PROTO(struct inode *inode, int ret), TP_ARGS(inode, ret) ); DEFINE_EVENT(f2fs__inode, f2fs_truncate, TP_PROTO(struct inode *inode), TP_ARGS(inode) ); TRACE_EVENT(f2fs_truncate_data_blocks_range, TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs, int free), TP_ARGS(inode, nid, ofs, free), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(nid_t, nid) __field(unsigned int, ofs) __field(int, free) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->nid = nid; __entry->ofs = ofs; __entry->free = free; ), TP_printk("dev = (%d,%d), ino = %lu, nid = %u, offset = %u, freed = %d", show_dev_ino(__entry), (unsigned int)__entry->nid, __entry->ofs, __entry->free) ); DECLARE_EVENT_CLASS(f2fs__truncate_op, TP_PROTO(struct inode *inode, u64 from), TP_ARGS(inode, from), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(loff_t, size) __field(blkcnt_t, blocks) __field(u64, from) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->size = inode->i_size; __entry->blocks = inode->i_blocks; __entry->from = from; ), TP_printk("dev = (%d,%d), ino = %lu, i_size = %lld, i_blocks = %llu, " "start file offset = %llu", show_dev_ino(__entry), __entry->size, (unsigned long long)__entry->blocks, (unsigned long long)__entry->from) ); DEFINE_EVENT(f2fs__truncate_op, f2fs_truncate_blocks_enter, TP_PROTO(struct inode *inode, u64 from), TP_ARGS(inode, from) ); 
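/*
 * Usage sketch (illustrative only, not part of this header): each
 * TRACE_EVENT/DEFINE_EVENT above generates a trace_<name>() hook that
 * f2fs proper calls around the corresponding operation. An enter/exit
 * pair is typically used like this (the example_* function is
 * hypothetical; the real callers live in fs/f2fs):
 */
static int example_sync_file(struct inode *inode, int datasync)
{
	int cp_reason = CP_NO_NEEDED;	/* see show_fsync_cpreason() */
	int ret = 0;

	trace_f2fs_sync_file_enter(inode);
	/* ... fsync work: write dirty pages, maybe checkpoint ... */
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	return ret;
}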
DEFINE_EVENT(f2fs__inode_exit, f2fs_truncate_blocks_exit, TP_PROTO(struct inode *inode, int ret), TP_ARGS(inode, ret) ); DEFINE_EVENT(f2fs__truncate_op, f2fs_truncate_inode_blocks_enter, TP_PROTO(struct inode *inode, u64 from), TP_ARGS(inode, from) ); DEFINE_EVENT(f2fs__inode_exit, f2fs_truncate_inode_blocks_exit, TP_PROTO(struct inode *inode, int ret), TP_ARGS(inode, ret) ); DECLARE_EVENT_CLASS(f2fs__truncate_node, TP_PROTO(struct inode *inode, nid_t nid, block_t blk_addr), TP_ARGS(inode, nid, blk_addr), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(nid_t, nid) __field(block_t, blk_addr) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->nid = nid; __entry->blk_addr = blk_addr; ), TP_printk("dev = (%d,%d), ino = %lu, nid = %u, block_address = 0x%llx", show_dev_ino(__entry), (unsigned int)__entry->nid, (unsigned long long)__entry->blk_addr) ); DEFINE_EVENT(f2fs__truncate_node, f2fs_truncate_nodes_enter, TP_PROTO(struct inode *inode, nid_t nid, block_t blk_addr), TP_ARGS(inode, nid, blk_addr) ); DEFINE_EVENT(f2fs__inode_exit, f2fs_truncate_nodes_exit, TP_PROTO(struct inode *inode, int ret), TP_ARGS(inode, ret) ); DEFINE_EVENT(f2fs__truncate_node, f2fs_truncate_node, TP_PROTO(struct inode *inode, nid_t nid, block_t blk_addr), TP_ARGS(inode, nid, blk_addr) ); TRACE_EVENT(f2fs_truncate_partial_nodes, TP_PROTO(struct inode *inode, nid_t *nid, int depth, int err), TP_ARGS(inode, nid, depth, err), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __array(nid_t, nid, 3) __field(int, depth) __field(int, err) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->nid[0] = nid[0]; __entry->nid[1] = nid[1]; __entry->nid[2] = nid[2]; __entry->depth = depth; __entry->err = err; ), TP_printk("dev = (%d,%d), ino = %lu, " "nid[0] = %u, nid[1] = %u, nid[2] = %u, depth = %d, err = %d", show_dev_ino(__entry), (unsigned int)__entry->nid[0], (unsigned int)__entry->nid[1], (unsigned int)__entry->nid[2], __entry->depth, __entry->err) ); TRACE_EVENT(f2fs_file_write_iter, TP_PROTO(struct inode *inode, loff_t offset, size_t length, ssize_t ret), TP_ARGS(inode, offset, length, ret), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(loff_t, offset) __field(size_t, length) __field(ssize_t, ret) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->offset = offset; __entry->length = length; __entry->ret = ret; ), TP_printk("dev = (%d,%d), ino = %lu, " "offset = %lld, length = %zu, written(err) = %zd", show_dev_ino(__entry), __entry->offset, __entry->length, __entry->ret) ); TRACE_EVENT(f2fs_map_blocks, TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, int flag, int ret), TP_ARGS(inode, map, flag, ret), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(block_t, m_lblk) __field(block_t, m_pblk) __field(unsigned int, m_len) __field(unsigned int, m_flags) __field(int, m_seg_type) __field(bool, m_may_create) __field(bool, m_multidev_dio) __field(int, flag) __field(int, ret) ), TP_fast_assign( __entry->dev = map->m_bdev->bd_dev; __entry->ino = inode->i_ino; __entry->m_lblk = map->m_lblk; __entry->m_pblk = map->m_pblk; __entry->m_len = map->m_len; __entry->m_flags = map->m_flags; __entry->m_seg_type = map->m_seg_type; __entry->m_may_create = map->m_may_create; __entry->m_multidev_dio = map->m_multidev_dio; __entry->flag = flag; __entry->ret = ret; ), TP_printk("dev = (%d,%d), ino = %lu, file offset = %llu, " "start blkaddr = 0x%llx, len = 
0x%llx, flags = %u, " "seg_type = %d, may_create = %d, multidevice = %d, " "flag = %d, err = %d", show_dev_ino(__entry), (unsigned long long)__entry->m_lblk, (unsigned long long)__entry->m_pblk, (unsigned long long)__entry->m_len, __entry->m_flags, __entry->m_seg_type, __entry->m_may_create, __entry->m_multidev_dio, __entry->flag, __entry->ret) ); TRACE_EVENT(f2fs_background_gc, TP_PROTO(struct super_block *sb, unsigned int wait_ms, unsigned int prefree, unsigned int free), TP_ARGS(sb, wait_ms, prefree, free), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned int, wait_ms) __field(unsigned int, prefree) __field(unsigned int, free) ), TP_fast_assign( __entry->dev = sb->s_dev; __entry->wait_ms = wait_ms; __entry->prefree = prefree; __entry->free = free; ), TP_printk("dev = (%d,%d), wait_ms = %u, prefree = %u, free = %u", show_dev(__entry->dev), __entry->wait_ms, __entry->prefree, __entry->free) ); TRACE_EVENT(f2fs_gc_begin, TP_PROTO(struct super_block *sb, int gc_type, bool no_bg_gc, unsigned int nr_free_secs, long long dirty_nodes, long long dirty_dents, long long dirty_imeta, unsigned int free_sec, unsigned int free_seg, int reserved_seg, unsigned int prefree_seg), TP_ARGS(sb, gc_type, no_bg_gc, nr_free_secs, dirty_nodes, dirty_dents, dirty_imeta, free_sec, free_seg, reserved_seg, prefree_seg), TP_STRUCT__entry( __field(dev_t, dev) __field(int, gc_type) __field(bool, no_bg_gc) __field(unsigned int, nr_free_secs) __field(long long, dirty_nodes) __field(long long, dirty_dents) __field(long long, dirty_imeta) __field(unsigned int, free_sec) __field(unsigned int, free_seg) __field(int, reserved_seg) __field(unsigned int, prefree_seg) ), TP_fast_assign( __entry->dev = sb->s_dev; __entry->gc_type = gc_type; __entry->no_bg_gc = no_bg_gc; __entry->nr_free_secs = nr_free_secs; __entry->dirty_nodes = dirty_nodes; __entry->dirty_dents = dirty_dents; __entry->dirty_imeta = dirty_imeta; __entry->free_sec = free_sec; __entry->free_seg = free_seg; __entry->reserved_seg = reserved_seg; __entry->prefree_seg = prefree_seg; ), TP_printk("dev = (%d,%d), gc_type = %s, no_background_GC = %d, nr_free_secs = %u, " "nodes = %lld, dents = %lld, imeta = %lld, free_sec:%u, free_seg:%u, " "rsv_seg:%d, prefree_seg:%u", show_dev(__entry->dev), show_gc_type(__entry->gc_type), (__entry->gc_type == BG_GC) ? 
__entry->no_bg_gc : -1, __entry->nr_free_secs, __entry->dirty_nodes, __entry->dirty_dents, __entry->dirty_imeta, __entry->free_sec, __entry->free_seg, __entry->reserved_seg, __entry->prefree_seg) ); TRACE_EVENT(f2fs_gc_end, TP_PROTO(struct super_block *sb, int ret, int seg_freed, int sec_freed, long long dirty_nodes, long long dirty_dents, long long dirty_imeta, unsigned int free_sec, unsigned int free_seg, int reserved_seg, unsigned int prefree_seg), TP_ARGS(sb, ret, seg_freed, sec_freed, dirty_nodes, dirty_dents, dirty_imeta, free_sec, free_seg, reserved_seg, prefree_seg), TP_STRUCT__entry( __field(dev_t, dev) __field(int, ret) __field(int, seg_freed) __field(int, sec_freed) __field(long long, dirty_nodes) __field(long long, dirty_dents) __field(long long, dirty_imeta) __field(unsigned int, free_sec) __field(unsigned int, free_seg) __field(int, reserved_seg) __field(unsigned int, prefree_seg) ), TP_fast_assign( __entry->dev = sb->s_dev; __entry->ret = ret; __entry->seg_freed = seg_freed; __entry->sec_freed = sec_freed; __entry->dirty_nodes = dirty_nodes; __entry->dirty_dents = dirty_dents; __entry->dirty_imeta = dirty_imeta; __entry->free_sec = free_sec; __entry->free_seg = free_seg; __entry->reserved_seg = reserved_seg; __entry->prefree_seg = prefree_seg; ), TP_printk("dev = (%d,%d), ret = %d, seg_freed = %d, sec_freed = %d, " "nodes = %lld, dents = %lld, imeta = %lld, free_sec:%u, " "free_seg:%u, rsv_seg:%d, prefree_seg:%u", show_dev(__entry->dev), __entry->ret, __entry->seg_freed, __entry->sec_freed, __entry->dirty_nodes, __entry->dirty_dents, __entry->dirty_imeta, __entry->free_sec, __entry->free_seg, __entry->reserved_seg, __entry->prefree_seg) ); TRACE_EVENT(f2fs_get_victim, TP_PROTO(struct super_block *sb, int type, int gc_type, struct victim_sel_policy *p, unsigned int pre_victim, unsigned int prefree, unsigned int free), TP_ARGS(sb, type, gc_type, p, pre_victim, prefree, free), TP_STRUCT__entry( __field(dev_t, dev) __field(int, type) __field(int, gc_type) __field(int, alloc_mode) __field(int, gc_mode) __field(unsigned int, victim) __field(unsigned int, cost) __field(unsigned int, ofs_unit) __field(unsigned int, pre_victim) __field(unsigned int, prefree) __field(unsigned int, free) ), TP_fast_assign( __entry->dev = sb->s_dev; __entry->type = type; __entry->gc_type = gc_type; __entry->alloc_mode = p->alloc_mode; __entry->gc_mode = p->gc_mode; __entry->victim = p->min_segno; __entry->cost = p->min_cost; __entry->ofs_unit = p->ofs_unit; __entry->pre_victim = pre_victim; __entry->prefree = prefree; __entry->free = free; ), TP_printk("dev = (%d,%d), type = %s, policy = (%s, %s, %s), " "victim = %u, cost = %u, ofs_unit = %u, " "pre_victim_secno = %d, prefree = %u, free = %u", show_dev(__entry->dev), show_data_type(__entry->type), show_gc_type(__entry->gc_type), show_alloc_mode(__entry->alloc_mode), show_victim_policy(__entry->gc_mode), __entry->victim, __entry->cost, __entry->ofs_unit, (int)__entry->pre_victim, __entry->prefree, __entry->free) ); TRACE_EVENT(f2fs_lookup_start, TP_PROTO(struct inode *dir, struct dentry *dentry, unsigned int flags), TP_ARGS(dir, dentry, flags), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __string(name, dentry->d_name.name) __field(unsigned int, flags) ), TP_fast_assign( __entry->dev = dir->i_sb->s_dev; __entry->ino = dir->i_ino; __assign_str(name); __entry->flags = flags; ), TP_printk("dev = (%d,%d), pino = %lu, name:%s, flags:%u", show_dev_ino(__entry), __get_str(name), __entry->flags) ); TRACE_EVENT(f2fs_lookup_end, TP_PROTO(struct inode 
*dir, struct dentry *dentry, nid_t ino, int err), TP_ARGS(dir, dentry, ino, err), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __string(name, dentry->d_name.name) __field(nid_t, cino) __field(int, err) ), TP_fast_assign( __entry->dev = dir->i_sb->s_dev; __entry->ino = dir->i_ino; __assign_str(name); __entry->cino = ino; __entry->err = err; ), TP_printk("dev = (%d,%d), pino = %lu, name:%s, ino:%u, err:%d", show_dev_ino(__entry), __get_str(name), __entry->cino, __entry->err) ); TRACE_EVENT(f2fs_rename_start, TP_PROTO(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags), TP_ARGS(old_dir, old_dentry, new_dir, new_dentry, flags), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __string(old_name, old_dentry->d_name.name) __field(ino_t, new_pino) __string(new_name, new_dentry->d_name.name) __field(unsigned int, flags) ), TP_fast_assign( __entry->dev = old_dir->i_sb->s_dev; __entry->ino = old_dir->i_ino; __assign_str(old_name); __entry->new_pino = new_dir->i_ino; __assign_str(new_name); __entry->flags = flags; ), TP_printk("dev = (%d,%d), old_dir = %lu, old_name: %s, " "new_dir = %lu, new_name: %s, flags = %u", show_dev_ino(__entry), __get_str(old_name), __entry->new_pino, __get_str(new_name), __entry->flags) ); TRACE_EVENT(f2fs_rename_end, TP_PROTO(struct dentry *old_dentry, struct dentry *new_dentry, unsigned int flags, int ret), TP_ARGS(old_dentry, new_dentry, flags, ret), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __string(old_name, old_dentry->d_name.name) __string(new_name, new_dentry->d_name.name) __field(unsigned int, flags) __field(int, ret) ), TP_fast_assign( __entry->dev = old_dentry->d_sb->s_dev; __entry->ino = old_dentry->d_inode->i_ino; __assign_str(old_name); __assign_str(new_name); __entry->flags = flags; __entry->ret = ret; ), TP_printk("dev = (%d,%d), ino = %lu, old_name: %s, " "new_name: %s, flags = %u, ret = %d", show_dev_ino(__entry), __get_str(old_name), __get_str(new_name), __entry->flags, __entry->ret) ); TRACE_EVENT(f2fs_readdir, TP_PROTO(struct inode *dir, loff_t start_pos, loff_t end_pos, int err), TP_ARGS(dir, start_pos, end_pos, err), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(loff_t, start) __field(loff_t, end) __field(int, err) ), TP_fast_assign( __entry->dev = dir->i_sb->s_dev; __entry->ino = dir->i_ino; __entry->start = start_pos; __entry->end = end_pos; __entry->err = err; ), TP_printk("dev = (%d,%d), ino = %lu, start_pos:%llu, end_pos:%llu, err:%d", show_dev_ino(__entry), __entry->start, __entry->end, __entry->err) ); TRACE_EVENT(f2fs_fallocate, TP_PROTO(struct inode *inode, int mode, loff_t offset, loff_t len, int ret), TP_ARGS(inode, mode, offset, len, ret), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(int, mode) __field(loff_t, offset) __field(loff_t, len) __field(loff_t, size) __field(blkcnt_t, blocks) __field(int, ret) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->mode = mode; __entry->offset = offset; __entry->len = len; __entry->size = inode->i_size; __entry->blocks = inode->i_blocks; __entry->ret = ret; ), TP_printk("dev = (%d,%d), ino = %lu, mode = %x, offset = %lld, " "len = %lld, i_size = %lld, i_blocks = %llu, ret = %d", show_dev_ino(__entry), __entry->mode, (unsigned long long)__entry->offset, (unsigned long long)__entry->len, (unsigned long long)__entry->size, (unsigned long long)__entry->blocks, __entry->ret) ); TRACE_EVENT(f2fs_direct_IO_enter, 
TP_PROTO(struct inode *inode, struct kiocb *iocb, long len, int rw), TP_ARGS(inode, iocb, len, rw), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(loff_t, ki_pos) __field(int, ki_flags) __field(u16, ki_ioprio) __field(unsigned long, len) __field(int, rw) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->ki_pos = iocb->ki_pos; __entry->ki_flags = iocb->ki_flags; __entry->ki_ioprio = iocb->ki_ioprio; __entry->len = len; __entry->rw = rw; ), TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu ki_flags = %x ki_ioprio = %x rw = %d", show_dev_ino(__entry), __entry->ki_pos, __entry->len, __entry->ki_flags, __entry->ki_ioprio, __entry->rw) ); TRACE_EVENT(f2fs_direct_IO_exit, TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw, int ret), TP_ARGS(inode, offset, len, rw, ret), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(loff_t, pos) __field(unsigned long, len) __field(int, rw) __field(int, ret) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->pos = offset; __entry->len = len; __entry->rw = rw; __entry->ret = ret; ), TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu " "rw = %d ret = %d", show_dev_ino(__entry), __entry->pos, __entry->len, __entry->rw, __entry->ret) ); TRACE_EVENT(f2fs_reserve_new_blocks, TP_PROTO(struct inode *inode, nid_t nid, unsigned int ofs_in_node, blkcnt_t count), TP_ARGS(inode, nid, ofs_in_node, count), TP_STRUCT__entry( __field(dev_t, dev) __field(nid_t, nid) __field(unsigned int, ofs_in_node) __field(blkcnt_t, count) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->nid = nid; __entry->ofs_in_node = ofs_in_node; __entry->count = count; ), TP_printk("dev = (%d,%d), nid = %u, ofs_in_node = %u, count = %llu", show_dev(__entry->dev), (unsigned int)__entry->nid, __entry->ofs_in_node, (unsigned long long)__entry->count) ); DECLARE_EVENT_CLASS(f2fs__submit_page_bio, TP_PROTO(struct page *page, struct f2fs_io_info *fio), TP_ARGS(page, fio), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(pgoff_t, index) __field(block_t, old_blkaddr) __field(block_t, new_blkaddr) __field(enum req_op, op) __field(blk_opf_t, op_flags) __field(int, temp) __field(int, type) ), TP_fast_assign( __entry->dev = page_file_mapping(page)->host->i_sb->s_dev; __entry->ino = page_file_mapping(page)->host->i_ino; __entry->index = page->index; __entry->old_blkaddr = fio->old_blkaddr; __entry->new_blkaddr = fio->new_blkaddr; __entry->op = fio->op; __entry->op_flags = fio->op_flags; __entry->temp = fio->temp; __entry->type = fio->type; ), TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, " "oldaddr = 0x%llx, newaddr = 0x%llx, rw = %s(%s), type = %s_%s", show_dev_ino(__entry), (unsigned long)__entry->index, (unsigned long long)__entry->old_blkaddr, (unsigned long long)__entry->new_blkaddr, show_bio_type(__entry->op, __entry->op_flags), show_block_temp(__entry->temp), show_block_type(__entry->type)) ); DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_bio, TP_PROTO(struct page *page, struct f2fs_io_info *fio), TP_ARGS(page, fio), TP_CONDITION(page->mapping) ); DEFINE_EVENT_CONDITION(f2fs__submit_page_bio, f2fs_submit_page_write, TP_PROTO(struct page *page, struct f2fs_io_info *fio), TP_ARGS(page, fio), TP_CONDITION(page->mapping) ); DECLARE_EVENT_CLASS(f2fs__bio, TP_PROTO(struct super_block *sb, int type, struct bio *bio), TP_ARGS(sb, type, bio), TP_STRUCT__entry( __field(dev_t, dev) __field(dev_t, target) 
__field(enum req_op, op) __field(blk_opf_t, op_flags) __field(int, type) __field(sector_t, sector) __field(unsigned int, size) ), TP_fast_assign( __entry->dev = sb->s_dev; __entry->target = bio_dev(bio); __entry->op = bio_op(bio); __entry->op_flags = bio->bi_opf; __entry->type = type; __entry->sector = bio->bi_iter.bi_sector; __entry->size = bio->bi_iter.bi_size; ), TP_printk("dev = (%d,%d)/(%d,%d), rw = %s(%s), %s, sector = %lld, size = %u", show_dev(__entry->target), show_dev(__entry->dev), show_bio_type(__entry->op, __entry->op_flags), show_block_type(__entry->type), (unsigned long long)__entry->sector, __entry->size) ); DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_prepare_write_bio, TP_PROTO(struct super_block *sb, int type, struct bio *bio), TP_ARGS(sb, type, bio), TP_CONDITION(bio) ); DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_prepare_read_bio, TP_PROTO(struct super_block *sb, int type, struct bio *bio), TP_ARGS(sb, type, bio), TP_CONDITION(bio) ); DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_submit_read_bio, TP_PROTO(struct super_block *sb, int type, struct bio *bio), TP_ARGS(sb, type, bio), TP_CONDITION(bio) ); DEFINE_EVENT_CONDITION(f2fs__bio, f2fs_submit_write_bio, TP_PROTO(struct super_block *sb, int type, struct bio *bio), TP_ARGS(sb, type, bio), TP_CONDITION(bio) ); TRACE_EVENT(f2fs_write_begin, TP_PROTO(struct inode *inode, loff_t pos, unsigned int len), TP_ARGS(inode, pos, len), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(loff_t, pos) __field(unsigned int, len) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->pos = pos; __entry->len = len; ), TP_printk("dev = (%d,%d), ino = %lu, pos = %llu, len = %u", show_dev_ino(__entry), (unsigned long long)__entry->pos, __entry->len) ); TRACE_EVENT(f2fs_write_end, TP_PROTO(struct inode *inode, loff_t pos, unsigned int len, unsigned int copied), TP_ARGS(inode, pos, len, copied), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(loff_t, pos) __field(unsigned int, len) __field(unsigned int, copied) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->pos = pos; __entry->len = len; __entry->copied = copied; ), TP_printk("dev = (%d,%d), ino = %lu, pos = %llu, len = %u, copied = %u", show_dev_ino(__entry), (unsigned long long)__entry->pos, __entry->len, __entry->copied) ); DECLARE_EVENT_CLASS(f2fs__folio, TP_PROTO(struct folio *folio, int type), TP_ARGS(folio, type), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(int, type) __field(int, dir) __field(pgoff_t, index) __field(int, dirty) __field(int, uptodate) ), TP_fast_assign( __entry->dev = folio_file_mapping(folio)->host->i_sb->s_dev; __entry->ino = folio_file_mapping(folio)->host->i_ino; __entry->type = type; __entry->dir = S_ISDIR(folio_file_mapping(folio)->host->i_mode); __entry->index = folio_index(folio); __entry->dirty = folio_test_dirty(folio); __entry->uptodate = folio_test_uptodate(folio); ), TP_printk("dev = (%d,%d), ino = %lu, %s, %s, index = %lu, " "dirty = %d, uptodate = %d", show_dev_ino(__entry), show_block_type(__entry->type), show_file_type(__entry->dir), (unsigned long)__entry->index, __entry->dirty, __entry->uptodate) ); DEFINE_EVENT(f2fs__folio, f2fs_writepage, TP_PROTO(struct folio *folio, int type), TP_ARGS(folio, type) ); DEFINE_EVENT(f2fs__folio, f2fs_do_write_data_page, TP_PROTO(struct folio *folio, int type), TP_ARGS(folio, type) ); DEFINE_EVENT(f2fs__folio, f2fs_readpage, TP_PROTO(struct folio *folio, int type), TP_ARGS(folio, type) ); 
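/*
 * Illustrative aside (not part of the original header): each DEFINE_EVENT
 * above stamps out a tracepoint that reuses the f2fs__folio class's
 * TP_STRUCT__entry/TP_fast_assign/TP_printk, so only the event name and
 * hook point differ. A call site in the filesystem code then looks
 * roughly like the sketch below, assuming a folio that is attached to an
 * address_space:
 *
 *	trace_f2fs_readpage(folio, DATA);
 *
 * Sharing one event class keeps the per-event code and text footprint
 * small compared to spelling out a full TRACE_EVENT() for every hook.
 */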
DEFINE_EVENT(f2fs__folio, f2fs_set_page_dirty, TP_PROTO(struct folio *folio, int type), TP_ARGS(folio, type) ); TRACE_EVENT(f2fs_replace_atomic_write_block, TP_PROTO(struct inode *inode, struct inode *cow_inode, pgoff_t index, block_t old_addr, block_t new_addr, bool recovery), TP_ARGS(inode, cow_inode, index, old_addr, new_addr, recovery), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(ino_t, cow_ino) __field(pgoff_t, index) __field(block_t, old_addr) __field(block_t, new_addr) __field(bool, recovery) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->cow_ino = cow_inode->i_ino; __entry->index = index; __entry->old_addr = old_addr; __entry->new_addr = new_addr; __entry->recovery = recovery; ), TP_printk("dev = (%d,%d), ino = %lu, cow_ino = %lu, index = %lu, " "old_addr = 0x%llx, new_addr = 0x%llx, recovery = %d", show_dev_ino(__entry), __entry->cow_ino, (unsigned long)__entry->index, (unsigned long long)__entry->old_addr, (unsigned long long)__entry->new_addr, __entry->recovery) ); DECLARE_EVENT_CLASS(f2fs_mmap, TP_PROTO(struct inode *inode, pgoff_t index, vm_flags_t flags, vm_fault_t ret), TP_ARGS(inode, index, flags, ret), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(pgoff_t, index) __field(vm_flags_t, flags) __field(vm_fault_t, ret) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->index = index; __entry->flags = flags; __entry->ret = ret; ), TP_printk("dev = (%d,%d), ino = %lu, index = %lu, flags: %s, ret: %s", show_dev_ino(__entry), (unsigned long)__entry->index, __print_flags(__entry->flags, "|", FAULT_FLAG_TRACE), __print_flags(__entry->ret, "|", VM_FAULT_RESULT_TRACE)) ); DEFINE_EVENT(f2fs_mmap, f2fs_filemap_fault, TP_PROTO(struct inode *inode, pgoff_t index, vm_flags_t flags, vm_fault_t ret), TP_ARGS(inode, index, flags, ret) ); DEFINE_EVENT(f2fs_mmap, f2fs_vm_page_mkwrite, TP_PROTO(struct inode *inode, pgoff_t index, vm_flags_t flags, vm_fault_t ret), TP_ARGS(inode, index, flags, ret) ); TRACE_EVENT(f2fs_writepages, TP_PROTO(struct inode *inode, struct writeback_control *wbc, int type), TP_ARGS(inode, wbc, type), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(int, type) __field(int, dir) __field(long, nr_to_write) __field(long, pages_skipped) __field(loff_t, range_start) __field(loff_t, range_end) __field(pgoff_t, writeback_index) __field(int, sync_mode) __field(char, for_kupdate) __field(char, for_background) __field(char, tagged_writepages) __field(char, for_reclaim) __field(char, range_cyclic) __field(char, for_sync) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->type = type; __entry->dir = S_ISDIR(inode->i_mode); __entry->nr_to_write = wbc->nr_to_write; __entry->pages_skipped = wbc->pages_skipped; __entry->range_start = wbc->range_start; __entry->range_end = wbc->range_end; __entry->writeback_index = inode->i_mapping->writeback_index; __entry->sync_mode = wbc->sync_mode; __entry->for_kupdate = wbc->for_kupdate; __entry->for_background = wbc->for_background; __entry->tagged_writepages = wbc->tagged_writepages; __entry->for_reclaim = wbc->for_reclaim; __entry->range_cyclic = wbc->range_cyclic; __entry->for_sync = wbc->for_sync; ), TP_printk("dev = (%d,%d), ino = %lu, %s, %s, nr_to_write %ld, " "skipped %ld, start %lld, end %lld, wb_idx %lu, sync_mode %d, " "kupdate %u background %u tagged %u reclaim %u cyclic %u sync %u", show_dev_ino(__entry), show_block_type(__entry->type), 
show_file_type(__entry->dir), __entry->nr_to_write, __entry->pages_skipped, __entry->range_start, __entry->range_end, (unsigned long)__entry->writeback_index, __entry->sync_mode, __entry->for_kupdate, __entry->for_background, __entry->tagged_writepages, __entry->for_reclaim, __entry->range_cyclic, __entry->for_sync) ); TRACE_EVENT(f2fs_readpages, TP_PROTO(struct inode *inode, pgoff_t start, unsigned int nrpage), TP_ARGS(inode, start, nrpage), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(pgoff_t, start) __field(unsigned int, nrpage) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->start = start; __entry->nrpage = nrpage; ), TP_printk("dev = (%d,%d), ino = %lu, start = %lu nrpage = %u", show_dev_ino(__entry), (unsigned long)__entry->start, __entry->nrpage) ); TRACE_EVENT(f2fs_write_checkpoint, TP_PROTO(struct super_block *sb, int reason, const char *msg), TP_ARGS(sb, reason, msg), TP_STRUCT__entry( __field(dev_t, dev) __field(int, reason) __string(dest_msg, msg) ), TP_fast_assign( __entry->dev = sb->s_dev; __entry->reason = reason; __assign_str(dest_msg); ), TP_printk("dev = (%d,%d), checkpoint for %s, state = %s", show_dev(__entry->dev), show_cpreason(__entry->reason), __get_str(dest_msg)) ); DECLARE_EVENT_CLASS(f2fs_discard, TP_PROTO(struct block_device *dev, block_t blkstart, block_t blklen), TP_ARGS(dev, blkstart, blklen), TP_STRUCT__entry( __field(dev_t, dev) __field(block_t, blkstart) __field(block_t, blklen) ), TP_fast_assign( __entry->dev = dev->bd_dev; __entry->blkstart = blkstart; __entry->blklen = blklen; ), TP_printk("dev = (%d,%d), blkstart = 0x%llx, blklen = 0x%llx", show_dev(__entry->dev), (unsigned long long)__entry->blkstart, (unsigned long long)__entry->blklen) ); DEFINE_EVENT(f2fs_discard, f2fs_queue_discard, TP_PROTO(struct block_device *dev, block_t blkstart, block_t blklen), TP_ARGS(dev, blkstart, blklen) ); DEFINE_EVENT(f2fs_discard, f2fs_issue_discard, TP_PROTO(struct block_device *dev, block_t blkstart, block_t blklen), TP_ARGS(dev, blkstart, blklen) ); DEFINE_EVENT(f2fs_discard, f2fs_remove_discard, TP_PROTO(struct block_device *dev, block_t blkstart, block_t blklen), TP_ARGS(dev, blkstart, blklen) ); DECLARE_EVENT_CLASS(f2fs_reset_zone, TP_PROTO(struct block_device *dev, block_t blkstart), TP_ARGS(dev, blkstart), TP_STRUCT__entry( __field(dev_t, dev) __field(block_t, blkstart) ), TP_fast_assign( __entry->dev = dev->bd_dev; __entry->blkstart = blkstart; ), TP_printk("dev = (%d,%d), zone at block = 0x%llx", show_dev(__entry->dev), (unsigned long long)__entry->blkstart) ); DEFINE_EVENT(f2fs_reset_zone, f2fs_queue_reset_zone, TP_PROTO(struct block_device *dev, block_t blkstart), TP_ARGS(dev, blkstart) ); DEFINE_EVENT(f2fs_reset_zone, f2fs_issue_reset_zone, TP_PROTO(struct block_device *dev, block_t blkstart), TP_ARGS(dev, blkstart) ); TRACE_EVENT(f2fs_issue_flush, TP_PROTO(struct block_device *dev, unsigned int nobarrier, unsigned int flush_merge, int ret), TP_ARGS(dev, nobarrier, flush_merge, ret), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned int, nobarrier) __field(unsigned int, flush_merge) __field(int, ret) ), TP_fast_assign( __entry->dev = dev->bd_dev; __entry->nobarrier = nobarrier; __entry->flush_merge = flush_merge; __entry->ret = ret; ), TP_printk("dev = (%d,%d), %s %s, ret = %d", show_dev(__entry->dev), __entry->nobarrier ? "skip (nobarrier)" : "issue", __entry->flush_merge ? 
" with flush_merge" : "", __entry->ret) ); TRACE_EVENT(f2fs_lookup_extent_tree_start, TP_PROTO(struct inode *inode, unsigned int pgofs, enum extent_type type), TP_ARGS(inode, pgofs, type), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(unsigned int, pgofs) __field(enum extent_type, type) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->pgofs = pgofs; __entry->type = type; ), TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, type = %s", show_dev_ino(__entry), __entry->pgofs, show_extent_type(__entry->type)) ); TRACE_EVENT_CONDITION(f2fs_lookup_read_extent_tree_end, TP_PROTO(struct inode *inode, unsigned int pgofs, struct extent_info *ei), TP_ARGS(inode, pgofs, ei), TP_CONDITION(ei), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(unsigned int, pgofs) __field(unsigned int, fofs) __field(unsigned int, len) __field(u32, blk) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->pgofs = pgofs; __entry->fofs = ei->fofs; __entry->len = ei->len; __entry->blk = ei->blk; ), TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, " "read_ext_info(fofs: %u, len: %u, blk: %u)", show_dev_ino(__entry), __entry->pgofs, __entry->fofs, __entry->len, __entry->blk) ); TRACE_EVENT_CONDITION(f2fs_lookup_age_extent_tree_end, TP_PROTO(struct inode *inode, unsigned int pgofs, struct extent_info *ei), TP_ARGS(inode, pgofs, ei), TP_CONDITION(ei), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(unsigned int, pgofs) __field(unsigned int, fofs) __field(unsigned int, len) __field(unsigned long long, age) __field(unsigned long long, blocks) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->pgofs = pgofs; __entry->fofs = ei->fofs; __entry->len = ei->len; __entry->age = ei->age; __entry->blocks = ei->last_blocks; ), TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, " "age_ext_info(fofs: %u, len: %u, age: %llu, blocks: %llu)", show_dev_ino(__entry), __entry->pgofs, __entry->fofs, __entry->len, __entry->age, __entry->blocks) ); TRACE_EVENT(f2fs_update_read_extent_tree_range, TP_PROTO(struct inode *inode, unsigned int pgofs, unsigned int len, block_t blkaddr, unsigned int c_len), TP_ARGS(inode, pgofs, len, blkaddr, c_len), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(unsigned int, pgofs) __field(u32, blk) __field(unsigned int, len) __field(unsigned int, c_len) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->pgofs = pgofs; __entry->len = len; __entry->blk = blkaddr; __entry->c_len = c_len; ), TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, " "len = %u, blkaddr = %u, c_len = %u", show_dev_ino(__entry), __entry->pgofs, __entry->len, __entry->blk, __entry->c_len) ); TRACE_EVENT(f2fs_update_age_extent_tree_range, TP_PROTO(struct inode *inode, unsigned int pgofs, unsigned int len, unsigned long long age, unsigned long long last_blks), TP_ARGS(inode, pgofs, len, age, last_blks), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(unsigned int, pgofs) __field(unsigned int, len) __field(unsigned long long, age) __field(unsigned long long, blocks) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->pgofs = pgofs; __entry->len = len; __entry->age = age; __entry->blocks = last_blks; ), TP_printk("dev = (%d,%d), ino = %lu, pgofs = %u, " "len = %u, age = %llu, blocks = %llu", show_dev_ino(__entry), __entry->pgofs, __entry->len, __entry->age, 
__entry->blocks) ); TRACE_EVENT(f2fs_shrink_extent_tree, TP_PROTO(struct f2fs_sb_info *sbi, unsigned int node_cnt, unsigned int tree_cnt, enum extent_type type), TP_ARGS(sbi, node_cnt, tree_cnt, type), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned int, node_cnt) __field(unsigned int, tree_cnt) __field(enum extent_type, type) ), TP_fast_assign( __entry->dev = sbi->sb->s_dev; __entry->node_cnt = node_cnt; __entry->tree_cnt = tree_cnt; __entry->type = type; ), TP_printk("dev = (%d,%d), shrunk: node_cnt = %u, tree_cnt = %u, type = %s", show_dev(__entry->dev), __entry->node_cnt, __entry->tree_cnt, show_extent_type(__entry->type)) ); TRACE_EVENT(f2fs_destroy_extent_tree, TP_PROTO(struct inode *inode, unsigned int node_cnt, enum extent_type type), TP_ARGS(inode, node_cnt, type), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(unsigned int, node_cnt) __field(enum extent_type, type) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->node_cnt = node_cnt; __entry->type = type; ), TP_printk("dev = (%d,%d), ino = %lu, destroyed: node_cnt = %u, type = %s", show_dev_ino(__entry), __entry->node_cnt, show_extent_type(__entry->type)) ); DECLARE_EVENT_CLASS(f2fs_sync_dirty_inodes, TP_PROTO(struct super_block *sb, int type, s64 count), TP_ARGS(sb, type, count), TP_STRUCT__entry( __field(dev_t, dev) __field(int, type) __field(s64, count) ), TP_fast_assign( __entry->dev = sb->s_dev; __entry->type = type; __entry->count = count; ), TP_printk("dev = (%d,%d), %s, dirty count = %lld", show_dev(__entry->dev), show_file_type(__entry->type), __entry->count) ); DEFINE_EVENT(f2fs_sync_dirty_inodes, f2fs_sync_dirty_inodes_enter, TP_PROTO(struct super_block *sb, int type, s64 count), TP_ARGS(sb, type, count) ); DEFINE_EVENT(f2fs_sync_dirty_inodes, f2fs_sync_dirty_inodes_exit, TP_PROTO(struct super_block *sb, int type, s64 count), TP_ARGS(sb, type, count) ); TRACE_EVENT(f2fs_shutdown, TP_PROTO(struct f2fs_sb_info *sbi, unsigned int mode, int ret), TP_ARGS(sbi, mode, ret), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned int, mode) __field(int, ret) ), TP_fast_assign( __entry->dev = sbi->sb->s_dev; __entry->mode = mode; __entry->ret = ret; ), TP_printk("dev = (%d,%d), mode: %s, ret:%d", show_dev(__entry->dev), show_shutdown_mode(__entry->mode), __entry->ret) ); DECLARE_EVENT_CLASS(f2fs_zip_start, TP_PROTO(struct inode *inode, pgoff_t cluster_idx, unsigned int cluster_size, unsigned char algtype), TP_ARGS(inode, cluster_idx, cluster_size, algtype), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(pgoff_t, idx) __field(unsigned int, size) __field(unsigned int, algtype) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->idx = cluster_idx; __entry->size = cluster_size; __entry->algtype = algtype; ), TP_printk("dev = (%d,%d), ino = %lu, cluster_idx:%lu, " "cluster_size = %u, algorithm = %s", show_dev_ino(__entry), __entry->idx, __entry->size, show_compress_algorithm(__entry->algtype)) ); DECLARE_EVENT_CLASS(f2fs_zip_end, TP_PROTO(struct inode *inode, pgoff_t cluster_idx, unsigned int compressed_size, int ret), TP_ARGS(inode, cluster_idx, compressed_size, ret), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(pgoff_t, idx) __field(unsigned int, size) __field(unsigned int, ret) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->idx = cluster_idx; __entry->size = compressed_size; __entry->ret = ret; ), TP_printk("dev = (%d,%d), ino = %lu, 
cluster_idx:%lu, " "compressed_size = %u, ret = %d", show_dev_ino(__entry), __entry->idx, __entry->size, __entry->ret) ); DEFINE_EVENT(f2fs_zip_start, f2fs_compress_pages_start, TP_PROTO(struct inode *inode, pgoff_t cluster_idx, unsigned int cluster_size, unsigned char algtype), TP_ARGS(inode, cluster_idx, cluster_size, algtype) ); DEFINE_EVENT(f2fs_zip_start, f2fs_decompress_pages_start, TP_PROTO(struct inode *inode, pgoff_t cluster_idx, unsigned int cluster_size, unsigned char algtype), TP_ARGS(inode, cluster_idx, cluster_size, algtype) ); DEFINE_EVENT(f2fs_zip_end, f2fs_compress_pages_end, TP_PROTO(struct inode *inode, pgoff_t cluster_idx, unsigned int compressed_size, int ret), TP_ARGS(inode, cluster_idx, compressed_size, ret) ); DEFINE_EVENT(f2fs_zip_end, f2fs_decompress_pages_end, TP_PROTO(struct inode *inode, pgoff_t cluster_idx, unsigned int compressed_size, int ret), TP_ARGS(inode, cluster_idx, compressed_size, ret) ); #ifdef CONFIG_F2FS_IOSTAT TRACE_EVENT(f2fs_iostat, TP_PROTO(struct f2fs_sb_info *sbi, unsigned long long *iostat), TP_ARGS(sbi, iostat), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned long long, app_dio) __field(unsigned long long, app_bio) __field(unsigned long long, app_wio) __field(unsigned long long, app_mio) __field(unsigned long long, app_bcdio) __field(unsigned long long, app_mcdio) __field(unsigned long long, fs_dio) __field(unsigned long long, fs_cdio) __field(unsigned long long, fs_nio) __field(unsigned long long, fs_mio) __field(unsigned long long, fs_gc_dio) __field(unsigned long long, fs_gc_nio) __field(unsigned long long, fs_cp_dio) __field(unsigned long long, fs_cp_nio) __field(unsigned long long, fs_cp_mio) __field(unsigned long long, app_drio) __field(unsigned long long, app_brio) __field(unsigned long long, app_rio) __field(unsigned long long, app_mrio) __field(unsigned long long, app_bcrio) __field(unsigned long long, app_mcrio) __field(unsigned long long, fs_drio) __field(unsigned long long, fs_gdrio) __field(unsigned long long, fs_cdrio) __field(unsigned long long, fs_nrio) __field(unsigned long long, fs_mrio) __field(unsigned long long, fs_discard) __field(unsigned long long, fs_reset_zone) ), TP_fast_assign( __entry->dev = sbi->sb->s_dev; __entry->app_dio = iostat[APP_DIRECT_IO]; __entry->app_bio = iostat[APP_BUFFERED_IO]; __entry->app_wio = iostat[APP_WRITE_IO]; __entry->app_mio = iostat[APP_MAPPED_IO]; __entry->app_bcdio = iostat[APP_BUFFERED_CDATA_IO]; __entry->app_mcdio = iostat[APP_MAPPED_CDATA_IO]; __entry->fs_dio = iostat[FS_DATA_IO]; __entry->fs_cdio = iostat[FS_CDATA_IO]; __entry->fs_nio = iostat[FS_NODE_IO]; __entry->fs_mio = iostat[FS_META_IO]; __entry->fs_gc_dio = iostat[FS_GC_DATA_IO]; __entry->fs_gc_nio = iostat[FS_GC_NODE_IO]; __entry->fs_cp_dio = iostat[FS_CP_DATA_IO]; __entry->fs_cp_nio = iostat[FS_CP_NODE_IO]; __entry->fs_cp_mio = iostat[FS_CP_META_IO]; __entry->app_drio = iostat[APP_DIRECT_READ_IO]; __entry->app_brio = iostat[APP_BUFFERED_READ_IO]; __entry->app_rio = iostat[APP_READ_IO]; __entry->app_mrio = iostat[APP_MAPPED_READ_IO]; __entry->app_bcrio = iostat[APP_BUFFERED_CDATA_READ_IO]; __entry->app_mcrio = iostat[APP_MAPPED_CDATA_READ_IO]; __entry->fs_drio = iostat[FS_DATA_READ_IO]; __entry->fs_gdrio = iostat[FS_GDATA_READ_IO]; __entry->fs_cdrio = iostat[FS_CDATA_READ_IO]; __entry->fs_nrio = iostat[FS_NODE_READ_IO]; __entry->fs_mrio = iostat[FS_META_READ_IO]; __entry->fs_discard = iostat[FS_DISCARD_IO]; __entry->fs_reset_zone = iostat[FS_ZONE_RESET_IO]; ), TP_printk("dev = (%d,%d), " "app [write=%llu 
(direct=%llu, buffered=%llu), mapped=%llu, " "compr(buffered=%llu, mapped=%llu)], " "fs [data=%llu, cdata=%llu, node=%llu, meta=%llu, discard=%llu, " "reset_zone=%llu], " "gc [data=%llu, node=%llu], " "cp [data=%llu, node=%llu, meta=%llu], " "app [read=%llu (direct=%llu, buffered=%llu), mapped=%llu], " "compr(buffered=%llu, mapped=%llu)], " "fs [data=%llu, (gc_data=%llu, cdata=%llu), " "node=%llu, meta=%llu]", show_dev(__entry->dev), __entry->app_wio, __entry->app_dio, __entry->app_bio, __entry->app_mio, __entry->app_bcdio, __entry->app_mcdio, __entry->fs_dio, __entry->fs_cdio, __entry->fs_nio, __entry->fs_mio, __entry->fs_discard, __entry->fs_reset_zone, __entry->fs_gc_dio, __entry->fs_gc_nio, __entry->fs_cp_dio, __entry->fs_cp_nio, __entry->fs_cp_mio, __entry->app_rio, __entry->app_drio, __entry->app_brio, __entry->app_mrio, __entry->app_bcrio, __entry->app_mcrio, __entry->fs_drio, __entry->fs_gdrio, __entry->fs_cdrio, __entry->fs_nrio, __entry->fs_mrio) ); #ifndef __F2FS_IOSTAT_LATENCY_TYPE #define __F2FS_IOSTAT_LATENCY_TYPE struct f2fs_iostat_latency { unsigned int peak_lat; unsigned int avg_lat; unsigned int cnt; }; #endif /* __F2FS_IOSTAT_LATENCY_TYPE */ TRACE_EVENT(f2fs_iostat_latency, TP_PROTO(struct f2fs_sb_info *sbi, struct f2fs_iostat_latency (*iostat_lat)[NR_PAGE_TYPE]), TP_ARGS(sbi, iostat_lat), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned int, d_rd_peak) __field(unsigned int, d_rd_avg) __field(unsigned int, d_rd_cnt) __field(unsigned int, n_rd_peak) __field(unsigned int, n_rd_avg) __field(unsigned int, n_rd_cnt) __field(unsigned int, m_rd_peak) __field(unsigned int, m_rd_avg) __field(unsigned int, m_rd_cnt) __field(unsigned int, d_wr_s_peak) __field(unsigned int, d_wr_s_avg) __field(unsigned int, d_wr_s_cnt) __field(unsigned int, n_wr_s_peak) __field(unsigned int, n_wr_s_avg) __field(unsigned int, n_wr_s_cnt) __field(unsigned int, m_wr_s_peak) __field(unsigned int, m_wr_s_avg) __field(unsigned int, m_wr_s_cnt) __field(unsigned int, d_wr_as_peak) __field(unsigned int, d_wr_as_avg) __field(unsigned int, d_wr_as_cnt) __field(unsigned int, n_wr_as_peak) __field(unsigned int, n_wr_as_avg) __field(unsigned int, n_wr_as_cnt) __field(unsigned int, m_wr_as_peak) __field(unsigned int, m_wr_as_avg) __field(unsigned int, m_wr_as_cnt) ), TP_fast_assign( __entry->dev = sbi->sb->s_dev; __entry->d_rd_peak = iostat_lat[READ_IO][DATA].peak_lat; __entry->d_rd_avg = iostat_lat[READ_IO][DATA].avg_lat; __entry->d_rd_cnt = iostat_lat[READ_IO][DATA].cnt; __entry->n_rd_peak = iostat_lat[READ_IO][NODE].peak_lat; __entry->n_rd_avg = iostat_lat[READ_IO][NODE].avg_lat; __entry->n_rd_cnt = iostat_lat[READ_IO][NODE].cnt; __entry->m_rd_peak = iostat_lat[READ_IO][META].peak_lat; __entry->m_rd_avg = iostat_lat[READ_IO][META].avg_lat; __entry->m_rd_cnt = iostat_lat[READ_IO][META].cnt; __entry->d_wr_s_peak = iostat_lat[WRITE_SYNC_IO][DATA].peak_lat; __entry->d_wr_s_avg = iostat_lat[WRITE_SYNC_IO][DATA].avg_lat; __entry->d_wr_s_cnt = iostat_lat[WRITE_SYNC_IO][DATA].cnt; __entry->n_wr_s_peak = iostat_lat[WRITE_SYNC_IO][NODE].peak_lat; __entry->n_wr_s_avg = iostat_lat[WRITE_SYNC_IO][NODE].avg_lat; __entry->n_wr_s_cnt = iostat_lat[WRITE_SYNC_IO][NODE].cnt; __entry->m_wr_s_peak = iostat_lat[WRITE_SYNC_IO][META].peak_lat; __entry->m_wr_s_avg = iostat_lat[WRITE_SYNC_IO][META].avg_lat; __entry->m_wr_s_cnt = iostat_lat[WRITE_SYNC_IO][META].cnt; __entry->d_wr_as_peak = iostat_lat[WRITE_ASYNC_IO][DATA].peak_lat; __entry->d_wr_as_avg = iostat_lat[WRITE_ASYNC_IO][DATA].avg_lat; __entry->d_wr_as_cnt = 
iostat_lat[WRITE_ASYNC_IO][DATA].cnt; __entry->n_wr_as_peak = iostat_lat[WRITE_ASYNC_IO][NODE].peak_lat; __entry->n_wr_as_avg = iostat_lat[WRITE_ASYNC_IO][NODE].avg_lat; __entry->n_wr_as_cnt = iostat_lat[WRITE_ASYNC_IO][NODE].cnt; __entry->m_wr_as_peak = iostat_lat[WRITE_ASYNC_IO][META].peak_lat; __entry->m_wr_as_avg = iostat_lat[WRITE_ASYNC_IO][META].avg_lat; __entry->m_wr_as_cnt = iostat_lat[WRITE_ASYNC_IO][META].cnt; ), TP_printk("dev = (%d,%d), " "iotype [peak lat.(ms)/avg lat.(ms)/count], " "rd_data [%u/%u/%u], rd_node [%u/%u/%u], rd_meta [%u/%u/%u], " "wr_sync_data [%u/%u/%u], wr_sync_node [%u/%u/%u], " "wr_sync_meta [%u/%u/%u], wr_async_data [%u/%u/%u], " "wr_async_node [%u/%u/%u], wr_async_meta [%u/%u/%u]", show_dev(__entry->dev), __entry->d_rd_peak, __entry->d_rd_avg, __entry->d_rd_cnt, __entry->n_rd_peak, __entry->n_rd_avg, __entry->n_rd_cnt, __entry->m_rd_peak, __entry->m_rd_avg, __entry->m_rd_cnt, __entry->d_wr_s_peak, __entry->d_wr_s_avg, __entry->d_wr_s_cnt, __entry->n_wr_s_peak, __entry->n_wr_s_avg, __entry->n_wr_s_cnt, __entry->m_wr_s_peak, __entry->m_wr_s_avg, __entry->m_wr_s_cnt, __entry->d_wr_as_peak, __entry->d_wr_as_avg, __entry->d_wr_as_cnt, __entry->n_wr_as_peak, __entry->n_wr_as_avg, __entry->n_wr_as_cnt, __entry->m_wr_as_peak, __entry->m_wr_as_avg, __entry->m_wr_as_cnt) ); #endif TRACE_EVENT(f2fs_bmap, TP_PROTO(struct inode *inode, sector_t lblock, sector_t pblock), TP_ARGS(inode, lblock, pblock), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(sector_t, lblock) __field(sector_t, pblock) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->lblock = lblock; __entry->pblock = pblock; ), TP_printk("dev = (%d,%d), ino = %lu, lblock:%lld, pblock:%lld", show_dev_ino(__entry), (unsigned long long)__entry->lblock, (unsigned long long)__entry->pblock) ); TRACE_EVENT(f2fs_fiemap, TP_PROTO(struct inode *inode, sector_t lblock, sector_t pblock, unsigned long long len, unsigned int flags, int ret), TP_ARGS(inode, lblock, pblock, len, flags, ret), TP_STRUCT__entry( __field(dev_t, dev) __field(ino_t, ino) __field(sector_t, lblock) __field(sector_t, pblock) __field(unsigned long long, len) __field(unsigned int, flags) __field(int, ret) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; __entry->lblock = lblock; __entry->pblock = pblock; __entry->len = len; __entry->flags = flags; __entry->ret = ret; ), TP_printk("dev = (%d,%d), ino = %lu, lblock:%lld, pblock:%lld, " "len:%llu, flags:%u, ret:%d", show_dev_ino(__entry), (unsigned long long)__entry->lblock, (unsigned long long)__entry->pblock, __entry->len, __entry->flags, __entry->ret) ); DECLARE_EVENT_CLASS(f2fs__rw_start, TP_PROTO(struct inode *inode, loff_t offset, int bytes, pid_t pid, char *pathname, char *command), TP_ARGS(inode, offset, bytes, pid, pathname, command), TP_STRUCT__entry( __string(pathbuf, pathname) __field(loff_t, offset) __field(int, bytes) __field(loff_t, i_size) __string(cmdline, command) __field(pid_t, pid) __field(ino_t, ino) ), TP_fast_assign( /* * Replace the spaces in filenames and cmdlines * because this screws up the tooling that parses * the traces. 
*/ __assign_str(pathbuf); (void)strreplace(__get_str(pathbuf), ' ', '_'); __entry->offset = offset; __entry->bytes = bytes; __entry->i_size = i_size_read(inode); __assign_str(cmdline); (void)strreplace(__get_str(cmdline), ' ', '_'); __entry->pid = pid; __entry->ino = inode->i_ino; ), TP_printk("entry_name %s, offset %llu, bytes %d, cmdline %s," " pid %d, i_size %llu, ino %lu", __get_str(pathbuf), __entry->offset, __entry->bytes, __get_str(cmdline), __entry->pid, __entry->i_size, (unsigned long) __entry->ino) ); DECLARE_EVENT_CLASS(f2fs__rw_end, TP_PROTO(struct inode *inode, loff_t offset, int bytes), TP_ARGS(inode, offset, bytes), TP_STRUCT__entry( __field(ino_t, ino) __field(loff_t, offset) __field(int, bytes) ), TP_fast_assign( __entry->ino = inode->i_ino; __entry->offset = offset; __entry->bytes = bytes; ), TP_printk("ino %lu, offset %llu, bytes %d", (unsigned long) __entry->ino, __entry->offset, __entry->bytes) ); DEFINE_EVENT(f2fs__rw_start, f2fs_dataread_start, TP_PROTO(struct inode *inode, loff_t offset, int bytes, pid_t pid, char *pathname, char *command), TP_ARGS(inode, offset, bytes, pid, pathname, command) ); DEFINE_EVENT(f2fs__rw_end, f2fs_dataread_end, TP_PROTO(struct inode *inode, loff_t offset, int bytes), TP_ARGS(inode, offset, bytes) ); DEFINE_EVENT(f2fs__rw_start, f2fs_datawrite_start, TP_PROTO(struct inode *inode, loff_t offset, int bytes, pid_t pid, char *pathname, char *command), TP_ARGS(inode, offset, bytes, pid, pathname, command) ); DEFINE_EVENT(f2fs__rw_end, f2fs_datawrite_end, TP_PROTO(struct inode *inode, loff_t offset, int bytes), TP_ARGS(inode, offset, bytes) ); #endif /* _TRACE_F2FS_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
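/*
 * Consumption sketch (an illustration, not part of the header above): a
 * tracepoint header such as this one is instantiated exactly once by
 * defining CREATE_TRACE_POINTS before including it; all other users
 * include it plainly. Assuming the usual layout, the f2fs core does:
 *
 *	#define CREATE_TRACE_POINTS
 *	#include <trace/events/f2fs.h>
 *
 * after which individual events can be toggled at runtime through
 * tracefs, e.g. tracing/events/f2fs/f2fs_readpage/enable.
 */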
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2017 Facebook */ #include <linux/kernel.h> #include <linux/blkdev.h> #include <linux/build_bug.h> #include <linux/debugfs.h> #include "blk.h" #include "blk-mq.h" #include
"blk-mq-debugfs.h" #include "blk-mq-sched.h" #include "blk-rq-qos.h" static int queue_poll_stat_show(void *data, struct seq_file *m) { return 0; } static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos) __acquires(&q->requeue_lock) { struct request_queue *q = m->private; spin_lock_irq(&q->requeue_lock); return seq_list_start(&q->requeue_list, *pos); } static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos) { struct request_queue *q = m->private; return seq_list_next(v, &q->requeue_list, pos); } static void queue_requeue_list_stop(struct seq_file *m, void *v) __releases(&q->requeue_lock) { struct request_queue *q = m->private; spin_unlock_irq(&q->requeue_lock); } static const struct seq_operations queue_requeue_list_seq_ops = { .start = queue_requeue_list_start, .next = queue_requeue_list_next, .stop = queue_requeue_list_stop, .show = blk_mq_debugfs_rq_show, }; static int blk_flags_show(struct seq_file *m, const unsigned long flags, const char *const *flag_name, int flag_name_count) { bool sep = false; int i; for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) { if (!(flags & BIT(i))) continue; if (sep) seq_puts(m, "|"); sep = true; if (i < flag_name_count && flag_name[i]) seq_puts(m, flag_name[i]); else seq_printf(m, "%d", i); } return 0; } static int queue_pm_only_show(void *data, struct seq_file *m) { struct request_queue *q = data; seq_printf(m, "%d\n", atomic_read(&q->pm_only)); return 0; } #define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name static const char *const blk_queue_flag_name[] = { QUEUE_FLAG_NAME(DYING), QUEUE_FLAG_NAME(NOMERGES), QUEUE_FLAG_NAME(SAME_COMP), QUEUE_FLAG_NAME(FAIL_IO), QUEUE_FLAG_NAME(NOXMERGES), QUEUE_FLAG_NAME(SAME_FORCE), QUEUE_FLAG_NAME(INIT_DONE), QUEUE_FLAG_NAME(STATS), QUEUE_FLAG_NAME(REGISTERED), QUEUE_FLAG_NAME(QUIESCED), QUEUE_FLAG_NAME(RQ_ALLOC_TIME), QUEUE_FLAG_NAME(HCTX_ACTIVE), QUEUE_FLAG_NAME(SQ_SCHED), }; #undef QUEUE_FLAG_NAME static int queue_state_show(void *data, struct seq_file *m) { struct request_queue *q = data; BUILD_BUG_ON(ARRAY_SIZE(blk_queue_flag_name) != QUEUE_FLAG_MAX); blk_flags_show(m, q->queue_flags, blk_queue_flag_name, ARRAY_SIZE(blk_queue_flag_name)); seq_puts(m, "\n"); return 0; } static ssize_t queue_state_write(void *data, const char __user *buf, size_t count, loff_t *ppos) { struct request_queue *q = data; char opbuf[16] = { }, *op; /* * The "state" attribute is removed when the queue is removed. Don't * allow setting the state on a dying queue to avoid a use-after-free. 
*/ if (blk_queue_dying(q)) return -ENOENT; if (count >= sizeof(opbuf)) { pr_err("%s: operation too long\n", __func__); goto inval; } if (copy_from_user(opbuf, buf, count)) return -EFAULT; op = strstrip(opbuf); if (strcmp(op, "run") == 0) { blk_mq_run_hw_queues(q, true); } else if (strcmp(op, "start") == 0) { blk_mq_start_stopped_hw_queues(q, true); } else if (strcmp(op, "kick") == 0) { blk_mq_kick_requeue_list(q); } else { pr_err("%s: unsupported operation '%s'\n", __func__, op); inval: pr_err("%s: use 'run', 'start' or 'kick'\n", __func__); return -EINVAL; } return count; } static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = { { "poll_stat", 0400, queue_poll_stat_show }, { "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops }, { "pm_only", 0600, queue_pm_only_show, NULL }, { "state", 0600, queue_state_show, queue_state_write }, { "zone_wplugs", 0400, queue_zone_wplugs_show, NULL }, { }, }; #define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name static const char *const hctx_state_name[] = { HCTX_STATE_NAME(STOPPED), HCTX_STATE_NAME(TAG_ACTIVE), HCTX_STATE_NAME(SCHED_RESTART), HCTX_STATE_NAME(INACTIVE), }; #undef HCTX_STATE_NAME static int hctx_state_show(void *data, struct seq_file *m) { struct blk_mq_hw_ctx *hctx = data; BUILD_BUG_ON(ARRAY_SIZE(hctx_state_name) != BLK_MQ_S_MAX); blk_flags_show(m, hctx->state, hctx_state_name, ARRAY_SIZE(hctx_state_name)); seq_puts(m, "\n"); return 0; } #define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name static const char *const alloc_policy_name[] = { BLK_TAG_ALLOC_NAME(FIFO), BLK_TAG_ALLOC_NAME(RR), }; #undef BLK_TAG_ALLOC_NAME #define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name static const char *const hctx_flag_name[] = { HCTX_FLAG_NAME(SHOULD_MERGE), HCTX_FLAG_NAME(TAG_QUEUE_SHARED), HCTX_FLAG_NAME(STACKING), HCTX_FLAG_NAME(TAG_HCTX_SHARED), HCTX_FLAG_NAME(BLOCKING), HCTX_FLAG_NAME(NO_SCHED), HCTX_FLAG_NAME(NO_SCHED_BY_DEFAULT), }; #undef HCTX_FLAG_NAME static int hctx_flags_show(void *data, struct seq_file *m) { struct blk_mq_hw_ctx *hctx = data; const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags); BUILD_BUG_ON(ARRAY_SIZE(hctx_flag_name) != BLK_MQ_F_ALLOC_POLICY_START_BIT); BUILD_BUG_ON(ARRAY_SIZE(alloc_policy_name) != BLK_TAG_ALLOC_MAX); seq_puts(m, "alloc_policy="); if (alloc_policy < ARRAY_SIZE(alloc_policy_name) && alloc_policy_name[alloc_policy]) seq_puts(m, alloc_policy_name[alloc_policy]); else seq_printf(m, "%d", alloc_policy); seq_puts(m, " "); blk_flags_show(m, hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy), hctx_flag_name, ARRAY_SIZE(hctx_flag_name)); seq_puts(m, "\n"); return 0; } #define CMD_FLAG_NAME(name) [__REQ_##name] = #name static const char *const cmd_flag_name[] = { CMD_FLAG_NAME(FAILFAST_DEV), CMD_FLAG_NAME(FAILFAST_TRANSPORT), CMD_FLAG_NAME(FAILFAST_DRIVER), CMD_FLAG_NAME(SYNC), CMD_FLAG_NAME(META), CMD_FLAG_NAME(PRIO), CMD_FLAG_NAME(NOMERGE), CMD_FLAG_NAME(IDLE), CMD_FLAG_NAME(INTEGRITY), CMD_FLAG_NAME(FUA), CMD_FLAG_NAME(PREFLUSH), CMD_FLAG_NAME(RAHEAD), CMD_FLAG_NAME(BACKGROUND), CMD_FLAG_NAME(NOWAIT), CMD_FLAG_NAME(POLLED), CMD_FLAG_NAME(ALLOC_CACHE), CMD_FLAG_NAME(SWAP), CMD_FLAG_NAME(DRV), CMD_FLAG_NAME(FS_PRIVATE), CMD_FLAG_NAME(ATOMIC), CMD_FLAG_NAME(NOUNMAP), }; #undef CMD_FLAG_NAME #define RQF_NAME(name) [__RQF_##name] = #name static const char *const rqf_name[] = { RQF_NAME(STARTED), RQF_NAME(FLUSH_SEQ), RQF_NAME(MIXED_MERGE), RQF_NAME(DONTPREP), RQF_NAME(SCHED_TAGS), RQF_NAME(USE_SCHED), RQF_NAME(FAILED), RQF_NAME(QUIET), RQF_NAME(IO_STAT), 
RQF_NAME(PM), RQF_NAME(HASHED), RQF_NAME(STATS), RQF_NAME(SPECIAL_PAYLOAD), RQF_NAME(ZONE_WRITE_PLUGGING), RQF_NAME(TIMED_OUT), RQF_NAME(RESV), }; #undef RQF_NAME static const char *const blk_mq_rq_state_name_array[] = { [MQ_RQ_IDLE] = "idle", [MQ_RQ_IN_FLIGHT] = "in_flight", [MQ_RQ_COMPLETE] = "complete", }; static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state) { if (WARN_ON_ONCE((unsigned int)rq_state >= ARRAY_SIZE(blk_mq_rq_state_name_array))) return "(?)"; return blk_mq_rq_state_name_array[rq_state]; } int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq) { const struct blk_mq_ops *const mq_ops = rq->q->mq_ops; const enum req_op op = req_op(rq); const char *op_str = blk_op_str(op); BUILD_BUG_ON(ARRAY_SIZE(cmd_flag_name) != __REQ_NR_BITS); BUILD_BUG_ON(ARRAY_SIZE(rqf_name) != __RQF_BITS); seq_printf(m, "%p {.op=", rq); if (strcmp(op_str, "UNKNOWN") == 0) seq_printf(m, "%u", op); else seq_printf(m, "%s", op_str); seq_puts(m, ", .cmd_flags="); blk_flags_show(m, (__force unsigned int)(rq->cmd_flags & ~REQ_OP_MASK), cmd_flag_name, ARRAY_SIZE(cmd_flag_name)); seq_puts(m, ", .rq_flags="); blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name, ARRAY_SIZE(rqf_name)); seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq))); seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag, rq->internal_tag); if (mq_ops->show_rq) mq_ops->show_rq(m, rq); seq_puts(m, "}\n"); return 0; } EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show); int blk_mq_debugfs_rq_show(struct seq_file *m, void *v) { return __blk_mq_debugfs_rq_show(m, list_entry_rq(v)); } EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show); static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos) __acquires(&hctx->lock) { struct blk_mq_hw_ctx *hctx = m->private; spin_lock(&hctx->lock); return seq_list_start(&hctx->dispatch, *pos); } static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos) { struct blk_mq_hw_ctx *hctx = m->private; return seq_list_next(v, &hctx->dispatch, pos); } static void hctx_dispatch_stop(struct seq_file *m, void *v) __releases(&hctx->lock) { struct blk_mq_hw_ctx *hctx = m->private; spin_unlock(&hctx->lock); } static const struct seq_operations hctx_dispatch_seq_ops = { .start = hctx_dispatch_start, .next = hctx_dispatch_next, .stop = hctx_dispatch_stop, .show = blk_mq_debugfs_rq_show, }; struct show_busy_params { struct seq_file *m; struct blk_mq_hw_ctx *hctx; }; /* * Note: the state of a request may change while this function is in progress, * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to * keep iterating requests. 
*/ static bool hctx_show_busy_rq(struct request *rq, void *data) { const struct show_busy_params *params = data; if (rq->mq_hctx == params->hctx) __blk_mq_debugfs_rq_show(params->m, rq); return true; } static int hctx_busy_show(void *data, struct seq_file *m) { struct blk_mq_hw_ctx *hctx = data; struct show_busy_params params = { .m = m, .hctx = hctx }; blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq, &params); return 0; } static const char *const hctx_types[] = { [HCTX_TYPE_DEFAULT] = "default", [HCTX_TYPE_READ] = "read", [HCTX_TYPE_POLL] = "poll", }; static int hctx_type_show(void *data, struct seq_file *m) { struct blk_mq_hw_ctx *hctx = data; BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES); seq_printf(m, "%s\n", hctx_types[hctx->type]); return 0; } static int hctx_ctx_map_show(void *data, struct seq_file *m) { struct blk_mq_hw_ctx *hctx = data; sbitmap_bitmap_show(&hctx->ctx_map, m); return 0; } static void blk_mq_debugfs_tags_show(struct seq_file *m, struct blk_mq_tags *tags) { seq_printf(m, "nr_tags=%u\n", tags->nr_tags); seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags); seq_printf(m, "active_queues=%d\n", READ_ONCE(tags->active_queues)); seq_puts(m, "\nbitmap_tags:\n"); sbitmap_queue_show(&tags->bitmap_tags, m); if (tags->nr_reserved_tags) { seq_puts(m, "\nbreserved_tags:\n"); sbitmap_queue_show(&tags->breserved_tags, m); } } static int hctx_tags_show(void *data, struct seq_file *m) { struct blk_mq_hw_ctx *hctx = data; struct request_queue *q = hctx->queue; int res; res = mutex_lock_interruptible(&q->sysfs_lock); if (res) goto out; if (hctx->tags) blk_mq_debugfs_tags_show(m, hctx->tags); mutex_unlock(&q->sysfs_lock); out: return res; } static int hctx_tags_bitmap_show(void *data, struct seq_file *m) { struct blk_mq_hw_ctx *hctx = data; struct request_queue *q = hctx->queue; int res; res = mutex_lock_interruptible(&q->sysfs_lock); if (res) goto out; if (hctx->tags) sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m); mutex_unlock(&q->sysfs_lock); out: return res; } static int hctx_sched_tags_show(void *data, struct seq_file *m) { struct blk_mq_hw_ctx *hctx = data; struct request_queue *q = hctx->queue; int res; res = mutex_lock_interruptible(&q->sysfs_lock); if (res) goto out; if (hctx->sched_tags) blk_mq_debugfs_tags_show(m, hctx->sched_tags); mutex_unlock(&q->sysfs_lock); out: return res; } static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m) { struct blk_mq_hw_ctx *hctx = data; struct request_queue *q = hctx->queue; int res; res = mutex_lock_interruptible(&q->sysfs_lock); if (res) goto out; if (hctx->sched_tags) sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m); mutex_unlock(&q->sysfs_lock); out: return res; } static int hctx_active_show(void *data, struct seq_file *m) { struct blk_mq_hw_ctx *hctx = data; seq_printf(m, "%d\n", __blk_mq_active_requests(hctx)); return 0; } static int hctx_dispatch_busy_show(void *data, struct seq_file *m) { struct blk_mq_hw_ctx *hctx = data; seq_printf(m, "%u\n", hctx->dispatch_busy); return 0; } #define CTX_RQ_SEQ_OPS(name, type) \ static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \ __acquires(&ctx->lock) \ { \ struct blk_mq_ctx *ctx = m->private; \ \ spin_lock(&ctx->lock); \ return seq_list_start(&ctx->rq_lists[type], *pos); \ } \ \ static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v, \ loff_t *pos) \ { \ struct blk_mq_ctx *ctx = m->private; \ \ return seq_list_next(v, &ctx->rq_lists[type], pos); \ } \ \ static void ctx_##name##_rq_list_stop(struct
seq_file *m, void *v) \ __releases(&ctx->lock) \ { \ struct blk_mq_ctx *ctx = m->private; \ \ spin_unlock(&ctx->lock); \ } \ \ static const struct seq_operations ctx_##name##_rq_list_seq_ops = { \ .start = ctx_##name##_rq_list_start, \ .next = ctx_##name##_rq_list_next, \ .stop = ctx_##name##_rq_list_stop, \ .show = blk_mq_debugfs_rq_show, \ } CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT); CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ); CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL); static int blk_mq_debugfs_show(struct seq_file *m, void *v) { const struct blk_mq_debugfs_attr *attr = m->private; void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private; return attr->show(data, m); } static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct seq_file *m = file->private_data; const struct blk_mq_debugfs_attr *attr = m->private; void *data = d_inode(file->f_path.dentry->d_parent)->i_private; /* * Attributes that only implement .seq_ops are read-only and 'attr' is * the same as 'data' in this case. */ if (attr == data || !attr->write) return -EPERM; return attr->write(data, buf, count, ppos); } static int blk_mq_debugfs_open(struct inode *inode, struct file *file) { const struct blk_mq_debugfs_attr *attr = inode->i_private; void *data = d_inode(file->f_path.dentry->d_parent)->i_private; struct seq_file *m; int ret; if (attr->seq_ops) { ret = seq_open(file, attr->seq_ops); if (!ret) { m = file->private_data; m->private = data; } return ret; } if (WARN_ON_ONCE(!attr->show)) return -EPERM; return single_open(file, blk_mq_debugfs_show, inode->i_private); } static int blk_mq_debugfs_release(struct inode *inode, struct file *file) { const struct blk_mq_debugfs_attr *attr = inode->i_private; if (attr->show) return single_release(inode, file); return seq_release(inode, file); } static const struct file_operations blk_mq_debugfs_fops = { .open = blk_mq_debugfs_open, .read = seq_read, .write = blk_mq_debugfs_write, .llseek = seq_lseek, .release = blk_mq_debugfs_release, }; static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = { {"state", 0400, hctx_state_show}, {"flags", 0400, hctx_flags_show}, {"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops}, {"busy", 0400, hctx_busy_show}, {"ctx_map", 0400, hctx_ctx_map_show}, {"tags", 0400, hctx_tags_show}, {"tags_bitmap", 0400, hctx_tags_bitmap_show}, {"sched_tags", 0400, hctx_sched_tags_show}, {"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show}, {"active", 0400, hctx_active_show}, {"dispatch_busy", 0400, hctx_dispatch_busy_show}, {"type", 0400, hctx_type_show}, {}, }; static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = { {"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops}, {"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops}, {"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops}, {}, }; static void debugfs_create_files(struct dentry *parent, void *data, const struct blk_mq_debugfs_attr *attr) { if (IS_ERR_OR_NULL(parent)) return; d_inode(parent)->i_private = data; for (; attr->name; attr++) debugfs_create_file(attr->name, attr->mode, parent, (void *)attr, &blk_mq_debugfs_fops); } void blk_mq_debugfs_register(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; unsigned long i; debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs); /* * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir * didn't exist yet (because we don't know what to name the directory * until the queue is registered to a gendisk).
*/ if (q->elevator && !q->sched_debugfs_dir) blk_mq_debugfs_register_sched(q); /* Similarly, blk_mq_init_hctx() couldn't do this previously. */ queue_for_each_hw_ctx(q, hctx, i) { if (!hctx->debugfs_dir) blk_mq_debugfs_register_hctx(q, hctx); if (q->elevator && !hctx->sched_debugfs_dir) blk_mq_debugfs_register_sched_hctx(q, hctx); } if (q->rq_qos) { struct rq_qos *rqos = q->rq_qos; while (rqos) { blk_mq_debugfs_register_rqos(rqos); rqos = rqos->next; } } } static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx) { struct dentry *ctx_dir; char name[20]; snprintf(name, sizeof(name), "cpu%u", ctx->cpu); ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir); debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs); } void blk_mq_debugfs_register_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx) { struct blk_mq_ctx *ctx; char name[20]; int i; if (!q->debugfs_dir) return; snprintf(name, sizeof(name), "hctx%u", hctx->queue_num); hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir); debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs); hctx_for_each_ctx(hctx, ctx, i) blk_mq_debugfs_register_ctx(hctx, ctx); } void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx) { if (!hctx->queue->debugfs_dir) return; debugfs_remove_recursive(hctx->debugfs_dir); hctx->sched_debugfs_dir = NULL; hctx->debugfs_dir = NULL; } void blk_mq_debugfs_register_hctxs(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; unsigned long i; queue_for_each_hw_ctx(q, hctx, i) blk_mq_debugfs_register_hctx(q, hctx); } void blk_mq_debugfs_unregister_hctxs(struct request_queue *q) { struct blk_mq_hw_ctx *hctx; unsigned long i; queue_for_each_hw_ctx(q, hctx, i) blk_mq_debugfs_unregister_hctx(hctx); } void blk_mq_debugfs_register_sched(struct request_queue *q) { struct elevator_type *e = q->elevator->type; lockdep_assert_held(&q->debugfs_mutex); /* * If the parent directory has not been created yet, return; we will be * called again later on and the directory/files will be created then.
if (!q->debugfs_dir) return; if (!e->queue_debugfs_attrs) return; q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir); debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs); } void blk_mq_debugfs_unregister_sched(struct request_queue *q) { lockdep_assert_held(&q->debugfs_mutex); debugfs_remove_recursive(q->sched_debugfs_dir); q->sched_debugfs_dir = NULL; } static const char *rq_qos_id_to_name(enum rq_qos_id id) { switch (id) { case RQ_QOS_WBT: return "wbt"; case RQ_QOS_LATENCY: return "latency"; case RQ_QOS_COST: return "cost"; } return "unknown"; } void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos) { lockdep_assert_held(&rqos->disk->queue->debugfs_mutex); if (!rqos->disk->queue->debugfs_dir) return; debugfs_remove_recursive(rqos->debugfs_dir); rqos->debugfs_dir = NULL; } void blk_mq_debugfs_register_rqos(struct rq_qos *rqos) { struct request_queue *q = rqos->disk->queue; const char *dir_name = rq_qos_id_to_name(rqos->id); lockdep_assert_held(&q->debugfs_mutex); if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs) return; if (!q->rqos_debugfs_dir) q->rqos_debugfs_dir = debugfs_create_dir("rqos", q->debugfs_dir); rqos->debugfs_dir = debugfs_create_dir(dir_name, q->rqos_debugfs_dir); debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs); } void blk_mq_debugfs_register_sched_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx) { struct elevator_type *e = q->elevator->type; lockdep_assert_held(&q->debugfs_mutex); /* * If the parent debugfs directory has not been created yet, return; * we will be called again later on with the appropriate parent debugfs * directory from blk_register_queue(). */ if (!hctx->debugfs_dir) return; if (!e->hctx_debugfs_attrs) return; hctx->sched_debugfs_dir = debugfs_create_dir("sched", hctx->debugfs_dir); debugfs_create_files(hctx->sched_debugfs_dir, hctx, e->hctx_debugfs_attrs); } void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx) { lockdep_assert_held(&hctx->queue->debugfs_mutex); if (!hctx->queue->debugfs_dir) return; debugfs_remove_recursive(hctx->sched_debugfs_dir); hctx->sched_debugfs_dir = NULL; }
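/*
 * Usage sketch (illustration only; "example_show" and
 * "example_queue_attrs" are hypothetical, not real kernel symbols): an
 * I/O scheduler populates the "sched" directory created by
 * blk_mq_debugfs_register_sched() above by pointing its elevator_type
 * at an attribute table shaped like blk_mq_debugfs_queue_attrs:
 *
 *	static int example_show(void *data, struct seq_file *m)
 *	{
 *		struct request_queue *q = data;
 *
 *		seq_printf(m, "%lu\n", q->nr_requests);
 *		return 0;
 *	}
 *
 *	static const struct blk_mq_debugfs_attr example_queue_attrs[] = {
 *		{"example", 0400, example_show},
 *		{},
 *	};
 *
 * With .queue_debugfs_attrs = example_queue_attrs in its elevator_type,
 * the file would appear as sched/example once the queue is registered.
 */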
// SPDX-License-Identifier: GPL-2.0 /* MPTCP socket monitoring support * * Copyright (c) 2019 Red Hat * * Author: Davide Caratti <dcaratti@redhat.com> */ #include <linux/kernel.h> #include <linux/net.h> #include <linux/inet_diag.h> #include <net/netlink.h> #include "protocol.h" static int subflow_get_info(struct sock *sk, struct sk_buff *skb) { struct mptcp_subflow_context *sf; struct nlattr *start; u32 flags = 0; bool slow; int err; if (inet_sk_state_load(sk) == TCP_LISTEN) return 0; start = nla_nest_start_noflag(skb, INET_ULP_INFO_MPTCP); if (!start) return -EMSGSIZE; slow = lock_sock_fast(sk); rcu_read_lock(); sf = rcu_dereference(inet_csk(sk)->icsk_ulp_data); if (!sf) { err = 0; goto nla_failure; } if (sf->mp_capable) flags |= MPTCP_SUBFLOW_FLAG_MCAP_REM; if (sf->request_mptcp) flags |= MPTCP_SUBFLOW_FLAG_MCAP_LOC; if (sf->mp_join) flags |= MPTCP_SUBFLOW_FLAG_JOIN_REM; if (sf->request_join) flags |= MPTCP_SUBFLOW_FLAG_JOIN_LOC; if (sf->backup) flags |= MPTCP_SUBFLOW_FLAG_BKUP_REM; if (sf->request_bkup) flags |= MPTCP_SUBFLOW_FLAG_BKUP_LOC; if (sf->fully_established) flags |= MPTCP_SUBFLOW_FLAG_FULLY_ESTABLISHED; if (sf->conn_finished) flags |= MPTCP_SUBFLOW_FLAG_CONNECTED; if (sf->map_valid) flags |= MPTCP_SUBFLOW_FLAG_MAPVALID; if (nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_TOKEN_REM, sf->remote_token) || nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_TOKEN_LOC, sf->token) || nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ, sf->rel_write_seq) || nla_put_u64_64bit(skb, MPTCP_SUBFLOW_ATTR_MAP_SEQ, sf->map_seq, MPTCP_SUBFLOW_ATTR_PAD) || nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_MAP_SFSEQ, sf->map_subflow_seq) || nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_SSN_OFFSET, sf->ssn_offset) || nla_put_u16(skb, MPTCP_SUBFLOW_ATTR_MAP_DATALEN, sf->map_data_len) || nla_put_u32(skb, MPTCP_SUBFLOW_ATTR_FLAGS, flags) || nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_REM, sf->remote_id) || nla_put_u8(skb, MPTCP_SUBFLOW_ATTR_ID_LOC, subflow_get_local_id(sf))) { err = -EMSGSIZE; goto nla_failure; } rcu_read_unlock(); unlock_sock_fast(sk, slow); nla_nest_end(skb, start); return 0; nla_failure: rcu_read_unlock(); unlock_sock_fast(sk, slow); nla_nest_cancel(skb, start); return err; } static size_t subflow_get_info_size(const struct sock *sk) { size_t size = 0; size += nla_total_size(0) + /* INET_ULP_INFO_MPTCP */ nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_TOKEN_REM */ nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_TOKEN_LOC */ nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_RELWRITE_SEQ */ nla_total_size_64bit(8) + /* MPTCP_SUBFLOW_ATTR_MAP_SEQ */ nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_MAP_SFSEQ */ nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_SSN_OFFSET */ nla_total_size(2) + /* MPTCP_SUBFLOW_ATTR_MAP_DATALEN */ nla_total_size(4) + /* MPTCP_SUBFLOW_ATTR_FLAGS */ nla_total_size(1) + /* MPTCP_SUBFLOW_ATTR_ID_REM */ nla_total_size(1) + /* MPTCP_SUBFLOW_ATTR_ID_LOC */ 0; return size; } void mptcp_diag_subflow_init(struct tcp_ulp_ops *ops) { ops->get_info = subflow_get_info; ops->get_info_size = subflow_get_info_size; }
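/*
 * Wiring sketch (an illustration, assuming the upstream call site): the
 * two callbacks above are hung off the subflow's tcp_ulp_ops once at
 * init time, which is what lets inet_diag (and so the ss tool) emit the
 * INET_ULP_INFO_MPTCP nest for every subflow it dumps:
 *
 *	mptcp_diag_subflow_init(&subflow_ulp_ops);
 *
 * as called from mptcp_subflow_init() during MPTCP protocol setup.
 */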
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <asm/processor.h>
#include <linux/kasan.h>

#include <trace/events/ipi.h>

static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);
static DEFINE_PER_CPU(struct task_struct *, irq_workd);

static void wake_irq_workd(void)
{
	struct task_struct *tsk = __this_cpu_read(irq_workd);

	if (!llist_empty(this_cpu_ptr(&lazy_list)) && tsk)
		wake_up_process(tsk);
}

#ifdef CONFIG_SMP
static void irq_work_wake(struct irq_work *entry)
{
	wake_irq_workd();
}

static DEFINE_PER_CPU(struct irq_work, irq_work_wakeup) =
	IRQ_WORK_INIT_HARD(irq_work_wake);
#endif

static int irq_workd_should_run(unsigned int cpu)
{
	return !llist_empty(this_cpu_ptr(&lazy_list));
}

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	int oflags;

	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK,
				 &work->node.a_flags);
	/*
	 * If the work is already pending, no need to raise the IPI.
	 * The pairing smp_mb() in irq_work_single() makes sure
	 * everything we did before is visible.
*/ if (oflags & IRQ_WORK_PENDING) return false; return true; } void __weak arch_irq_work_raise(void) { /* * Lame architectures will get the timer tick callback */ } static __always_inline void irq_work_raise(struct irq_work *work) { if (trace_ipi_send_cpu_enabled() && arch_irq_work_has_interrupt()) trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func); arch_irq_work_raise(); } /* Enqueue on current CPU, work must already be claimed and preempt disabled */ static void __irq_work_queue_local(struct irq_work *work) { struct llist_head *list; bool rt_lazy_work = false; bool lazy_work = false; int work_flags; work_flags = atomic_read(&work->node.a_flags); if (work_flags & IRQ_WORK_LAZY) lazy_work = true; else if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(work_flags & IRQ_WORK_HARD_IRQ)) rt_lazy_work = true; if (lazy_work || rt_lazy_work) list = this_cpu_ptr(&lazy_list); else list = this_cpu_ptr(&raised_list); if (!llist_add(&work->node.llist, list)) return; /* If the work is "lazy", handle it from next tick if any */ if (!lazy_work || tick_nohz_tick_stopped()) irq_work_raise(work); } /* Enqueue the irq work @work on the current CPU */ bool irq_work_queue(struct irq_work *work) { /* Only queue if not already pending */ if (!irq_work_claim(work)) return false; /* Queue the entry and raise the IPI if needed. */ preempt_disable(); __irq_work_queue_local(work); preempt_enable(); return true; } EXPORT_SYMBOL_GPL(irq_work_queue); /* * Enqueue the irq_work @work on @cpu unless it's already pending * somewhere. * * Can be re-enqueued while the callback is still in progress. */ bool irq_work_queue_on(struct irq_work *work, int cpu) { #ifndef CONFIG_SMP return irq_work_queue(work); #else /* CONFIG_SMP: */ /* All work should have been flushed before going offline */ WARN_ON_ONCE(cpu_is_offline(cpu)); /* Only queue if not already pending */ if (!irq_work_claim(work)) return false; kasan_record_aux_stack_noalloc(work); preempt_disable(); if (cpu != smp_processor_id()) { /* Arch remote IPI send/receive backend aren't NMI safe */ WARN_ON_ONCE(in_nmi()); /* * On PREEMPT_RT the items which are not marked as * IRQ_WORK_HARD_IRQ are added to the lazy list and a HARD work * item is used on the remote CPU to wake the thread. */ if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ)) { if (!llist_add(&work->node.llist, &per_cpu(lazy_list, cpu))) goto out; work = &per_cpu(irq_work_wakeup, cpu); if (!irq_work_claim(work)) goto out; } __smp_call_single_queue(cpu, &work->node.llist); } else { __irq_work_queue_local(work); } out: preempt_enable(); return true; #endif /* CONFIG_SMP */ } bool irq_work_needs_cpu(void) { struct llist_head *raised, *lazy; raised = this_cpu_ptr(&raised_list); lazy = this_cpu_ptr(&lazy_list); if (llist_empty(raised) || arch_irq_work_has_interrupt()) if (llist_empty(lazy)) return false; /* All work should have been flushed before going offline */ WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); return true; } void irq_work_single(void *arg) { struct irq_work *work = arg; int flags; /* * Clear the PENDING bit, after this point the @work can be re-used. * The PENDING bit acts as a lock, and we own it, so we can clear it * without atomic ops. */ flags = atomic_read(&work->node.a_flags); flags &= ~IRQ_WORK_PENDING; atomic_set(&work->node.a_flags, flags); /* * See irq_work_claim(). 
*/ smp_mb(); lockdep_irq_work_enter(flags); work->func(work); lockdep_irq_work_exit(flags); /* * Clear the BUSY bit, if set, and return to the free state if no-one * else claimed it meanwhile. */ (void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY); if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) || !arch_irq_work_has_interrupt()) rcuwait_wake_up(&work->irqwait); } static void irq_work_run_list(struct llist_head *list) { struct irq_work *work, *tmp; struct llist_node *llnode; /* * On PREEMPT_RT IRQ-work which is not marked as HARD will be processed * in a per-CPU thread in preemptible context. Only the items which are * marked as IRQ_WORK_HARD_IRQ will be processed in hardirq context. */ BUG_ON(!irqs_disabled() && !IS_ENABLED(CONFIG_PREEMPT_RT)); if (llist_empty(list)) return; llnode = llist_del_all(list); llist_for_each_entry_safe(work, tmp, llnode, node.llist) irq_work_single(work); } /* * hotplug calls this through: * hotplug_cfd() -> flush_smp_call_function_queue() */ void irq_work_run(void) { irq_work_run_list(this_cpu_ptr(&raised_list)); if (!IS_ENABLED(CONFIG_PREEMPT_RT)) irq_work_run_list(this_cpu_ptr(&lazy_list)); else wake_irq_workd(); } EXPORT_SYMBOL_GPL(irq_work_run); void irq_work_tick(void) { struct llist_head *raised = this_cpu_ptr(&raised_list); if (!llist_empty(raised) && !arch_irq_work_has_interrupt()) irq_work_run_list(raised); if (!IS_ENABLED(CONFIG_PREEMPT_RT)) irq_work_run_list(this_cpu_ptr(&lazy_list)); else wake_irq_workd(); } /* * Synchronize against the irq_work @entry, ensures the entry is not * currently in use. */ void irq_work_sync(struct irq_work *work) { lockdep_assert_irqs_enabled(); might_sleep(); if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) || !arch_irq_work_has_interrupt()) { rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work), TASK_UNINTERRUPTIBLE); return; } while (irq_work_is_busy(work)) cpu_relax(); } EXPORT_SYMBOL_GPL(irq_work_sync); static void run_irq_workd(unsigned int cpu) { irq_work_run_list(this_cpu_ptr(&lazy_list)); } static void irq_workd_setup(unsigned int cpu) { sched_set_fifo_low(current); } static struct smp_hotplug_thread irqwork_threads = { .store = &irq_workd, .setup = irq_workd_setup, .thread_should_run = irq_workd_should_run, .thread_fn = run_irq_workd, .thread_comm = "irq_work/%u", }; static __init int irq_work_init_threads(void) { if (IS_ENABLED(CONFIG_PREEMPT_RT)) BUG_ON(smpboot_register_percpu_thread(&irqwork_threads)); return 0; } early_initcall(irq_work_init_threads); |
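/*
 * Usage sketch (illustrative, not part of this file): a typical client embeds
 * a struct irq_work in its own state, initializes it once, and queues it from
 * NMI or hardirq context; the callback later runs from hard IRQ context (or
 * from the irq_workd thread on PREEMPT_RT).  All names are hypothetical.
 */
#include <linux/irq_work.h>

struct example_event {
	struct irq_work	work;
	u64		payload;
};

static struct example_event example_event;

static void example_event_fn(struct irq_work *work)
{
	struct example_event *e = container_of(work, struct example_event, work);

	pr_info("deferred handling of payload %llu\n", e->payload);
}

static int __init example_init(void)
{
	init_irq_work(&example_event.work, example_event_fn);
	return 0;
}

/*
 * Later, from NMI/hardirq context (NMI-safe; returns false if the work was
 * already pending):
 *
 *	example_event.payload = 42;
 *	irq_work_queue(&example_event.work);
 */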
// SPDX-License-Identifier: GPL-2.0
/*
 * NHPoly1305 - ε-almost-∆-universal hash function for Adiantum
 *
 * Copyright 2018 Google LLC
 */

/*
 * "NHPoly1305" is the main component of Adiantum hashing.
 * Specifically, it is the calculation
 *
 *	H_L ← Poly1305_{K_L}(NH_{K_N}(pad_{128}(L)))
 *
 * from the procedure in section 6.4 of the Adiantum paper [1].  It is an
 * ε-almost-∆-universal (ε-∆U) hash function for equal-length inputs over
 * Z/(2^{128}Z), where the "∆" operation is addition.  It hashes 1024-byte
 * chunks of the input with the NH hash function [2], reducing the input length
 * by 32x.  The resulting NH digests are evaluated as a polynomial in
 * GF(2^{130}-5), like in the Poly1305 MAC [3].  Note that the polynomial
 * evaluation by itself would suffice to achieve the ε-∆U property; NH is used
 * for performance since it's over twice as fast as Poly1305.
 *
 * This is *not* a cryptographic hash function; do not use it as such!
* * [1] Adiantum: length-preserving encryption for entry-level processors * (https://eprint.iacr.org/2018/720.pdf) * [2] UMAC: Fast and Secure Message Authentication * (https://fastcrypto.org/umac/umac_proc.pdf) * [3] The Poly1305-AES message-authentication code * (https://cr.yp.to/mac/poly1305-20050329.pdf) */ #include <asm/unaligned.h> #include <crypto/algapi.h> #include <crypto/internal/hash.h> #include <crypto/internal/poly1305.h> #include <crypto/nhpoly1305.h> #include <linux/crypto.h> #include <linux/kernel.h> #include <linux/module.h> static void nh_generic(const u32 *key, const u8 *message, size_t message_len, __le64 hash[NH_NUM_PASSES]) { u64 sums[4] = { 0, 0, 0, 0 }; BUILD_BUG_ON(NH_PAIR_STRIDE != 2); BUILD_BUG_ON(NH_NUM_PASSES != 4); while (message_len) { u32 m0 = get_unaligned_le32(message + 0); u32 m1 = get_unaligned_le32(message + 4); u32 m2 = get_unaligned_le32(message + 8); u32 m3 = get_unaligned_le32(message + 12); sums[0] += (u64)(u32)(m0 + key[ 0]) * (u32)(m2 + key[ 2]); sums[1] += (u64)(u32)(m0 + key[ 4]) * (u32)(m2 + key[ 6]); sums[2] += (u64)(u32)(m0 + key[ 8]) * (u32)(m2 + key[10]); sums[3] += (u64)(u32)(m0 + key[12]) * (u32)(m2 + key[14]); sums[0] += (u64)(u32)(m1 + key[ 1]) * (u32)(m3 + key[ 3]); sums[1] += (u64)(u32)(m1 + key[ 5]) * (u32)(m3 + key[ 7]); sums[2] += (u64)(u32)(m1 + key[ 9]) * (u32)(m3 + key[11]); sums[3] += (u64)(u32)(m1 + key[13]) * (u32)(m3 + key[15]); key += NH_MESSAGE_UNIT / sizeof(key[0]); message += NH_MESSAGE_UNIT; message_len -= NH_MESSAGE_UNIT; } hash[0] = cpu_to_le64(sums[0]); hash[1] = cpu_to_le64(sums[1]); hash[2] = cpu_to_le64(sums[2]); hash[3] = cpu_to_le64(sums[3]); } /* Pass the next NH hash value through Poly1305 */ static void process_nh_hash_value(struct nhpoly1305_state *state, const struct nhpoly1305_key *key) { BUILD_BUG_ON(NH_HASH_BYTES % POLY1305_BLOCK_SIZE != 0); poly1305_core_blocks(&state->poly_state, &key->poly_key, state->nh_hash, NH_HASH_BYTES / POLY1305_BLOCK_SIZE, 1); } /* * Feed the next portion of the source data, as a whole number of 16-byte * "NH message units", through NH and Poly1305. Each NH hash is taken over * 1024 bytes, except possibly the final one which is taken over a multiple of * 16 bytes up to 1024. Also, in the case where data is passed in misaligned * chunks, we combine partial hashes; the end result is the same either way. 
*/ static void nhpoly1305_units(struct nhpoly1305_state *state, const struct nhpoly1305_key *key, const u8 *src, unsigned int srclen, nh_t nh_fn) { do { unsigned int bytes; if (state->nh_remaining == 0) { /* Starting a new NH message */ bytes = min_t(unsigned int, srclen, NH_MESSAGE_BYTES); nh_fn(key->nh_key, src, bytes, state->nh_hash); state->nh_remaining = NH_MESSAGE_BYTES - bytes; } else { /* Continuing a previous NH message */ __le64 tmp_hash[NH_NUM_PASSES]; unsigned int pos; int i; pos = NH_MESSAGE_BYTES - state->nh_remaining; bytes = min(srclen, state->nh_remaining); nh_fn(&key->nh_key[pos / 4], src, bytes, tmp_hash); for (i = 0; i < NH_NUM_PASSES; i++) le64_add_cpu(&state->nh_hash[i], le64_to_cpu(tmp_hash[i])); state->nh_remaining -= bytes; } if (state->nh_remaining == 0) process_nh_hash_value(state, key); src += bytes; srclen -= bytes; } while (srclen); } int crypto_nhpoly1305_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { struct nhpoly1305_key *ctx = crypto_shash_ctx(tfm); int i; if (keylen != NHPOLY1305_KEY_SIZE) return -EINVAL; poly1305_core_setkey(&ctx->poly_key, key); key += POLY1305_BLOCK_SIZE; for (i = 0; i < NH_KEY_WORDS; i++) ctx->nh_key[i] = get_unaligned_le32(key + i * sizeof(u32)); return 0; } EXPORT_SYMBOL(crypto_nhpoly1305_setkey); int crypto_nhpoly1305_init(struct shash_desc *desc) { struct nhpoly1305_state *state = shash_desc_ctx(desc); poly1305_core_init(&state->poly_state); state->buflen = 0; state->nh_remaining = 0; return 0; } EXPORT_SYMBOL(crypto_nhpoly1305_init); int crypto_nhpoly1305_update_helper(struct shash_desc *desc, const u8 *src, unsigned int srclen, nh_t nh_fn) { struct nhpoly1305_state *state = shash_desc_ctx(desc); const struct nhpoly1305_key *key = crypto_shash_ctx(desc->tfm); unsigned int bytes; if (state->buflen) { bytes = min(srclen, (int)NH_MESSAGE_UNIT - state->buflen); memcpy(&state->buffer[state->buflen], src, bytes); state->buflen += bytes; if (state->buflen < NH_MESSAGE_UNIT) return 0; nhpoly1305_units(state, key, state->buffer, NH_MESSAGE_UNIT, nh_fn); state->buflen = 0; src += bytes; srclen -= bytes; } if (srclen >= NH_MESSAGE_UNIT) { bytes = round_down(srclen, NH_MESSAGE_UNIT); nhpoly1305_units(state, key, src, bytes, nh_fn); src += bytes; srclen -= bytes; } if (srclen) { memcpy(state->buffer, src, srclen); state->buflen = srclen; } return 0; } EXPORT_SYMBOL(crypto_nhpoly1305_update_helper); int crypto_nhpoly1305_update(struct shash_desc *desc, const u8 *src, unsigned int srclen) { return crypto_nhpoly1305_update_helper(desc, src, srclen, nh_generic); } EXPORT_SYMBOL(crypto_nhpoly1305_update); int crypto_nhpoly1305_final_helper(struct shash_desc *desc, u8 *dst, nh_t nh_fn) { struct nhpoly1305_state *state = shash_desc_ctx(desc); const struct nhpoly1305_key *key = crypto_shash_ctx(desc->tfm); if (state->buflen) { memset(&state->buffer[state->buflen], 0, NH_MESSAGE_UNIT - state->buflen); nhpoly1305_units(state, key, state->buffer, NH_MESSAGE_UNIT, nh_fn); } if (state->nh_remaining) process_nh_hash_value(state, key); poly1305_core_emit(&state->poly_state, NULL, dst); return 0; } EXPORT_SYMBOL(crypto_nhpoly1305_final_helper); int crypto_nhpoly1305_final(struct shash_desc *desc, u8 *dst) { return crypto_nhpoly1305_final_helper(desc, dst, nh_generic); } EXPORT_SYMBOL(crypto_nhpoly1305_final); static struct shash_alg nhpoly1305_alg = { .base.cra_name = "nhpoly1305", .base.cra_driver_name = "nhpoly1305-generic", .base.cra_priority = 100, .base.cra_ctxsize = sizeof(struct nhpoly1305_key), .base.cra_module = THIS_MODULE, 
	.digestsize		= POLY1305_DIGEST_SIZE,
	.init			= crypto_nhpoly1305_init,
	.update			= crypto_nhpoly1305_update,
	.final			= crypto_nhpoly1305_final,
	.setkey			= crypto_nhpoly1305_setkey,
	.descsize		= sizeof(struct nhpoly1305_state),
};

static int __init nhpoly1305_mod_init(void)
{
	return crypto_register_shash(&nhpoly1305_alg);
}

static void __exit nhpoly1305_mod_exit(void)
{
	crypto_unregister_shash(&nhpoly1305_alg);
}

subsys_initcall(nhpoly1305_mod_init);
module_exit(nhpoly1305_mod_exit);

MODULE_DESCRIPTION("NHPoly1305 ε-almost-∆-universal hash function");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("nhpoly1305");
MODULE_ALIAS_CRYPTO("nhpoly1305-generic");
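/*
 * Usage sketch (illustrative, not part of this file): computing an NHPoly1305
 * value through the generic shash API.  Remember the warning above: this is
 * an ε-∆U hash, not a cryptographic hash; Adiantum composes it with other
 * primitives.  Error handling is abbreviated and the helper is hypothetical.
 */
#include <crypto/hash.h>
#include <crypto/nhpoly1305.h>
#include <crypto/poly1305.h>

static int example_nhpoly1305(const u8 key[NHPOLY1305_KEY_SIZE],
			      const u8 *data, unsigned int len,
			      u8 out[POLY1305_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("nhpoly1305", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, NHPOLY1305_KEY_SIZE);
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		err = crypto_shash_digest(desc, data, len, out);
	}

	crypto_free_shash(tfm);
	return err;
}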
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PACKET_INTERNAL_H__
#define __PACKET_INTERNAL_H__

#include <linux/refcount.h>

struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};

/* kbdq - kernel block descriptor queue */
struct tpacket_kbdq_core {
	struct pgv	*pkbdq;
	unsigned int	feature_req_word;
	unsigned int	hdrlen;
	unsigned char	reset_pending_on_curr_blk;
	unsigned char	delete_blk_timer;
	unsigned short	kactive_blk_num;
	unsigned short	blk_sizeof_priv;

	/* last_kactive_blk_num:
	 * trick to see if user-space has caught up
	 * in order to avoid refreshing timer when every single pkt arrives.
	 */
	unsigned short	last_kactive_blk_num;

	char		*pkblk_start;
	char		*pkblk_end;
	int		kblk_size;
	unsigned int	max_frame_len;
	unsigned int	knum_blocks;
	uint64_t	knxt_seq_num;
	char		*prev;
	char		*nxt_offset;
	struct sk_buff	*skb;

	rwlock_t	blk_fill_in_prog_lock;

	/* Default is set to 8ms */
#define DEFAULT_PRB_RETIRE_TOV	(8)

	unsigned short	retire_blk_tov;
	unsigned short	version;
	unsigned long	tov_in_jiffies;

	/* timer to retire an outstanding block */
	struct timer_list retire_blk_timer;
};

struct pgv {
	char *buffer;
};

struct packet_ring_buffer {
	struct pgv		*pg_vec;

	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	unsigned int __percpu	*pending_refcnt;

	union {
		unsigned long			*rx_owner_map;
		struct tpacket_kbdq_core	prb_bdqc;
	};
};

extern struct mutex fanout_mutex;
#define PACKET_FANOUT_MAX	(1 << 16)

struct packet_fanout {
	possible_net_t		net;
	unsigned int		num_members;
	u32			max_num_members;
	u16			id;
	u8			type;
	u8			flags;
	union {
		atomic_t		rr_cur;
		struct bpf_prog __rcu	*bpf_prog;
	};
	struct list_head	list;
	spinlock_t		lock;
	refcount_t		sk_ref;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
	struct sock __rcu	*arr[] __counted_by(max_num_members);
};

struct packet_rollover {
	int			sock;
	atomic_long_t		num;
	atomic_long_t		num_huge;
	atomic_long_t		num_failed;
#define ROLLOVER_HLEN	(L1_CACHE_BYTES / sizeof(u32))
	u32			history[ROLLOVER_HLEN] ____cacheline_aligned;
} ____cacheline_aligned_in_smp;

struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct packet_fanout	*fanout;
	union tpacket_stats_u	stats;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned long		flags;
	int			ifindex;	/* bound device */
	u8			vnet_hdr_sz;
	__be16			num;
	struct packet_rollover	*rollover;
	struct packet_mclist	*mclist;
	atomic_long_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_tstamp;
	struct completion	skb_completion;
	struct net_device __rcu	*cached_dev;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
	atomic_t		tp_drops ____cacheline_aligned_in_smp;
};

#define pkt_sk(ptr) container_of_const(ptr, struct packet_sock, sk)

enum packet_sock_flags {
	PACKET_SOCK_ORIGDEV,
	PACKET_SOCK_AUXDATA,
	PACKET_SOCK_TX_HAS_OFF,
	PACKET_SOCK_TP_LOSS,
	PACKET_SOCK_RUNNING,
	PACKET_SOCK_PRESSURE,
	PACKET_SOCK_QDISC_BYPASS,
};

static inline void packet_sock_flag_set(struct packet_sock *po,
					enum packet_sock_flags flag,
					bool val)
{
	if (val)
		set_bit(flag, &po->flags);
	else
		clear_bit(flag, &po->flags);
}

static inline bool packet_sock_flag(const struct packet_sock *po,
				    enum packet_sock_flags flag)
{
	return test_bit(flag, &po->flags);
}

#endif
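/*
 * Usage sketch (illustrative, not part of this header): af_packet.c tests and
 * updates these flags through the helpers above rather than open-coding
 * bitfields, so concurrent readers and writers of po->flags stay safe via
 * set_bit()/test_bit().  E.g. for the qdisc-bypass mode toggled by the
 * PACKET_QDISC_BYPASS socket option (helper name is hypothetical):
 */
static inline bool example_uses_qdisc_bypass(const struct packet_sock *po)
{
	return packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS);
}

/*
 * ...and setting it from setsockopt():
 *
 *	packet_sock_flag_set(po, PACKET_SOCK_QDISC_BYPASS, !!val);
 */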
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for AzureWave 6007 DVB-C/T USB2.0 and clones
 *
 * Copyright (c) Henry Wang <Henry.wang@AzureWave.com>
 *
 * This driver was made publicly available by Terratec, at:
 *	http://linux.terratec.de/files/TERRATEC_H7/20110323_TERRATEC_H7_Linux.tar.gz
 * The original driver's license is GPL, as declared with MODULE_LICENSE()
 *
 * Copyright (c) 2010-2012 Mauro Carvalho Chehab
 *	Driver modified in order to work with the upstream drxk driver; tons
 *	of bugs were fixed, and it was converted to use dvb-usb-v2.
 */

#include "drxk.h"
#include "mt2063.h"
#include <media/dvb_ca_en50221.h>
#include "dvb_usb.h"
#include "cypress_firmware.h"

#define AZ6007_FIRMWARE "dvb-usb-terratec-h7-az6007.fw"

static int az6007_xfer_debug;
module_param_named(xfer_debug, az6007_xfer_debug, int, 0644);
MODULE_PARM_DESC(xfer_debug, "Enable xfer debug");

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

/* Known requests (Cypress FX2 firmware + az6007 "private" ones) */

#define FX2_OED			0xb5
#define AZ6007_READ_DATA	0xb7
#define AZ6007_I2C_RD		0xb9
#define AZ6007_POWER		0xbc
#define AZ6007_I2C_WR		0xbd
#define FX2_SCON1		0xc0
#define AZ6007_TS_THROUGH	0xc7
#define AZ6007_READ_IR		0xb4

struct az6007_device_state {
	struct mutex		mutex;
	struct mutex		ca_mutex;
	struct dvb_ca_en50221	ca;
	unsigned		warm:1;
	int			(*gate_ctrl) (struct dvb_frontend *, int);
	unsigned char		data[4096];
};

static struct drxk_config terratec_h7_drxk = {
	.adr = 0x29,
	.parallel_ts = true,
	.dynamic_clk = true,
	.single_master = true,
	.enable_merr_cfg = true,
	.no_i2c_bridge = false,
	.chunk_size = 64,
	.mpeg_out_clk_strength = 0x02,
	.qam_demod_parameter_count = 2,
	.microcode_name = "dvb-usb-terratec-h7-drxk.fw",
};

static struct drxk_config cablestar_hdci_drxk = {
	.adr = 0x29,
	.parallel_ts = true,
	.dynamic_clk = true,
	.single_master = true,
	.enable_merr_cfg = true,
	.no_i2c_bridge = false,
	.chunk_size = 64,
	.mpeg_out_clk_strength = 0x02,
	.qam_demod_parameter_count = 2,
	.microcode_name = "dvb-usb-technisat-cablestar-hdci-drxk.fw",
};

static int drxk_gate_ctrl(struct dvb_frontend *fe, int enable)
{
	struct az6007_device_state *st = fe_to_priv(fe);
	struct dvb_usb_adapter *adap = fe->sec_priv;
	int status = 0;

	pr_debug("%s: %s\n", __func__, enable ? "enable" : "disable");

	if (!adap || !st)
		return -EINVAL;

	if (enable)
		status = st->gate_ctrl(fe, 1);
	else
		status = st->gate_ctrl(fe, 0);

	return status;
}

static struct mt2063_config az6007_mt2063_config = {
	.tuner_address = 0x60,
	.refclock = 36125000,
};

static int __az6007_read(struct usb_device *udev, u8 req, u16 value,
			 u16 index, u8 *b, int blen)
{
	int ret;

	ret = usb_control_msg(udev,
			      usb_rcvctrlpipe(udev, 0),
			      req,
			      USB_TYPE_VENDOR | USB_DIR_IN,
			      value, index, b, blen, 5000);
	if (ret < 0) {
		pr_warn("usb read operation failed. 
(%d)\n", ret); return -EIO; } if (az6007_xfer_debug) { printk(KERN_DEBUG "az6007: IN req: %02x, value: %04x, index: %04x\n", req, value, index); print_hex_dump_bytes("az6007: payload: ", DUMP_PREFIX_NONE, b, blen); } return ret; } static int az6007_read(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) { struct az6007_device_state *st = d->priv; int ret; if (mutex_lock_interruptible(&st->mutex) < 0) return -EAGAIN; ret = __az6007_read(d->udev, req, value, index, b, blen); mutex_unlock(&st->mutex); return ret; } static int __az6007_write(struct usb_device *udev, u8 req, u16 value, u16 index, u8 *b, int blen) { int ret; if (az6007_xfer_debug) { printk(KERN_DEBUG "az6007: OUT req: %02x, value: %04x, index: %04x\n", req, value, index); print_hex_dump_bytes("az6007: payload: ", DUMP_PREFIX_NONE, b, blen); } if (blen > 64) { pr_err("az6007: tried to write %d bytes, but I2C max size is 64 bytes\n", blen); return -EOPNOTSUPP; } ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), req, USB_TYPE_VENDOR | USB_DIR_OUT, value, index, b, blen, 5000); if (ret != blen) { pr_err("usb write operation failed. (%d)\n", ret); return -EIO; } return 0; } static int az6007_write(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) { struct az6007_device_state *st = d->priv; int ret; if (mutex_lock_interruptible(&st->mutex) < 0) return -EAGAIN; ret = __az6007_write(d->udev, req, value, index, b, blen); mutex_unlock(&st->mutex); return ret; } static int az6007_streaming_ctrl(struct dvb_frontend *fe, int onoff) { struct dvb_usb_device *d = fe_to_d(fe); pr_debug("%s: %s\n", __func__, onoff ? "enable" : "disable"); return az6007_write(d, 0xbc, onoff, 0, NULL, 0); } #if IS_ENABLED(CONFIG_RC_CORE) /* remote control stuff (does not work with my box) */ static int az6007_rc_query(struct dvb_usb_device *d) { struct az6007_device_state *st = d_to_priv(d); unsigned code; enum rc_proto proto; if (az6007_read(d, AZ6007_READ_IR, 0, 0, st->data, 10) < 0) return -EIO; if (st->data[1] == 0x44) return 0; if ((st->data[3] ^ st->data[4]) == 0xff) { if ((st->data[1] ^ st->data[2]) == 0xff) { code = RC_SCANCODE_NEC(st->data[1], st->data[3]); proto = RC_PROTO_NEC; } else { code = RC_SCANCODE_NECX(st->data[1] << 8 | st->data[2], st->data[3]); proto = RC_PROTO_NECX; } } else { code = RC_SCANCODE_NEC32(st->data[1] << 24 | st->data[2] << 16 | st->data[3] << 8 | st->data[4]); proto = RC_PROTO_NEC32; } rc_keydown(d->rc_dev, proto, code, st->data[5]); return 0; } static int az6007_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc) { pr_debug("Getting az6007 Remote Control properties\n"); rc->allowed_protos = RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32; rc->query = az6007_rc_query; rc->interval = 400; return 0; } #else #define az6007_get_rc_config NULL #endif static int az6007_ci_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address) { struct dvb_usb_device *d = ca->data; struct az6007_device_state *state = d_to_priv(d); int ret; u8 req; u16 value; u16 index; int blen; u8 *b; if (slot != 0) return -EINVAL; b = kmalloc(12, GFP_KERNEL); if (!b) return -ENOMEM; mutex_lock(&state->ca_mutex); req = 0xC1; value = address; index = 0; blen = 1; ret = az6007_read(d, req, value, index, b, blen); if (ret < 0) { pr_warn("usb in operation failed. 
(%d)\n", ret); ret = -EINVAL; } else { ret = b[0]; } mutex_unlock(&state->ca_mutex); kfree(b); return ret; } static int az6007_ci_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address, u8 value) { struct dvb_usb_device *d = ca->data; struct az6007_device_state *state = d_to_priv(d); int ret; u8 req; u16 value1; u16 index; int blen; pr_debug("%s(), slot %d\n", __func__, slot); if (slot != 0) return -EINVAL; mutex_lock(&state->ca_mutex); req = 0xC2; value1 = address; index = value; blen = 0; ret = az6007_write(d, req, value1, index, NULL, blen); if (ret != 0) pr_warn("usb out operation failed. (%d)\n", ret); mutex_unlock(&state->ca_mutex); return ret; } static int az6007_ci_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address) { struct dvb_usb_device *d = ca->data; struct az6007_device_state *state = d_to_priv(d); int ret; u8 req; u16 value; u16 index; int blen; u8 *b; if (slot != 0) return -EINVAL; b = kmalloc(12, GFP_KERNEL); if (!b) return -ENOMEM; mutex_lock(&state->ca_mutex); req = 0xC3; value = address; index = 0; blen = 2; ret = az6007_read(d, req, value, index, b, blen); if (ret < 0) { pr_warn("usb in operation failed. (%d)\n", ret); ret = -EINVAL; } else { if (b[0] == 0) pr_warn("Read CI IO error\n"); ret = b[1]; pr_debug("read cam data = %x from 0x%x\n", b[1], value); } mutex_unlock(&state->ca_mutex); kfree(b); return ret; } static int az6007_ci_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address, u8 value) { struct dvb_usb_device *d = ca->data; struct az6007_device_state *state = d_to_priv(d); int ret; u8 req; u16 value1; u16 index; int blen; if (slot != 0) return -EINVAL; mutex_lock(&state->ca_mutex); req = 0xC4; value1 = address; index = value; blen = 0; ret = az6007_write(d, req, value1, index, NULL, blen); if (ret != 0) { pr_warn("usb out operation failed. (%d)\n", ret); goto failed; } failed: mutex_unlock(&state->ca_mutex); return ret; } static int CI_CamReady(struct dvb_ca_en50221 *ca, int slot) { struct dvb_usb_device *d = ca->data; int ret; u8 req; u16 value; u16 index; int blen; u8 *b; b = kmalloc(12, GFP_KERNEL); if (!b) return -ENOMEM; req = 0xC8; value = 0; index = 0; blen = 1; ret = az6007_read(d, req, value, index, b, blen); if (ret < 0) { pr_warn("usb in operation failed. (%d)\n", ret); ret = -EIO; } else{ ret = b[0]; } kfree(b); return ret; } static int az6007_ci_slot_reset(struct dvb_ca_en50221 *ca, int slot) { struct dvb_usb_device *d = ca->data; struct az6007_device_state *state = d_to_priv(d); int ret, i; u8 req; u16 value; u16 index; int blen; mutex_lock(&state->ca_mutex); req = 0xC6; value = 1; index = 0; blen = 0; ret = az6007_write(d, req, value, index, NULL, blen); if (ret != 0) { pr_warn("usb out operation failed. (%d)\n", ret); goto failed; } msleep(500); req = 0xC6; value = 0; index = 0; blen = 0; ret = az6007_write(d, req, value, index, NULL, blen); if (ret != 0) { pr_warn("usb out operation failed. 
(%d)\n", ret); goto failed; } for (i = 0; i < 15; i++) { msleep(100); if (CI_CamReady(ca, slot)) { pr_debug("CAM Ready\n"); break; } } msleep(5000); failed: mutex_unlock(&state->ca_mutex); return ret; } static int az6007_ci_slot_shutdown(struct dvb_ca_en50221 *ca, int slot) { return 0; } static int az6007_ci_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot) { struct dvb_usb_device *d = ca->data; struct az6007_device_state *state = d_to_priv(d); int ret; u8 req; u16 value; u16 index; int blen; pr_debug("%s()\n", __func__); mutex_lock(&state->ca_mutex); req = 0xC7; value = 1; index = 0; blen = 0; ret = az6007_write(d, req, value, index, NULL, blen); if (ret != 0) { pr_warn("usb out operation failed. (%d)\n", ret); goto failed; } failed: mutex_unlock(&state->ca_mutex); return ret; } static int az6007_ci_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open) { struct dvb_usb_device *d = ca->data; struct az6007_device_state *state = d_to_priv(d); int ret; u8 req; u16 value; u16 index; int blen; u8 *b; b = kmalloc(12, GFP_KERNEL); if (!b) return -ENOMEM; mutex_lock(&state->ca_mutex); req = 0xC5; value = 0; index = 0; blen = 1; ret = az6007_read(d, req, value, index, b, blen); if (ret < 0) { pr_warn("usb in operation failed. (%d)\n", ret); ret = -EIO; } else ret = 0; if (!ret && b[0] == 1) { ret = DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY; } mutex_unlock(&state->ca_mutex); kfree(b); return ret; } static void az6007_ci_uninit(struct dvb_usb_device *d) { struct az6007_device_state *state; pr_debug("%s()\n", __func__); if (NULL == d) return; state = d_to_priv(d); if (NULL == state) return; if (NULL == state->ca.data) return; dvb_ca_en50221_release(&state->ca); memset(&state->ca, 0, sizeof(state->ca)); } static int az6007_ci_init(struct dvb_usb_adapter *adap) { struct dvb_usb_device *d = adap_to_d(adap); struct az6007_device_state *state = adap_to_priv(adap); int ret; pr_debug("%s()\n", __func__); mutex_init(&state->ca_mutex); state->ca.owner = THIS_MODULE; state->ca.read_attribute_mem = az6007_ci_read_attribute_mem; state->ca.write_attribute_mem = az6007_ci_write_attribute_mem; state->ca.read_cam_control = az6007_ci_read_cam_control; state->ca.write_cam_control = az6007_ci_write_cam_control; state->ca.slot_reset = az6007_ci_slot_reset; state->ca.slot_shutdown = az6007_ci_slot_shutdown; state->ca.slot_ts_enable = az6007_ci_slot_ts_enable; state->ca.poll_slot_status = az6007_ci_poll_slot_status; state->ca.data = d; ret = dvb_ca_en50221_init(&adap->dvb_adap, &state->ca, 0, /* flags */ 1);/* n_slots */ if (ret != 0) { pr_err("Cannot initialize CI: Error %d.\n", ret); memset(&state->ca, 0, sizeof(state->ca)); return ret; } pr_debug("CI initialized.\n"); return 0; } static int az6007_read_mac_addr(struct dvb_usb_adapter *adap, u8 mac[6]) { struct dvb_usb_device *d = adap_to_d(adap); struct az6007_device_state *st = adap_to_priv(adap); int ret; ret = az6007_read(d, AZ6007_READ_DATA, 6, 0, st->data, 6); memcpy(mac, st->data, 6); if (ret > 0) pr_debug("%s: mac is %pM\n", __func__, mac); return ret; } static int az6007_frontend_attach(struct dvb_usb_adapter *adap) { struct az6007_device_state *st = adap_to_priv(adap); struct dvb_usb_device *d = adap_to_d(adap); pr_debug("attaching demod drxk\n"); adap->fe[0] = dvb_attach(drxk_attach, &terratec_h7_drxk, &d->i2c_adap); if (!adap->fe[0]) return -EINVAL; adap->fe[0]->sec_priv = adap; st->gate_ctrl = adap->fe[0]->ops.i2c_gate_ctrl; adap->fe[0]->ops.i2c_gate_ctrl = drxk_gate_ctrl; az6007_ci_init(adap); return 0; } static int 
az6007_cablestar_hdci_frontend_attach(struct dvb_usb_adapter *adap) { struct az6007_device_state *st = adap_to_priv(adap); struct dvb_usb_device *d = adap_to_d(adap); pr_debug("attaching demod drxk\n"); adap->fe[0] = dvb_attach(drxk_attach, &cablestar_hdci_drxk, &d->i2c_adap); if (!adap->fe[0]) return -EINVAL; adap->fe[0]->sec_priv = adap; st->gate_ctrl = adap->fe[0]->ops.i2c_gate_ctrl; adap->fe[0]->ops.i2c_gate_ctrl = drxk_gate_ctrl; az6007_ci_init(adap); return 0; } static int az6007_tuner_attach(struct dvb_usb_adapter *adap) { struct dvb_usb_device *d = adap_to_d(adap); pr_debug("attaching tuner mt2063\n"); /* Attach mt2063 to DVB-C frontend */ if (adap->fe[0]->ops.i2c_gate_ctrl) adap->fe[0]->ops.i2c_gate_ctrl(adap->fe[0], 1); if (!dvb_attach(mt2063_attach, adap->fe[0], &az6007_mt2063_config, &d->i2c_adap)) return -EINVAL; if (adap->fe[0]->ops.i2c_gate_ctrl) adap->fe[0]->ops.i2c_gate_ctrl(adap->fe[0], 0); return 0; } static int az6007_power_ctrl(struct dvb_usb_device *d, int onoff) { struct az6007_device_state *state = d_to_priv(d); int ret; pr_debug("%s()\n", __func__); if (!state->warm) { mutex_init(&state->mutex); ret = az6007_write(d, AZ6007_POWER, 0, 2, NULL, 0); if (ret < 0) return ret; msleep(60); ret = az6007_write(d, AZ6007_POWER, 1, 4, NULL, 0); if (ret < 0) return ret; msleep(100); ret = az6007_write(d, AZ6007_POWER, 1, 3, NULL, 0); if (ret < 0) return ret; msleep(20); ret = az6007_write(d, AZ6007_POWER, 1, 4, NULL, 0); if (ret < 0) return ret; msleep(400); ret = az6007_write(d, FX2_SCON1, 0, 3, NULL, 0); if (ret < 0) return ret; msleep(150); ret = az6007_write(d, FX2_SCON1, 1, 3, NULL, 0); if (ret < 0) return ret; msleep(430); ret = az6007_write(d, AZ6007_POWER, 0, 0, NULL, 0); if (ret < 0) return ret; state->warm = true; return 0; } if (!onoff) return 0; az6007_write(d, AZ6007_POWER, 0, 0, NULL, 0); az6007_write(d, AZ6007_TS_THROUGH, 0, 0, NULL, 0); return 0; } /* I2C */ static int az6007_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); struct az6007_device_state *st = d_to_priv(d); int i, j, len; int ret = 0; u16 index; u16 value; int length; u8 req, addr; if (mutex_lock_interruptible(&st->mutex) < 0) return -EAGAIN; for (i = 0; i < num; i++) { addr = msgs[i].addr << 1; if (((i + 1) < num) && (msgs[i].len == 1) && ((msgs[i].flags & I2C_M_RD) != I2C_M_RD) && (msgs[i + 1].flags & I2C_M_RD) && (msgs[i].addr == msgs[i + 1].addr)) { /* * A write + read xfer for the same address, where * the first xfer has just 1 byte length. 
* Need to join both into one operation */ if (az6007_xfer_debug) printk(KERN_DEBUG "az6007: I2C W/R addr=0x%x len=%d/%d\n", addr, msgs[i].len, msgs[i + 1].len); req = AZ6007_I2C_RD; index = msgs[i].buf[0]; value = addr | (1 << 8); length = 6 + msgs[i + 1].len; len = msgs[i + 1].len; ret = __az6007_read(d->udev, req, value, index, st->data, length); if (ret >= len) { for (j = 0; j < len; j++) msgs[i + 1].buf[j] = st->data[j + 5]; } else ret = -EIO; i++; } else if (!(msgs[i].flags & I2C_M_RD)) { /* write bytes */ if (az6007_xfer_debug) printk(KERN_DEBUG "az6007: I2C W addr=0x%x len=%d\n", addr, msgs[i].len); if (msgs[i].len < 1) { ret = -EIO; goto err; } req = AZ6007_I2C_WR; index = msgs[i].buf[0]; value = addr | (1 << 8); length = msgs[i].len - 1; len = msgs[i].len - 1; for (j = 0; j < len; j++) st->data[j] = msgs[i].buf[j + 1]; ret = __az6007_write(d->udev, req, value, index, st->data, length); } else { /* read bytes */ if (az6007_xfer_debug) printk(KERN_DEBUG "az6007: I2C R addr=0x%x len=%d\n", addr, msgs[i].len); if (msgs[i].len < 1) { ret = -EIO; goto err; } req = AZ6007_I2C_RD; index = msgs[i].buf[0]; value = addr; length = msgs[i].len + 6; len = msgs[i].len; ret = __az6007_read(d->udev, req, value, index, st->data, length); for (j = 0; j < len; j++) msgs[i].buf[j] = st->data[j + 5]; } if (ret < 0) goto err; } err: mutex_unlock(&st->mutex); if (ret < 0) { pr_info("%s ERROR: %i\n", __func__, ret); return ret; } return num; } static u32 az6007_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm az6007_i2c_algo = { .master_xfer = az6007_i2c_xfer, .functionality = az6007_i2c_func, }; static int az6007_identify_state(struct dvb_usb_device *d, const char **name) { int ret; u8 *mac; pr_debug("Identifying az6007 state\n"); mac = kmalloc(6, GFP_ATOMIC); if (!mac) return -ENOMEM; /* Try to read the mac address */ ret = __az6007_read(d->udev, AZ6007_READ_DATA, 6, 0, mac, 6); if (ret == 6) ret = WARM; else ret = COLD; kfree(mac); if (ret == COLD) { __az6007_write(d->udev, 0x09, 1, 0, NULL, 0); __az6007_write(d->udev, 0x00, 0, 0, NULL, 0); __az6007_write(d->udev, 0x00, 0, 0, NULL, 0); } pr_debug("Device is on %s state\n", ret == WARM ? 
"warm" : "cold"); return ret; } static void az6007_usb_disconnect(struct usb_interface *intf) { struct dvb_usb_device *d = usb_get_intfdata(intf); az6007_ci_uninit(d); dvb_usbv2_disconnect(intf); } static int az6007_download_firmware(struct dvb_usb_device *d, const struct firmware *fw) { pr_debug("Loading az6007 firmware\n"); return cypress_load_firmware(d->udev, fw, CYPRESS_FX2); } /* DVB USB Driver stuff */ static struct dvb_usb_device_properties az6007_props = { .driver_name = KBUILD_MODNAME, .owner = THIS_MODULE, .firmware = AZ6007_FIRMWARE, .adapter_nr = adapter_nr, .size_of_priv = sizeof(struct az6007_device_state), .i2c_algo = &az6007_i2c_algo, .tuner_attach = az6007_tuner_attach, .frontend_attach = az6007_frontend_attach, .streaming_ctrl = az6007_streaming_ctrl, .get_rc_config = az6007_get_rc_config, .read_mac_address = az6007_read_mac_addr, .download_firmware = az6007_download_firmware, .identify_state = az6007_identify_state, .power_ctrl = az6007_power_ctrl, .num_adapters = 1, .adapter = { { .stream = DVB_USB_STREAM_BULK(0x02, 10, 4096), } } }; static struct dvb_usb_device_properties az6007_cablestar_hdci_props = { .driver_name = KBUILD_MODNAME, .owner = THIS_MODULE, .firmware = AZ6007_FIRMWARE, .adapter_nr = adapter_nr, .size_of_priv = sizeof(struct az6007_device_state), .i2c_algo = &az6007_i2c_algo, .tuner_attach = az6007_tuner_attach, .frontend_attach = az6007_cablestar_hdci_frontend_attach, .streaming_ctrl = az6007_streaming_ctrl, /* ditch get_rc_config as it can't work (TS35 remote, I believe it's rc5) */ .get_rc_config = NULL, .read_mac_address = az6007_read_mac_addr, .download_firmware = az6007_download_firmware, .identify_state = az6007_identify_state, .power_ctrl = az6007_power_ctrl, .num_adapters = 1, .adapter = { { .stream = DVB_USB_STREAM_BULK(0x02, 10, 4096), } } }; static const struct usb_device_id az6007_usb_table[] = { {DVB_USB_DEVICE(USB_VID_AZUREWAVE, USB_PID_AZUREWAVE_6007, &az6007_props, "Azurewave 6007", RC_MAP_EMPTY)}, {DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_H7, &az6007_props, "Terratec H7", RC_MAP_NEC_TERRATEC_CINERGY_XS)}, {DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_H7_2, &az6007_props, "Terratec H7", RC_MAP_NEC_TERRATEC_CINERGY_XS)}, {DVB_USB_DEVICE(USB_VID_TECHNISAT, USB_PID_TECHNISAT_USB2_CABLESTAR_HDCI, &az6007_cablestar_hdci_props, "Technisat CableStar Combo HD CI", RC_MAP_EMPTY)}, {0}, }; MODULE_DEVICE_TABLE(usb, az6007_usb_table); static int az6007_suspend(struct usb_interface *intf, pm_message_t msg) { struct dvb_usb_device *d = usb_get_intfdata(intf); az6007_ci_uninit(d); return dvb_usbv2_suspend(intf, msg); } static int az6007_resume(struct usb_interface *intf) { struct dvb_usb_device *d = usb_get_intfdata(intf); struct dvb_usb_adapter *adap = &d->adapter[0]; az6007_ci_init(adap); return dvb_usbv2_resume(intf); } /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver az6007_usb_driver = { .name = KBUILD_MODNAME, .id_table = az6007_usb_table, .probe = dvb_usbv2_probe, .disconnect = az6007_usb_disconnect, .no_dynamic_id = 1, .soft_unbind = 1, /* * FIXME: need to implement reset_resume, likely with * dvb-usb-v2 core support */ .suspend = az6007_suspend, .resume = az6007_resume, }; module_usb_driver(az6007_usb_driver); MODULE_AUTHOR("Henry Wang <Henry.wang@AzureWave.com>"); MODULE_AUTHOR("Mauro Carvalho Chehab"); MODULE_DESCRIPTION("Driver for AzureWave 6007 DVB-C/T USB2.0 and clones"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(AZ6007_FIRMWARE); |
/* net/tipc/udp_media.c: IP bearer support for TIPC
 *
 * Copyright (c) 2015, Ericsson AB
 * All rights reserved.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <linux/socket.h> #include <linux/ip.h> #include <linux/udp.h> #include <linux/inet.h> #include <linux/inetdevice.h> #include <linux/igmp.h> #include <linux/kernel.h> #include <linux/workqueue.h> #include <linux/list.h> #include <net/sock.h> #include <net/ip.h> #include <net/udp_tunnel.h> #include <net/ipv6_stubs.h> #include <linux/tipc_netlink.h> #include "core.h" #include "addr.h" #include "net.h" #include "bearer.h" #include "netlink.h" #include "msg.h" #include "udp_media.h" /* IANA assigned UDP port */ #define UDP_PORT_DEFAULT 6118 #define UDP_MIN_HEADROOM 48 /** * struct udp_media_addr - IP/UDP addressing information * * This is the bearer level originating address used in neighbor discovery * messages, and all fields should be in network byte order * * @proto: Ethernet protocol in use * @port: port being used * @ipv4: IPv4 address of neighbor * @ipv6: IPv6 address of neighbor */ struct udp_media_addr { __be16 proto; __be16 port; union { struct in_addr ipv4; struct in6_addr ipv6; }; }; /* struct udp_replicast - container for UDP remote addresses */ struct udp_replicast { struct udp_media_addr addr; struct dst_cache dst_cache; struct rcu_head rcu; struct list_head list; }; /** * struct udp_bearer - ip/udp bearer data structure * @bearer: associated generic tipc bearer * @ubsock: bearer associated socket * @ifindex: local address scope * @work: used to schedule deferred work on a bearer * @rcast: associated udp_replicast container */ struct udp_bearer { struct tipc_bearer __rcu *bearer; struct socket *ubsock; u32 ifindex; struct work_struct work; struct udp_replicast rcast; }; static int tipc_udp_is_mcast_addr(struct udp_media_addr *addr) { if (ntohs(addr->proto) == ETH_P_IP) return ipv4_is_multicast(addr->ipv4.s_addr); #if IS_ENABLED(CONFIG_IPV6) else return ipv6_addr_is_multicast(&addr->ipv6); #endif return 0; } /* udp_media_addr_set - convert a ip/udp address 
to a TIPC media address */ static void tipc_udp_media_addr_set(struct tipc_media_addr *addr, struct udp_media_addr *ua) { memset(addr, 0, sizeof(struct tipc_media_addr)); addr->media_id = TIPC_MEDIA_TYPE_UDP; memcpy(addr->value, ua, sizeof(struct udp_media_addr)); if (tipc_udp_is_mcast_addr(ua)) addr->broadcast = TIPC_BROADCAST_SUPPORT; } /* tipc_udp_addr2str - convert ip/udp address to string */ static int tipc_udp_addr2str(struct tipc_media_addr *a, char *buf, int size) { struct udp_media_addr *ua = (struct udp_media_addr *)&a->value; if (ntohs(ua->proto) == ETH_P_IP) snprintf(buf, size, "%pI4:%u", &ua->ipv4, ntohs(ua->port)); else if (ntohs(ua->proto) == ETH_P_IPV6) snprintf(buf, size, "%pI6:%u", &ua->ipv6, ntohs(ua->port)); else { pr_err("Invalid UDP media address\n"); return 1; } return 0; } /* tipc_udp_msg2addr - extract an ip/udp address from a TIPC ndisc message */ static int tipc_udp_msg2addr(struct tipc_bearer *b, struct tipc_media_addr *a, char *msg) { struct udp_media_addr *ua; ua = (struct udp_media_addr *) (msg + TIPC_MEDIA_ADDR_OFFSET); if (msg[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_UDP) return -EINVAL; tipc_udp_media_addr_set(a, ua); return 0; } /* tipc_udp_addr2msg - write an ip/udp address to a TIPC ndisc message */ static int tipc_udp_addr2msg(char *msg, struct tipc_media_addr *a) { memset(msg, 0, TIPC_MEDIA_INFO_SIZE); msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_UDP; memcpy(msg + TIPC_MEDIA_ADDR_OFFSET, a->value, sizeof(struct udp_media_addr)); return 0; } /* tipc_send_msg - enqueue a send request */ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb, struct udp_bearer *ub, struct udp_media_addr *src, struct udp_media_addr *dst, struct dst_cache *cache) { struct dst_entry *ndst; int ttl, err = 0; local_bh_disable(); ndst = dst_cache_get(cache); if (dst->proto == htons(ETH_P_IP)) { struct rtable *rt = dst_rtable(ndst); if (!rt) { struct flowi4 fl = { .daddr = dst->ipv4.s_addr, .saddr = src->ipv4.s_addr, .flowi4_mark = skb->mark, .flowi4_proto = IPPROTO_UDP }; rt = ip_route_output_key(net, &fl); if (IS_ERR(rt)) { err = PTR_ERR(rt); goto tx_error; } dst_cache_set_ip4(cache, &rt->dst, fl.saddr); } ttl = ip4_dst_hoplimit(&rt->dst); udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb, src->ipv4.s_addr, dst->ipv4.s_addr, 0, ttl, 0, src->port, dst->port, false, true); #if IS_ENABLED(CONFIG_IPV6) } else { if (!ndst) { struct flowi6 fl6 = { .flowi6_oif = ub->ifindex, .daddr = dst->ipv6, .saddr = src->ipv6, .flowi6_proto = IPPROTO_UDP }; ndst = ipv6_stub->ipv6_dst_lookup_flow(net, ub->ubsock->sk, &fl6, NULL); if (IS_ERR(ndst)) { err = PTR_ERR(ndst); goto tx_error; } dst_cache_set_ip6(cache, ndst, &fl6.saddr); } ttl = ip6_dst_hoplimit(ndst); err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb, NULL, &src->ipv6, &dst->ipv6, 0, ttl, 0, src->port, dst->port, false); #endif } local_bh_enable(); return err; tx_error: local_bh_enable(); kfree_skb(skb); return err; } static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb, struct tipc_bearer *b, struct tipc_media_addr *addr) { struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value; struct udp_media_addr *dst = (struct udp_media_addr *)&addr->value; struct udp_replicast *rcast; struct udp_bearer *ub; int err = 0; if (skb_headroom(skb) < UDP_MIN_HEADROOM) { err = pskb_expand_head(skb, UDP_MIN_HEADROOM, 0, GFP_ATOMIC); if (err) goto out; } skb_set_inner_protocol(skb, htons(ETH_P_TIPC)); ub = rcu_dereference(b->media_ptr); if (!ub) { err = -ENODEV; goto out; } if (addr->broadcast != TIPC_REPLICAST_SUPPORT) 
return tipc_udp_xmit(net, skb, ub, src, dst, &ub->rcast.dst_cache); /* Replicast, send an skb to each configured IP address */ list_for_each_entry_rcu(rcast, &ub->rcast.list, list) { struct sk_buff *_skb; _skb = pskb_copy(skb, GFP_ATOMIC); if (!_skb) { err = -ENOMEM; goto out; } err = tipc_udp_xmit(net, _skb, ub, src, &rcast->addr, &rcast->dst_cache); if (err) goto out; } err = 0; out: kfree_skb(skb); return err; } static bool tipc_udp_is_known_peer(struct tipc_bearer *b, struct udp_media_addr *addr) { struct udp_replicast *rcast, *tmp; struct udp_bearer *ub; ub = rcu_dereference_rtnl(b->media_ptr); if (!ub) { pr_err_ratelimited("UDP bearer instance not found\n"); return false; } list_for_each_entry_safe(rcast, tmp, &ub->rcast.list, list) { if (!memcmp(&rcast->addr, addr, sizeof(struct udp_media_addr))) return true; } return false; } static int tipc_udp_rcast_add(struct tipc_bearer *b, struct udp_media_addr *addr) { struct udp_replicast *rcast; struct udp_bearer *ub; ub = rcu_dereference_rtnl(b->media_ptr); if (!ub) return -ENODEV; rcast = kmalloc(sizeof(*rcast), GFP_ATOMIC); if (!rcast) return -ENOMEM; if (dst_cache_init(&rcast->dst_cache, GFP_ATOMIC)) { kfree(rcast); return -ENOMEM; } memcpy(&rcast->addr, addr, sizeof(struct udp_media_addr)); if (ntohs(addr->proto) == ETH_P_IP) pr_info("New replicast peer: %pI4\n", &rcast->addr.ipv4); #if IS_ENABLED(CONFIG_IPV6) else if (ntohs(addr->proto) == ETH_P_IPV6) pr_info("New replicast peer: %pI6\n", &rcast->addr.ipv6); #endif b->bcast_addr.broadcast = TIPC_REPLICAST_SUPPORT; list_add_rcu(&rcast->list, &ub->rcast.list); return 0; } static int tipc_udp_rcast_disc(struct tipc_bearer *b, struct sk_buff *skb) { struct udp_media_addr src = {0}; struct udp_media_addr *dst; dst = (struct udp_media_addr *)&b->bcast_addr.value; if (tipc_udp_is_mcast_addr(dst)) return 0; src.port = udp_hdr(skb)->source; if (ip_hdr(skb)->version == 4) { struct iphdr *iphdr = ip_hdr(skb); src.proto = htons(ETH_P_IP); src.ipv4.s_addr = iphdr->saddr; if (ipv4_is_multicast(iphdr->daddr)) return 0; #if IS_ENABLED(CONFIG_IPV6) } else if (ip_hdr(skb)->version == 6) { struct ipv6hdr *iphdr = ipv6_hdr(skb); src.proto = htons(ETH_P_IPV6); src.ipv6 = iphdr->saddr; if (ipv6_addr_is_multicast(&iphdr->daddr)) return 0; #endif } else { return 0; } if (likely(tipc_udp_is_known_peer(b, &src))) return 0; return tipc_udp_rcast_add(b, &src); } /* tipc_udp_recv - read data from bearer socket */ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb) { struct udp_bearer *ub; struct tipc_bearer *b; struct tipc_msg *hdr; int err; ub = rcu_dereference_sk_user_data(sk); if (!ub) { pr_err_ratelimited("Failed to get UDP bearer reference"); goto out; } skb_pull(skb, sizeof(struct udphdr)); hdr = buf_msg(skb); b = rcu_dereference(ub->bearer); if (!b) goto out; if (b && test_bit(0, &b->up)) { TIPC_SKB_CB(skb)->flags = 0; tipc_rcv(sock_net(sk), skb, b); return 0; } if (unlikely(msg_user(hdr) == LINK_CONFIG)) { err = tipc_udp_rcast_disc(b, skb); if (err) goto out; } out: kfree_skb(skb); return 0; } static int enable_mcast(struct udp_bearer *ub, struct udp_media_addr *remote) { int err = 0; struct ip_mreqn mreqn; struct sock *sk = ub->ubsock->sk; if (ntohs(remote->proto) == ETH_P_IP) { mreqn.imr_multiaddr = remote->ipv4; mreqn.imr_ifindex = ub->ifindex; err = ip_mc_join_group(sk, &mreqn); #if IS_ENABLED(CONFIG_IPV6) } else { lock_sock(sk); err = ipv6_stub->ipv6_sock_mc_join(sk, ub->ifindex, &remote->ipv6); release_sock(sk); #endif } return err; } static int __tipc_nl_add_udp_addr(struct sk_buff *skb, 
struct udp_media_addr *addr, int nla_t) { if (ntohs(addr->proto) == ETH_P_IP) { struct sockaddr_in ip4; memset(&ip4, 0, sizeof(ip4)); ip4.sin_family = AF_INET; ip4.sin_port = addr->port; ip4.sin_addr.s_addr = addr->ipv4.s_addr; if (nla_put(skb, nla_t, sizeof(ip4), &ip4)) return -EMSGSIZE; #if IS_ENABLED(CONFIG_IPV6) } else if (ntohs(addr->proto) == ETH_P_IPV6) { struct sockaddr_in6 ip6; memset(&ip6, 0, sizeof(ip6)); ip6.sin6_family = AF_INET6; ip6.sin6_port = addr->port; memcpy(&ip6.sin6_addr, &addr->ipv6, sizeof(struct in6_addr)); if (nla_put(skb, nla_t, sizeof(ip6), &ip6)) return -EMSGSIZE; #endif } return 0; } int tipc_udp_nl_dump_remoteip(struct sk_buff *skb, struct netlink_callback *cb) { u32 bid = cb->args[0]; u32 skip_cnt = cb->args[1]; u32 portid = NETLINK_CB(cb->skb).portid; struct udp_replicast *rcast, *tmp; struct tipc_bearer *b; struct udp_bearer *ub; void *hdr; int err; int i; if (!bid && !skip_cnt) { struct nlattr **attrs = genl_dumpit_info(cb)->info.attrs; struct net *net = sock_net(skb->sk); struct nlattr *battrs[TIPC_NLA_BEARER_MAX + 1]; char *bname; if (!attrs[TIPC_NLA_BEARER]) return -EINVAL; err = nla_parse_nested_deprecated(battrs, TIPC_NLA_BEARER_MAX, attrs[TIPC_NLA_BEARER], tipc_nl_bearer_policy, NULL); if (err) return err; if (!battrs[TIPC_NLA_BEARER_NAME]) return -EINVAL; bname = nla_data(battrs[TIPC_NLA_BEARER_NAME]); rtnl_lock(); b = tipc_bearer_find(net, bname); if (!b) { rtnl_unlock(); return -EINVAL; } bid = b->identity; } else { struct net *net = sock_net(skb->sk); struct tipc_net *tn = net_generic(net, tipc_net_id); rtnl_lock(); b = rtnl_dereference(tn->bearer_list[bid]); if (!b) { rtnl_unlock(); return -EINVAL; } } ub = rtnl_dereference(b->media_ptr); if (!ub) { rtnl_unlock(); return -EINVAL; } i = 0; list_for_each_entry_safe(rcast, tmp, &ub->rcast.list, list) { if (i < skip_cnt) goto count; hdr = genlmsg_put(skb, portid, cb->nlh->nlmsg_seq, &tipc_genl_family, NLM_F_MULTI, TIPC_NL_BEARER_GET); if (!hdr) goto done; err = __tipc_nl_add_udp_addr(skb, &rcast->addr, TIPC_NLA_UDP_REMOTE); if (err) { genlmsg_cancel(skb, hdr); goto done; } genlmsg_end(skb, hdr); count: i++; } done: rtnl_unlock(); cb->args[0] = bid; cb->args[1] = i; return skb->len; } int tipc_udp_nl_add_bearer_data(struct tipc_nl_msg *msg, struct tipc_bearer *b) { struct udp_media_addr *src = (struct udp_media_addr *)&b->addr.value; struct udp_media_addr *dst; struct udp_bearer *ub; struct nlattr *nest; ub = rtnl_dereference(b->media_ptr); if (!ub) return -ENODEV; nest = nla_nest_start_noflag(msg->skb, TIPC_NLA_BEARER_UDP_OPTS); if (!nest) goto msg_full; if (__tipc_nl_add_udp_addr(msg->skb, src, TIPC_NLA_UDP_LOCAL)) goto msg_full; dst = (struct udp_media_addr *)&b->bcast_addr.value; if (__tipc_nl_add_udp_addr(msg->skb, dst, TIPC_NLA_UDP_REMOTE)) goto msg_full; if (!list_empty(&ub->rcast.list)) { if (nla_put_flag(msg->skb, TIPC_NLA_UDP_MULTI_REMOTEIP)) goto msg_full; } nla_nest_end(msg->skb, nest); return 0; msg_full: nla_nest_cancel(msg->skb, nest); return -EMSGSIZE; } /** * tipc_parse_udp_addr - build udp media address from netlink data * @nla: netlink attribute containing sockaddr storage aligned address * @addr: tipc media address to fill with address, port and protocol type * @scope_id: IPv6 scope id pointer, not NULL indicates it's required */ static int tipc_parse_udp_addr(struct nlattr *nla, struct udp_media_addr *addr, u32 *scope_id) { struct sockaddr_storage sa; nla_memcpy(&sa, nla, sizeof(sa)); if (sa.ss_family == AF_INET) { struct sockaddr_in *ip4 = (struct sockaddr_in *)&sa; addr->proto 
= htons(ETH_P_IP); addr->port = ip4->sin_port; addr->ipv4.s_addr = ip4->sin_addr.s_addr; return 0; #if IS_ENABLED(CONFIG_IPV6) } else if (sa.ss_family == AF_INET6) { struct sockaddr_in6 *ip6 = (struct sockaddr_in6 *)&sa; addr->proto = htons(ETH_P_IPV6); addr->port = ip6->sin6_port; memcpy(&addr->ipv6, &ip6->sin6_addr, sizeof(struct in6_addr)); /* Scope ID is only interesting for local addresses */ if (scope_id) { int atype; atype = ipv6_addr_type(&ip6->sin6_addr); if (__ipv6_addr_needs_scope_id(atype) && !ip6->sin6_scope_id) { return -EINVAL; } *scope_id = ip6->sin6_scope_id ? : 0; } return 0; #endif } return -EADDRNOTAVAIL; } int tipc_udp_nl_bearer_add(struct tipc_bearer *b, struct nlattr *attr) { int err; struct udp_media_addr addr = {0}; struct nlattr *opts[TIPC_NLA_UDP_MAX + 1]; struct udp_media_addr *dst; if (nla_parse_nested_deprecated(opts, TIPC_NLA_UDP_MAX, attr, tipc_nl_udp_policy, NULL)) return -EINVAL; if (!opts[TIPC_NLA_UDP_REMOTE]) return -EINVAL; err = tipc_parse_udp_addr(opts[TIPC_NLA_UDP_REMOTE], &addr, NULL); if (err) return err; dst = (struct udp_media_addr *)&b->bcast_addr.value; if (tipc_udp_is_mcast_addr(dst)) { pr_err("Can't add remote ip to TIPC UDP multicast bearer\n"); return -EINVAL; } if (tipc_udp_is_known_peer(b, &addr)) return 0; return tipc_udp_rcast_add(b, &addr); } /** * tipc_udp_enable - callback to create a new udp bearer instance * @net: network namespace * @b: pointer to generic tipc_bearer * @attrs: netlink bearer configuration * * validate the bearer parameters and initialize the udp bearer * rtnl_lock should be held */ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b, struct nlattr *attrs[]) { int err = -EINVAL; struct udp_bearer *ub; struct udp_media_addr remote = {0}; struct udp_media_addr local = {0}; struct udp_port_cfg udp_conf = {0}; struct udp_tunnel_sock_cfg tuncfg = {NULL}; struct nlattr *opts[TIPC_NLA_UDP_MAX + 1]; u8 node_id[NODE_ID_LEN] = {0,}; struct net_device *dev; int rmcast = 0; ub = kzalloc(sizeof(*ub), GFP_ATOMIC); if (!ub) return -ENOMEM; INIT_LIST_HEAD(&ub->rcast.list); if (!attrs[TIPC_NLA_BEARER_UDP_OPTS]) goto err; if (nla_parse_nested_deprecated(opts, TIPC_NLA_UDP_MAX, attrs[TIPC_NLA_BEARER_UDP_OPTS], tipc_nl_udp_policy, NULL)) goto err; if (!opts[TIPC_NLA_UDP_LOCAL] || !opts[TIPC_NLA_UDP_REMOTE]) { pr_err("Invalid UDP bearer configuration"); err = -EINVAL; goto err; } err = tipc_parse_udp_addr(opts[TIPC_NLA_UDP_LOCAL], &local, &ub->ifindex); if (err) goto err; err = tipc_parse_udp_addr(opts[TIPC_NLA_UDP_REMOTE], &remote, NULL); if (err) goto err; if (remote.proto != local.proto) { err = -EINVAL; goto err; } /* Checking remote ip address */ rmcast = tipc_udp_is_mcast_addr(&remote); /* Autoconfigure own node identity if needed */ if (!tipc_own_id(net)) { memcpy(node_id, local.ipv6.in6_u.u6_addr8, 16); tipc_net_init(net, node_id, 0); } if (!tipc_own_id(net)) { pr_warn("Failed to set node id, please configure manually\n"); err = -EINVAL; goto err; } b->bcast_addr.media_id = TIPC_MEDIA_TYPE_UDP; b->bcast_addr.broadcast = TIPC_BROADCAST_SUPPORT; rcu_assign_pointer(b->media_ptr, ub); rcu_assign_pointer(ub->bearer, b); tipc_udp_media_addr_set(&b->addr, &local); if (local.proto == htons(ETH_P_IP)) { dev = __ip_dev_find(net, local.ipv4.s_addr, false); if (!dev) { err = -ENODEV; goto err; } udp_conf.family = AF_INET; /* Switch to use ANY to receive packets from group */ if (rmcast) udp_conf.local_ip.s_addr = htonl(INADDR_ANY); else udp_conf.local_ip.s_addr = local.ipv4.s_addr; udp_conf.use_udp_checksums = false; 
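/*
 * (Annotation, not in the original source:) this enable path is reached via
 * TIPC's netlink interface, typically from the userspace tipc tool. A
 * representative invocation, with bearer name and address as placeholders,
 * might look like:
 *
 *	tipc bearer enable media udp name UDP1 localip 192.0.2.1
 *
 * When the configured remote is a multicast group (rmcast above), the local
 * socket is bound to INADDR_ANY so group traffic is accepted on any address.
 */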
ub->ifindex = dev->ifindex; b->encap_hlen = sizeof(struct iphdr) + sizeof(struct udphdr); b->mtu = b->media->mtu; #if IS_ENABLED(CONFIG_IPV6) } else if (local.proto == htons(ETH_P_IPV6)) { dev = ub->ifindex ? __dev_get_by_index(net, ub->ifindex) : NULL; dev = ipv6_dev_find(net, &local.ipv6, dev); if (!dev) { err = -ENODEV; goto err; } udp_conf.family = AF_INET6; udp_conf.use_udp6_tx_checksums = true; udp_conf.use_udp6_rx_checksums = true; if (rmcast) udp_conf.local_ip6 = in6addr_any; else udp_conf.local_ip6 = local.ipv6; ub->ifindex = dev->ifindex; b->encap_hlen = sizeof(struct ipv6hdr) + sizeof(struct udphdr); b->mtu = 1280; #endif } else { err = -EAFNOSUPPORT; goto err; } udp_conf.local_udp_port = local.port; err = udp_sock_create(net, &udp_conf, &ub->ubsock); if (err) goto err; tuncfg.sk_user_data = ub; tuncfg.encap_type = 1; tuncfg.encap_rcv = tipc_udp_recv; tuncfg.encap_destroy = NULL; setup_udp_tunnel_sock(net, ub->ubsock, &tuncfg); err = dst_cache_init(&ub->rcast.dst_cache, GFP_ATOMIC); if (err) goto free; /* * The bcast media address port is used for all peers and the ip * is used if it's a multicast address. */ memcpy(&b->bcast_addr.value, &remote, sizeof(remote)); if (rmcast) err = enable_mcast(ub, &remote); else err = tipc_udp_rcast_add(b, &remote); if (err) goto free; return 0; free: dst_cache_destroy(&ub->rcast.dst_cache); udp_tunnel_sock_release(ub->ubsock); err: kfree(ub); return err; } /* cleanup_bearer - break the socket/bearer association */ static void cleanup_bearer(struct work_struct *work) { struct udp_bearer *ub = container_of(work, struct udp_bearer, work); struct udp_replicast *rcast, *tmp; list_for_each_entry_safe(rcast, tmp, &ub->rcast.list, list) { dst_cache_destroy(&rcast->dst_cache); list_del_rcu(&rcast->list); kfree_rcu(rcast, rcu); } atomic_dec(&tipc_net(sock_net(ub->ubsock->sk))->wq_count); dst_cache_destroy(&ub->rcast.dst_cache); udp_tunnel_sock_release(ub->ubsock); synchronize_net(); kfree(ub); } /* tipc_udp_disable - detach bearer from socket */ static void tipc_udp_disable(struct tipc_bearer *b) { struct udp_bearer *ub; ub = rtnl_dereference(b->media_ptr); if (!ub) { pr_err("UDP bearer instance not found\n"); return; } sock_set_flag(ub->ubsock->sk, SOCK_DEAD); RCU_INIT_POINTER(ub->bearer, NULL); /* sock_release needs to be done outside of rtnl lock */ atomic_inc(&tipc_net(sock_net(ub->ubsock->sk))->wq_count); INIT_WORK(&ub->work, cleanup_bearer); schedule_work(&ub->work); } struct tipc_media udp_media_info = { .send_msg = tipc_udp_send_msg, .enable_media = tipc_udp_enable, .disable_media = tipc_udp_disable, .addr2str = tipc_udp_addr2str, .addr2msg = tipc_udp_addr2msg, .msg2addr = tipc_udp_msg2addr, .priority = TIPC_DEF_LINK_PRI, .tolerance = TIPC_DEF_LINK_TOL, .min_win = TIPC_DEF_LINK_WIN, .max_win = TIPC_DEF_LINK_WIN, .mtu = TIPC_DEF_LINK_UDP_MTU, .type_id = TIPC_MEDIA_TYPE_UDP, .hwaddr_len = 0, .name = "udp" };
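/*
 * Example (illustrative sketch, not part of the original file): how the
 * helpers above fit together for an IPv4 multicast group. The function name
 * is hypothetical and exists only to show udp_media_addr construction,
 * tipc_udp_is_mcast_addr() classification and tipc_udp_media_addr_set().
 */
static void __maybe_unused tipc_udp_media_addr_example(void)
{
	struct udp_media_addr ua = {
		.proto = htons(ETH_P_IP),
		.port = htons(UDP_PORT_DEFAULT),
	};
	struct tipc_media_addr ma;

	/* 228.0.0.1 lies in 224.0.0.0/4, so it is an IPv4 multicast group */
	ua.ipv4.s_addr = htonl(0xe4000001);

	tipc_udp_media_addr_set(&ma, &ua);
	/* tipc_udp_is_mcast_addr(&ua) is true here, so the helper has set
	 * ma.broadcast = TIPC_BROADCAST_SUPPORT for this address.
	 */
}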
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for DiBcom DiB3000MC/P-demodulator. * * Copyright (C) 2004-7 DiBcom (http://www.dibcom.fr/) * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * This code is partially based on the previous dib3000mc.c. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/slab.h> #include <linux/i2c.h> #include <media/dvb_frontend.h> #include "dib3000mc.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "turn on debugging (default: 0)"); static int buggy_sfn_workaround; module_param(buggy_sfn_workaround, int, 0644); MODULE_PARM_DESC(buggy_sfn_workaround, "Enable work-around for buggy SFNs (default: 0)"); #define dprintk(fmt, arg...) do { \ if (debug) \ printk(KERN_DEBUG pr_fmt("%s: " fmt), \ __func__, ##arg); \ } while (0) struct dib3000mc_state { struct dvb_frontend demod; struct dib3000mc_config *cfg; u8 i2c_addr; struct i2c_adapter *i2c_adap; struct dibx000_i2c_master i2c_master; u32 timf; u32 current_bandwidth; u16 dev_id; u8 sfn_workaround_active :1; }; static u16 dib3000mc_read_word(struct dib3000mc_state *state, u16 reg) { struct i2c_msg msg[2] = { { .addr = state->i2c_addr >> 1, .flags = 0, .len = 2 }, { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .len = 2 }, }; u16 word; u8 *b; b = kmalloc(4, GFP_KERNEL); if (!b) return 0; b[0] = (reg >> 8) | 0x80; b[1] = reg; b[2] = 0; b[3] = 0; msg[0].buf = b; msg[1].buf = b + 2; if (i2c_transfer(state->i2c_adap, msg, 2) != 2) dprintk("i2c read error on %d\n", reg); word = (b[2] << 8) | b[3]; kfree(b); return word; } static int dib3000mc_write_word(struct dib3000mc_state *state, u16 reg, u16 val) { struct i2c_msg msg = { .addr = state->i2c_addr >> 1, .flags = 0, .len = 4 }; int rc; u8 *b; b = kmalloc(4, GFP_KERNEL); if (!b) return -ENOMEM; b[0] = reg >> 8; b[1] = reg; b[2] = val >> 8; b[3] = val; msg.buf = b; rc = i2c_transfer(state->i2c_adap, &msg, 1) != 1 ?
-EREMOTEIO : 0; kfree(b); return rc; } static int dib3000mc_identify(struct dib3000mc_state *state) { u16 value; if ((value = dib3000mc_read_word(state, 1025)) != 0x01b3) { dprintk("-E- DiB3000MC/P: wrong Vendor ID (read=0x%x)\n",value); return -EREMOTEIO; } value = dib3000mc_read_word(state, 1026); if (value != 0x3001 && value != 0x3002) { dprintk("-E- DiB3000MC/P: wrong Device ID (%x)\n",value); return -EREMOTEIO; } state->dev_id = value; dprintk("-I- found DiB3000MC/P: %x\n",state->dev_id); return 0; } static int dib3000mc_set_timing(struct dib3000mc_state *state, s16 nfft, u32 bw, u8 update_offset) { u32 timf; if (state->timf == 0) { timf = 1384402; // default value for 8MHz if (update_offset) msleep(200); // first time we do an update } else timf = state->timf; timf *= (bw / 1000); if (update_offset) { s16 tim_offs = dib3000mc_read_word(state, 416); if (tim_offs & 0x2000) tim_offs -= 0x4000; if (nfft == TRANSMISSION_MODE_2K) tim_offs *= 4; timf += tim_offs; state->timf = timf / (bw / 1000); } dprintk("timf: %d\n", timf); dib3000mc_write_word(state, 23, (u16) (timf >> 16)); dib3000mc_write_word(state, 24, (u16) (timf ) & 0xffff); return 0; } static int dib3000mc_setup_pwm_state(struct dib3000mc_state *state) { u16 reg_51, reg_52 = state->cfg->agc->setup & 0xfefb; if (state->cfg->pwm3_inversion) { reg_51 = (2 << 14) | (0 << 10) | (7 << 6) | (2 << 2) | (2 << 0); reg_52 |= (1 << 2); } else { reg_51 = (2 << 14) | (4 << 10) | (7 << 6) | (2 << 2) | (2 << 0); reg_52 |= (1 << 8); } dib3000mc_write_word(state, 51, reg_51); dib3000mc_write_word(state, 52, reg_52); if (state->cfg->use_pwm3) dib3000mc_write_word(state, 245, (1 << 3) | (1 << 0)); else dib3000mc_write_word(state, 245, 0); dib3000mc_write_word(state, 1040, 0x3); return 0; } static int dib3000mc_set_output_mode(struct dib3000mc_state *state, int mode) { int ret = 0; u16 fifo_threshold = 1792; u16 outreg = 0; u16 outmode = 0; u16 elecout = 1; u16 smo_reg = dib3000mc_read_word(state, 206) & 0x0010; /* keep the pid_parse bit */ dprintk("-I- Setting output mode for demod %p to %d\n", &state->demod, mode); switch (mode) { case OUTMODE_HIGH_Z: // disable elecout = 0; break; case OUTMODE_MPEG2_PAR_GATED_CLK: // STBs with parallel gated clock outmode = 0; break; case OUTMODE_MPEG2_PAR_CONT_CLK: // STBs with parallel continues clock outmode = 1; break; case OUTMODE_MPEG2_SERIAL: // STBs with serial input outmode = 2; break; case OUTMODE_MPEG2_FIFO: // e.g. 
USB feeding elecout = 3; /*ADDR @ 206 : P_smo_error_discard [1;6:6] = 0 P_smo_rs_discard [1;5:5] = 0 P_smo_pid_parse [1;4:4] = 0 P_smo_fifo_flush [1;3:3] = 0 P_smo_mode [2;2:1] = 11 P_smo_ovf_prot [1;0:0] = 0 */ smo_reg |= 3 << 1; fifo_threshold = 512; outmode = 5; break; case OUTMODE_DIVERSITY: outmode = 4; elecout = 1; break; default: dprintk("Unhandled output_mode passed to be set for demod %p\n",&state->demod); outmode = 0; break; } if ((state->cfg->output_mpeg2_in_188_bytes)) smo_reg |= (1 << 5); // P_smo_rs_discard [1;5:5] = 1 outreg = dib3000mc_read_word(state, 244) & 0x07FF; outreg |= (outmode << 11); ret |= dib3000mc_write_word(state, 244, outreg); ret |= dib3000mc_write_word(state, 206, smo_reg); /*smo_ mode*/ ret |= dib3000mc_write_word(state, 207, fifo_threshold); /* synchronous fread */ ret |= dib3000mc_write_word(state, 1040, elecout); /* P_out_cfg */ return ret; } static int dib3000mc_set_bandwidth(struct dib3000mc_state *state, u32 bw) { u16 bw_cfg[6] = { 0 }; u16 imp_bw_cfg[3] = { 0 }; u16 reg; /* settings here are for 27.7MHz */ switch (bw) { case 8000: bw_cfg[0] = 0x0019; bw_cfg[1] = 0x5c30; bw_cfg[2] = 0x0054; bw_cfg[3] = 0x88a0; bw_cfg[4] = 0x01a6; bw_cfg[5] = 0xab20; imp_bw_cfg[0] = 0x04db; imp_bw_cfg[1] = 0x00db; imp_bw_cfg[2] = 0x00b7; break; case 7000: bw_cfg[0] = 0x001c; bw_cfg[1] = 0xfba5; bw_cfg[2] = 0x0060; bw_cfg[3] = 0x9c25; bw_cfg[4] = 0x01e3; bw_cfg[5] = 0x0cb7; imp_bw_cfg[0] = 0x04c0; imp_bw_cfg[1] = 0x00c0; imp_bw_cfg[2] = 0x00a0; break; case 6000: bw_cfg[0] = 0x0021; bw_cfg[1] = 0xd040; bw_cfg[2] = 0x0070; bw_cfg[3] = 0xb62b; bw_cfg[4] = 0x0233; bw_cfg[5] = 0x8ed5; imp_bw_cfg[0] = 0x04a5; imp_bw_cfg[1] = 0x00a5; imp_bw_cfg[2] = 0x0089; break; case 5000: bw_cfg[0] = 0x0028; bw_cfg[1] = 0x9380; bw_cfg[2] = 0x0087; bw_cfg[3] = 0x4100; bw_cfg[4] = 0x02a4; bw_cfg[5] = 0x4500; imp_bw_cfg[0] = 0x0489; imp_bw_cfg[1] = 0x0089; imp_bw_cfg[2] = 0x0072; break; default: return -EINVAL; } for (reg = 6; reg < 12; reg++) dib3000mc_write_word(state, reg, bw_cfg[reg - 6]); dib3000mc_write_word(state, 12, 0x0000); dib3000mc_write_word(state, 13, 0x03e8); dib3000mc_write_word(state, 14, 0x0000); dib3000mc_write_word(state, 15, 0x03f2); dib3000mc_write_word(state, 16, 0x0001); dib3000mc_write_word(state, 17, 0xb0d0); // P_sec_len dib3000mc_write_word(state, 18, 0x0393); dib3000mc_write_word(state, 19, 0x8700); for (reg = 55; reg < 58; reg++) dib3000mc_write_word(state, reg, imp_bw_cfg[reg - 55]); // Timing configuration dib3000mc_set_timing(state, TRANSMISSION_MODE_2K, bw, 0); return 0; } static u16 impulse_noise_val[29] = { 0x38, 0x6d9, 0x3f28, 0x7a7, 0x3a74, 0x196, 0x32a, 0x48c, 0x3ffe, 0x7f3, 0x2d94, 0x76, 0x53d, 0x3ff8, 0x7e3, 0x3320, 0x76, 0x5b3, 0x3feb, 0x7d2, 0x365e, 0x76, 0x48c, 0x3ffe, 0x5b3, 0x3feb, 0x76, 0x0000, 0xd }; static void dib3000mc_set_impulse_noise(struct dib3000mc_state *state, u8 mode, s16 nfft) { u16 i; for (i = 58; i < 87; i++) dib3000mc_write_word(state, i, impulse_noise_val[i-58]); if (nfft == TRANSMISSION_MODE_8K) { dib3000mc_write_word(state, 58, 0x3b); dib3000mc_write_word(state, 84, 0x00); dib3000mc_write_word(state, 85, 0x8200); } dib3000mc_write_word(state, 34, 0x1294); dib3000mc_write_word(state, 35, 0x1ff8); if (mode == 1) dib3000mc_write_word(state, 55, dib3000mc_read_word(state, 55) | (1 << 10)); } static int dib3000mc_init(struct dvb_frontend *demod) { struct dib3000mc_state *state = demod->demodulator_priv; struct dibx000_agc_config *agc = state->cfg->agc; // Restart Configuration dib3000mc_write_word(state, 1027, 0x8000); 
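/*
 * (Inferred annotation, no public datasheet:) writing 0x8000 and then 0x0000
 * to register 1027 pulses what appears to be the demod's soft-reset bit, so
 * the register writes that follow configure a freshly reset chip.
 */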
dib3000mc_write_word(state, 1027, 0x0000); // power up the demod + mobility configuration dib3000mc_write_word(state, 140, 0x0000); dib3000mc_write_word(state, 1031, 0); if (state->cfg->mobile_mode) { dib3000mc_write_word(state, 139, 0x0000); dib3000mc_write_word(state, 141, 0x0000); dib3000mc_write_word(state, 175, 0x0002); dib3000mc_write_word(state, 1032, 0x0000); } else { dib3000mc_write_word(state, 139, 0x0001); dib3000mc_write_word(state, 141, 0x0000); dib3000mc_write_word(state, 175, 0x0000); dib3000mc_write_word(state, 1032, 0x012C); } dib3000mc_write_word(state, 1033, 0x0000); // P_clk_cfg dib3000mc_write_word(state, 1037, 0x3130); // other configurations // P_ctrl_sfreq dib3000mc_write_word(state, 33, (5 << 0)); dib3000mc_write_word(state, 88, (1 << 10) | (0x10 << 0)); // Phase noise control // P_fft_phacor_inh, P_fft_phacor_cpe, P_fft_powrange dib3000mc_write_word(state, 99, (1 << 9) | (0x20 << 0)); if (state->cfg->phase_noise_mode == 0) dib3000mc_write_word(state, 111, 0x00); else dib3000mc_write_word(state, 111, 0x02); // P_agc_global dib3000mc_write_word(state, 50, 0x8000); // agc setup misc dib3000mc_setup_pwm_state(state); // P_agc_counter_lock dib3000mc_write_word(state, 53, 0x87); // P_agc_counter_unlock dib3000mc_write_word(state, 54, 0x87); /* agc */ dib3000mc_write_word(state, 36, state->cfg->max_time); dib3000mc_write_word(state, 37, (state->cfg->agc_command1 << 13) | (state->cfg->agc_command2 << 12) | (0x1d << 0)); dib3000mc_write_word(state, 38, state->cfg->pwm3_value); dib3000mc_write_word(state, 39, state->cfg->ln_adc_level); // set_agc_loop_Bw dib3000mc_write_word(state, 40, 0x0179); dib3000mc_write_word(state, 41, 0x03f0); dib3000mc_write_word(state, 42, agc->agc1_max); dib3000mc_write_word(state, 43, agc->agc1_min); dib3000mc_write_word(state, 44, agc->agc2_max); dib3000mc_write_word(state, 45, agc->agc2_min); dib3000mc_write_word(state, 46, (agc->agc1_pt1 << 8) | agc->agc1_pt2); dib3000mc_write_word(state, 47, (agc->agc1_slope1 << 8) | agc->agc1_slope2); dib3000mc_write_word(state, 48, (agc->agc2_pt1 << 8) | agc->agc2_pt2); dib3000mc_write_word(state, 49, (agc->agc2_slope1 << 8) | agc->agc2_slope2); // Begin: TimeOut registers // P_pha3_thres dib3000mc_write_word(state, 110, 3277); // P_timf_alpha = 6, P_corm_alpha = 6, P_corm_thres = 0x80 dib3000mc_write_word(state, 26, 0x6680); // lock_mask0 dib3000mc_write_word(state, 1, 4); // lock_mask1 dib3000mc_write_word(state, 2, 4); // lock_mask2 dib3000mc_write_word(state, 3, 0x1000); // P_search_maxtrial=1 dib3000mc_write_word(state, 5, 1); dib3000mc_set_bandwidth(state, 8000); // div_lock_mask dib3000mc_write_word(state, 4, 0x814); dib3000mc_write_word(state, 21, (1 << 9) | 0x164); dib3000mc_write_word(state, 22, 0x463d); // Spurious rm cfg // P_cspu_regul, P_cspu_win_cut dib3000mc_write_word(state, 120, 0x200f); // P_adp_selec_monit dib3000mc_write_word(state, 134, 0); // Fec cfg dib3000mc_write_word(state, 195, 0x10); // diversity register: P_dvsy_sync_wait.. 
dib3000mc_write_word(state, 180, 0x2FF0); // Impulse noise configuration dib3000mc_set_impulse_noise(state, 0, TRANSMISSION_MODE_8K); // output mode set-up dib3000mc_set_output_mode(state, OUTMODE_HIGH_Z); /* close the i2c-gate */ dib3000mc_write_word(state, 769, (1 << 7) ); return 0; } static int dib3000mc_sleep(struct dvb_frontend *demod) { struct dib3000mc_state *state = demod->demodulator_priv; dib3000mc_write_word(state, 1031, 0xFFFF); dib3000mc_write_word(state, 1032, 0xFFFF); dib3000mc_write_word(state, 1033, 0xFFF0); return 0; } static void dib3000mc_set_adp_cfg(struct dib3000mc_state *state, s16 qam) { u16 cfg[4] = { 0 },reg; switch (qam) { case QPSK: cfg[0] = 0x099a; cfg[1] = 0x7fae; cfg[2] = 0x0333; cfg[3] = 0x7ff0; break; case QAM_16: cfg[0] = 0x023d; cfg[1] = 0x7fdf; cfg[2] = 0x00a4; cfg[3] = 0x7ff0; break; case QAM_64: cfg[0] = 0x0148; cfg[1] = 0x7ff0; cfg[2] = 0x00a4; cfg[3] = 0x7ff8; break; } for (reg = 129; reg < 133; reg++) dib3000mc_write_word(state, reg, cfg[reg - 129]); } static void dib3000mc_set_channel_cfg(struct dib3000mc_state *state, struct dtv_frontend_properties *ch, u16 seq) { u16 value; u32 bw = BANDWIDTH_TO_KHZ(ch->bandwidth_hz); dib3000mc_set_bandwidth(state, bw); dib3000mc_set_timing(state, ch->transmission_mode, bw, 0); #if 1 dib3000mc_write_word(state, 100, (16 << 6) + 9); #else if (boost) dib3000mc_write_word(state, 100, (11 << 6) + 6); else dib3000mc_write_word(state, 100, (16 << 6) + 9); #endif dib3000mc_write_word(state, 1027, 0x0800); dib3000mc_write_word(state, 1027, 0x0000); //Default cfg isi offset adp dib3000mc_write_word(state, 26, 0x6680); dib3000mc_write_word(state, 29, 0x1273); dib3000mc_write_word(state, 33, 5); dib3000mc_set_adp_cfg(state, QAM_16); dib3000mc_write_word(state, 133, 15564); dib3000mc_write_word(state, 12 , 0x0); dib3000mc_write_word(state, 13 , 0x3e8); dib3000mc_write_word(state, 14 , 0x0); dib3000mc_write_word(state, 15 , 0x3f2); dib3000mc_write_word(state, 93,0); dib3000mc_write_word(state, 94,0); dib3000mc_write_word(state, 95,0); dib3000mc_write_word(state, 96,0); dib3000mc_write_word(state, 97,0); dib3000mc_write_word(state, 98,0); dib3000mc_set_impulse_noise(state, 0, ch->transmission_mode); value = 0; switch (ch->transmission_mode) { case TRANSMISSION_MODE_2K: value |= (0 << 7); break; default: case TRANSMISSION_MODE_8K: value |= (1 << 7); break; } switch (ch->guard_interval) { case GUARD_INTERVAL_1_32: value |= (0 << 5); break; case GUARD_INTERVAL_1_16: value |= (1 << 5); break; case GUARD_INTERVAL_1_4: value |= (3 << 5); break; default: case GUARD_INTERVAL_1_8: value |= (2 << 5); break; } switch (ch->modulation) { case QPSK: value |= (0 << 3); break; case QAM_16: value |= (1 << 3); break; default: case QAM_64: value |= (2 << 3); break; } switch (HIERARCHY_1) { case HIERARCHY_2: value |= 2; break; case HIERARCHY_4: value |= 4; break; default: case HIERARCHY_1: value |= 1; break; } dib3000mc_write_word(state, 0, value); dib3000mc_write_word(state, 5, (1 << 8) | ((seq & 0xf) << 4)); value = 0; if (ch->hierarchy == 1) value |= (1 << 4); if (1 == 1) value |= 1; switch ((ch->hierarchy == 0 || 1 == 1) ? 
ch->code_rate_HP : ch->code_rate_LP) { case FEC_2_3: value |= (2 << 1); break; case FEC_3_4: value |= (3 << 1); break; case FEC_5_6: value |= (5 << 1); break; case FEC_7_8: value |= (7 << 1); break; default: case FEC_1_2: value |= (1 << 1); break; } dib3000mc_write_word(state, 181, value); // diversity synchro delay add 50% SFN margin switch (ch->transmission_mode) { case TRANSMISSION_MODE_8K: value = 256; break; case TRANSMISSION_MODE_2K: default: value = 64; break; } switch (ch->guard_interval) { case GUARD_INTERVAL_1_16: value *= 2; break; case GUARD_INTERVAL_1_8: value *= 4; break; case GUARD_INTERVAL_1_4: value *= 8; break; default: case GUARD_INTERVAL_1_32: value *= 1; break; } value <<= 4; value |= dib3000mc_read_word(state, 180) & 0x000f; dib3000mc_write_word(state, 180, value); // restart demod value = dib3000mc_read_word(state, 0); dib3000mc_write_word(state, 0, value | (1 << 9)); dib3000mc_write_word(state, 0, value); msleep(30); dib3000mc_set_impulse_noise(state, state->cfg->impulse_noise_mode, ch->transmission_mode); } static int dib3000mc_autosearch_start(struct dvb_frontend *demod) { struct dtv_frontend_properties *chan = &demod->dtv_property_cache; struct dib3000mc_state *state = demod->demodulator_priv; u16 reg; // u32 val; struct dtv_frontend_properties schan; schan = *chan; /* TODO what is that ? */ /* a channel for autosearch */ schan.transmission_mode = TRANSMISSION_MODE_8K; schan.guard_interval = GUARD_INTERVAL_1_32; schan.modulation = QAM_64; schan.code_rate_HP = FEC_2_3; schan.code_rate_LP = FEC_2_3; schan.hierarchy = 0; dib3000mc_set_channel_cfg(state, &schan, 11); reg = dib3000mc_read_word(state, 0); dib3000mc_write_word(state, 0, reg | (1 << 8)); dib3000mc_read_word(state, 511); dib3000mc_write_word(state, 0, reg); return 0; } static int dib3000mc_autosearch_is_irq(struct dvb_frontend *demod) { struct dib3000mc_state *state = demod->demodulator_priv; u16 irq_pending = dib3000mc_read_word(state, 511); if (irq_pending & 0x1) // failed return 1; if (irq_pending & 0x2) // succeeded return 2; return 0; // still pending } static int dib3000mc_tune(struct dvb_frontend *demod) { struct dtv_frontend_properties *ch = &demod->dtv_property_cache; struct dib3000mc_state *state = demod->demodulator_priv; // ** configure demod ** dib3000mc_set_channel_cfg(state, ch, 0); // activates isi if (state->sfn_workaround_active) { dprintk("SFN workaround is active\n"); dib3000mc_write_word(state, 29, 0x1273); dib3000mc_write_word(state, 108, 0x4000); // P_pha3_force_pha_shift } else { dib3000mc_write_word(state, 29, 0x1073); dib3000mc_write_word(state, 108, 0x0000); // P_pha3_force_pha_shift } dib3000mc_set_adp_cfg(state, (u8)ch->modulation); if (ch->transmission_mode == TRANSMISSION_MODE_8K) { dib3000mc_write_word(state, 26, 38528); dib3000mc_write_word(state, 33, 8); } else { dib3000mc_write_word(state, 26, 30336); dib3000mc_write_word(state, 33, 6); } if (dib3000mc_read_word(state, 509) & 0x80) dib3000mc_set_timing(state, ch->transmission_mode, BANDWIDTH_TO_KHZ(ch->bandwidth_hz), 1); return 0; } struct i2c_adapter * dib3000mc_get_tuner_i2c_master(struct dvb_frontend *demod, int gating) { struct dib3000mc_state *st = demod->demodulator_priv; return dibx000_get_i2c_adapter(&st->i2c_master, DIBX000_I2C_INTERFACE_TUNER, gating); } EXPORT_SYMBOL(dib3000mc_get_tuner_i2c_master); static int dib3000mc_get_frontend(struct dvb_frontend* fe, struct dtv_frontend_properties *fep) { struct dib3000mc_state *state = fe->demodulator_priv; u16 tps = dib3000mc_read_word(state,458); fep->inversion = 
INVERSION_AUTO; fep->bandwidth_hz = state->current_bandwidth; switch ((tps >> 8) & 0x1) { case 0: fep->transmission_mode = TRANSMISSION_MODE_2K; break; case 1: fep->transmission_mode = TRANSMISSION_MODE_8K; break; } switch (tps & 0x3) { case 0: fep->guard_interval = GUARD_INTERVAL_1_32; break; case 1: fep->guard_interval = GUARD_INTERVAL_1_16; break; case 2: fep->guard_interval = GUARD_INTERVAL_1_8; break; case 3: fep->guard_interval = GUARD_INTERVAL_1_4; break; } switch ((tps >> 13) & 0x3) { case 0: fep->modulation = QPSK; break; case 1: fep->modulation = QAM_16; break; case 2: default: fep->modulation = QAM_64; break; } /* as long as the frontend_param structure is fixed for hierarchical transmission I refuse to use it */ /* (tps >> 12) & 0x1 == hrch is used, (tps >> 9) & 0x7 == alpha */ fep->hierarchy = HIERARCHY_NONE; switch ((tps >> 5) & 0x7) { case 1: fep->code_rate_HP = FEC_1_2; break; case 2: fep->code_rate_HP = FEC_2_3; break; case 3: fep->code_rate_HP = FEC_3_4; break; case 5: fep->code_rate_HP = FEC_5_6; break; case 7: default: fep->code_rate_HP = FEC_7_8; break; } switch ((tps >> 2) & 0x7) { case 1: fep->code_rate_LP = FEC_1_2; break; case 2: fep->code_rate_LP = FEC_2_3; break; case 3: fep->code_rate_LP = FEC_3_4; break; case 5: fep->code_rate_LP = FEC_5_6; break; case 7: default: fep->code_rate_LP = FEC_7_8; break; } return 0; } static int dib3000mc_set_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *fep = &fe->dtv_property_cache; struct dib3000mc_state *state = fe->demodulator_priv; int ret; dib3000mc_set_output_mode(state, OUTMODE_HIGH_Z); state->current_bandwidth = fep->bandwidth_hz; dib3000mc_set_bandwidth(state, BANDWIDTH_TO_KHZ(fep->bandwidth_hz)); /* maybe the parameter has been changed */ state->sfn_workaround_active = buggy_sfn_workaround; if (fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe); msleep(100); } if (fep->transmission_mode == TRANSMISSION_MODE_AUTO || fep->guard_interval == GUARD_INTERVAL_AUTO || fep->modulation == QAM_AUTO || fep->code_rate_HP == FEC_AUTO) { int i = 1000, found; dib3000mc_autosearch_start(fe); do { msleep(1); found = dib3000mc_autosearch_is_irq(fe); } while (found == 0 && i--); dprintk("autosearch returns: %d\n",found); if (found == 0 || found == 1) return 0; // no channel found dib3000mc_get_frontend(fe, fep); } ret = dib3000mc_tune(fe); /* make this a config parameter */ dib3000mc_set_output_mode(state, OUTMODE_MPEG2_FIFO); return ret; } static int dib3000mc_read_status(struct dvb_frontend *fe, enum fe_status *stat) { struct dib3000mc_state *state = fe->demodulator_priv; u16 lock = dib3000mc_read_word(state, 509); *stat = 0; if (lock & 0x8000) *stat |= FE_HAS_SIGNAL; if (lock & 0x3000) *stat |= FE_HAS_CARRIER; if (lock & 0x0100) *stat |= FE_HAS_VITERBI; if (lock & 0x0010) *stat |= FE_HAS_SYNC; if (lock & 0x0008) *stat |= FE_HAS_LOCK; return 0; } static int dib3000mc_read_ber(struct dvb_frontend *fe, u32 *ber) { struct dib3000mc_state *state = fe->demodulator_priv; *ber = (dib3000mc_read_word(state, 500) << 16) | dib3000mc_read_word(state, 501); return 0; } static int dib3000mc_read_unc_blocks(struct dvb_frontend *fe, u32 *unc) { struct dib3000mc_state *state = fe->demodulator_priv; *unc = dib3000mc_read_word(state, 508); return 0; } static int dib3000mc_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { struct dib3000mc_state *state = fe->demodulator_priv; u16 val = dib3000mc_read_word(state, 392); *strength = 65535 - val; return 0; } static int dib3000mc_read_snr(struct dvb_frontend* fe, 
u16 *snr) { *snr = 0x0000; return 0; } static int dib3000mc_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings *tune) { tune->min_delay_ms = 1000; return 0; } static void dib3000mc_release(struct dvb_frontend *fe) { struct dib3000mc_state *state = fe->demodulator_priv; dibx000_exit_i2c_master(&state->i2c_master); kfree(state); } int dib3000mc_pid_control(struct dvb_frontend *fe, int index, int pid,int onoff) { struct dib3000mc_state *state = fe->demodulator_priv; dib3000mc_write_word(state, 212 + index, onoff ? (1 << 13) | pid : 0); return 0; } EXPORT_SYMBOL(dib3000mc_pid_control); int dib3000mc_pid_parse(struct dvb_frontend *fe, int onoff) { struct dib3000mc_state *state = fe->demodulator_priv; u16 tmp = dib3000mc_read_word(state, 206) & ~(1 << 4); tmp |= (onoff << 4); return dib3000mc_write_word(state, 206, tmp); } EXPORT_SYMBOL(dib3000mc_pid_parse); void dib3000mc_set_config(struct dvb_frontend *fe, struct dib3000mc_config *cfg) { struct dib3000mc_state *state = fe->demodulator_priv; state->cfg = cfg; } EXPORT_SYMBOL(dib3000mc_set_config); int dib3000mc_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, struct dib3000mc_config cfg[]) { struct dib3000mc_state *dmcst; int k; u8 new_addr; static const u8 DIB3000MC_I2C_ADDRESS[] = { 20, 22, 24, 26 }; dmcst = kzalloc(sizeof(struct dib3000mc_state), GFP_KERNEL); if (dmcst == NULL) return -ENOMEM; dmcst->i2c_adap = i2c; for (k = no_of_demods-1; k >= 0; k--) { dmcst->cfg = &cfg[k]; /* designated i2c address */ new_addr = DIB3000MC_I2C_ADDRESS[k]; dmcst->i2c_addr = new_addr; if (dib3000mc_identify(dmcst) != 0) { dmcst->i2c_addr = default_addr; if (dib3000mc_identify(dmcst) != 0) { dprintk("-E- DiB3000P/MC #%d: not identified\n", k); kfree(dmcst); return -ENODEV; } } dib3000mc_set_output_mode(dmcst, OUTMODE_MPEG2_PAR_CONT_CLK); // set new i2c address and force divstr (Bit 1) to value 0 (Bit 0) dib3000mc_write_word(dmcst, 1024, (new_addr << 3) | 0x1); dmcst->i2c_addr = new_addr; } for (k = 0; k < no_of_demods; k++) { dmcst->cfg = &cfg[k]; dmcst->i2c_addr = DIB3000MC_I2C_ADDRESS[k]; dib3000mc_write_word(dmcst, 1024, dmcst->i2c_addr << 3); /* turn off data output */ dib3000mc_set_output_mode(dmcst, OUTMODE_HIGH_Z); } kfree(dmcst); return 0; } EXPORT_SYMBOL(dib3000mc_i2c_enumeration); static const struct dvb_frontend_ops dib3000mc_ops; struct dvb_frontend * dib3000mc_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib3000mc_config *cfg) { struct dvb_frontend *demod; struct dib3000mc_state *st; st = kzalloc(sizeof(struct dib3000mc_state), GFP_KERNEL); if (st == NULL) return NULL; st->cfg = cfg; st->i2c_adap = i2c_adap; st->i2c_addr = i2c_addr; demod = &st->demod; demod->demodulator_priv = st; memcpy(&st->demod.ops, &dib3000mc_ops, sizeof(struct dvb_frontend_ops)); if (dib3000mc_identify(st) != 0) goto error; dibx000_init_i2c_master(&st->i2c_master, DIB3000MC, st->i2c_adap, st->i2c_addr); dib3000mc_write_word(st, 1037, 0x3130); return demod; error: kfree(st); return NULL; } EXPORT_SYMBOL_GPL(dib3000mc_attach); static const struct dvb_frontend_ops dib3000mc_ops = { .delsys = { SYS_DVBT }, .info = { .name = "DiBcom 3000MC/P", .frequency_min_hz = 44250 * kHz, .frequency_max_hz = 867250 * kHz, .frequency_stepsize_hz = 62500, .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_RECOVER 
| FE_CAN_HIERARCHY_AUTO, }, .release = dib3000mc_release, .init = dib3000mc_init, .sleep = dib3000mc_sleep, .set_frontend = dib3000mc_set_frontend, .get_tune_settings = dib3000mc_fe_get_tune_settings, .get_frontend = dib3000mc_get_frontend, .read_status = dib3000mc_read_status, .read_ber = dib3000mc_read_ber, .read_signal_strength = dib3000mc_read_signal_strength, .read_snr = dib3000mc_read_snr, .read_ucblocks = dib3000mc_read_unc_blocks, }; MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Driver for the DiBcom 3000MC/P COFDM demodulator"); MODULE_LICENSE("GPL");
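/*
 * Example (illustrative sketch, not from this driver): how a bridge or board
 * driver would typically bind this demod. The 0x18 i2c address, the function
 * name and the mostly empty config are placeholders; a real board supplies
 * real values and must at least point .agc at a valid dibx000_agc_config.
 */
static struct dib3000mc_config example_board_cfg;

static struct dvb_frontend * __maybe_unused example_board_attach(struct i2c_adapter *i2c)
{
	struct dvb_frontend *fe = dib3000mc_attach(i2c, 0x18, &example_board_cfg);

	if (!fe)
		return NULL;
	/* from here the dvb core drives fe->ops: init, set_frontend, ... */
	return fe;
}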
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __NET_UDP_TUNNEL_H #define __NET_UDP_TUNNEL_H #include <net/ip_tunnels.h> #include <net/udp.h> #if IS_ENABLED(CONFIG_IPV6) #include <net/ipv6.h> #include <net/ipv6_stubs.h> #endif struct udp_port_cfg { u8 family; /* Used only for kernel-created sockets */ union { struct in_addr local_ip; #if IS_ENABLED(CONFIG_IPV6) struct in6_addr local_ip6; #endif }; union { struct in_addr peer_ip; #if IS_ENABLED(CONFIG_IPV6) struct in6_addr peer_ip6; #endif }; __be16 local_udp_port; __be16 peer_udp_port; int bind_ifindex; unsigned int use_udp_checksums:1, use_udp6_tx_checksums:1, use_udp6_rx_checksums:1, ipv6_v6only:1; }; int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg, struct socket **sockp); #if IS_ENABLED(CONFIG_IPV6) int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, struct socket **sockp); #else static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg, struct socket **sockp) { return 0; } #endif static inline int udp_sock_create(struct net *net, struct udp_port_cfg *cfg, struct socket **sockp) { if (cfg->family == AF_INET) return udp_sock_create4(net, cfg, sockp); if (cfg->family == AF_INET6) return udp_sock_create6(net, cfg, sockp); return -EPFNOSUPPORT; } typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb); typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk, struct sk_buff *skb); typedef void (*udp_tunnel_encap_err_rcv_t)(struct sock *sk, struct sk_buff *skb, int err, __be16 port, u32 info, u8 *payload); typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk); typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk, struct list_head *head, struct sk_buff *skb); typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb, int nhoff); struct udp_tunnel_sock_cfg { void *sk_user_data; /* user data used by the encap_rcv callback */ /* Used for setting up udp_sock fields, see udp.h for details */ __u8 encap_type; udp_tunnel_encap_rcv_t encap_rcv;
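	/*
	 * (Usage sketch, not in the original header; it mirrors what the TIPC
	 * UDP bearer code earlier in this document does.) A consumer creates
	 * the kernel socket from a udp_port_cfg, then attaches its
	 * encapsulation callbacks through this structure; my_encap_rcv is a
	 * placeholder for the caller's receive handler:
	 *
	 *	struct udp_port_cfg pcfg = {
	 *		.family = AF_INET,
	 *		.local_udp_port = htons(4789),
	 *	};
	 *	struct udp_tunnel_sock_cfg tcfg = {
	 *		.encap_type = 1,
	 *		.encap_rcv = my_encap_rcv,
	 *	};
	 *	struct socket *sock;
	 *
	 *	if (!udp_sock_create(net, &pcfg, &sock))
	 *		setup_udp_tunnel_sock(net, sock, &tcfg);
	 */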
udp_tunnel_encap_err_lookup_t encap_err_lookup; udp_tunnel_encap_err_rcv_t encap_err_rcv; udp_tunnel_encap_destroy_t encap_destroy; udp_tunnel_gro_receive_t gro_receive; udp_tunnel_gro_complete_t gro_complete; }; /* Setup the given (UDP) sock to receive UDP encapsulated packets */ void setup_udp_tunnel_sock(struct net *net, struct socket *sock, struct udp_tunnel_sock_cfg *sock_cfg); /* -- List of parsable UDP tunnel types -- * * Adding to this list will result in serious debate. The main issue is * that this list is essentially a list of workarounds for either poorly * designed tunnels, or poorly designed device offloads. * * The parsing supported via these types should really be used for Rx * traffic only as the network stack will have already inserted offsets for * the location of the headers in the skb. In addition any ports that are * pushed should be kept within the namespace without leaking to other * devices such as VFs or other ports on the same device. * * It is strongly encouraged to use CHECKSUM_COMPLETE for Rx to avoid the * need to use this for Rx checksum offload. It should not be necessary to * call this function to perform Tx offloads on outgoing traffic. */ enum udp_parsable_tunnel_type { UDP_TUNNEL_TYPE_VXLAN = BIT(0), /* RFC 7348 */ UDP_TUNNEL_TYPE_GENEVE = BIT(1), /* draft-ietf-nvo3-geneve */ UDP_TUNNEL_TYPE_VXLAN_GPE = BIT(2), /* draft-ietf-nvo3-vxlan-gpe */ }; struct udp_tunnel_info { unsigned short type; sa_family_t sa_family; __be16 port; u8 hw_priv; }; /* Notify network devices of offloadable types */ void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock, unsigned short type); void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock, unsigned short type); void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type); void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type); static inline void udp_tunnel_get_rx_info(struct net_device *dev) { ASSERT_RTNL(); if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) return; call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev); } static inline void udp_tunnel_drop_rx_info(struct net_device *dev) { ASSERT_RTNL(); if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) return; call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev); } /* Transmit the skb using UDP encapsulation. 
*/ void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port, bool xnet, bool nocheck); int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, struct net_device *dev, const struct in6_addr *saddr, const struct in6_addr *daddr, __u8 prio, __u8 ttl, __be32 label, __be16 src_port, __be16 dst_port, bool nocheck); void udp_tunnel_sock_release(struct socket *sock); struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb, struct net_device *dev, struct net *net, int oif, __be32 *saddr, const struct ip_tunnel_key *key, __be16 sport, __be16 dport, u8 tos, struct dst_cache *dst_cache); struct dst_entry *udp_tunnel6_dst_lookup(struct sk_buff *skb, struct net_device *dev, struct net *net, struct socket *sock, int oif, struct in6_addr *saddr, const struct ip_tunnel_key *key, __be16 sport, __be16 dport, u8 dsfield, struct dst_cache *dst_cache); struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family, const unsigned long *flags, __be64 tunnel_id, int md_size); #ifdef CONFIG_INET static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum) { int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; return iptunnel_handle_offloads(skb, type); } #endif static inline void udp_tunnel_encap_enable(struct sock *sk) { if (udp_test_and_set_bit(ENCAP_ENABLED, sk)) return; #if IS_ENABLED(CONFIG_IPV6) if (READ_ONCE(sk->sk_family) == PF_INET6) ipv6_stub->udpv6_encap_enable(); #endif udp_encap_enable(); } #define UDP_TUNNEL_NIC_MAX_TABLES 4 enum udp_tunnel_nic_info_flags { /* Device callbacks may sleep */ UDP_TUNNEL_NIC_INFO_MAY_SLEEP = BIT(0), /* Device only supports offloads when it's open, all ports * will be removed before close and re-added after open. */ UDP_TUNNEL_NIC_INFO_OPEN_ONLY = BIT(1), /* Device supports only IPv4 tunnels */ UDP_TUNNEL_NIC_INFO_IPV4_ONLY = BIT(2), /* Device has hard-coded the IANA VXLAN port (4789) as VXLAN. * This port must not be counted towards n_entries of any table. * Driver will not receive any callback associated with port 4789. */ UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = BIT(3), }; struct udp_tunnel_nic; #define UDP_TUNNEL_NIC_MAX_SHARING_DEVICES (U16_MAX / 2) struct udp_tunnel_nic_shared { struct udp_tunnel_nic *udp_tunnel_nic_info; struct list_head devices; }; struct udp_tunnel_nic_shared_node { struct net_device *dev; struct list_head list; }; /** * struct udp_tunnel_nic_info - driver UDP tunnel offload information * @set_port: callback for adding a new port * @unset_port: callback for removing a port * @sync_table: callback for syncing the entire port table at once * @shared: reference to device global state (optional) * @flags: device flags from enum udp_tunnel_nic_info_flags * @tables: UDP port tables this device has * @tables.n_entries: number of entries in this table * @tables.tunnel_types: types of tunnels this table accepts * * Drivers are expected to provide either @set_port and @unset_port callbacks * or the @sync_table callback. Callbacks are invoked with rtnl lock held. * * Devices which (misguidedly) share the UDP tunnel port table across multiple * netdevs should allocate an instance of struct udp_tunnel_nic_shared and * point @shared at it. * There must never be more than %UDP_TUNNEL_NIC_MAX_SHARING_DEVICES devices * sharing a table. 
* * Known limitations: * - UDP tunnel port notifications are fundamentally best-effort - * the driver will likely see both skbs that use a UDP tunnel port without * being tunneled skbs, and tunneled skbs from other ports - * drivers should only use these ports for non-critical RX-side offloads, * e.g. the checksum offload; * - none of the devices care about the socket family at present, so we don't * track it. Please extend this code if you care. */ struct udp_tunnel_nic_info { /* one-by-one */ int (*set_port)(struct net_device *dev, unsigned int table, unsigned int entry, struct udp_tunnel_info *ti); int (*unset_port)(struct net_device *dev, unsigned int table, unsigned int entry, struct udp_tunnel_info *ti); /* all at once */ int (*sync_table)(struct net_device *dev, unsigned int table); struct udp_tunnel_nic_shared *shared; unsigned int flags; struct udp_tunnel_nic_table_info { unsigned int n_entries; unsigned int tunnel_types; } tables[UDP_TUNNEL_NIC_MAX_TABLES]; }; /* UDP tunnel module dependencies * * Tunnel drivers are expected to have a hard dependency on the udp_tunnel * module. NIC drivers are not, they just attach their * struct udp_tunnel_nic_info to the netdev and wait for callbacks to come. * Loading a tunnel driver will cause the udp_tunnel module to be loaded * and only then will all the required state structures be allocated. * Since we want a weak dependency from the drivers and the core to udp_tunnel * we call things through the following stubs. */ struct udp_tunnel_nic_ops { void (*get_port)(struct net_device *dev, unsigned int table, unsigned int idx, struct udp_tunnel_info *ti); void (*set_port_priv)(struct net_device *dev, unsigned int table, unsigned int idx, u8 priv); void (*add_port)(struct net_device *dev, struct udp_tunnel_info *ti); void (*del_port)(struct net_device *dev, struct udp_tunnel_info *ti); void (*reset_ntf)(struct net_device *dev); size_t (*dump_size)(struct net_device *dev, unsigned int table); int (*dump_write)(struct net_device *dev, unsigned int table, struct sk_buff *skb); }; #ifdef CONFIG_INET extern const struct udp_tunnel_nic_ops *udp_tunnel_nic_ops; #else #define udp_tunnel_nic_ops ((struct udp_tunnel_nic_ops *)NULL) #endif static inline void udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table, unsigned int idx, struct udp_tunnel_info *ti) { /* This helper is used from .sync_table, we indicate empty entries * by a zeroed @ti. Drivers which need to know the details of a port * when it gets deleted should use the .set_port / .unset_port * callbacks. * Zero out here, otherwise !CONFIG_INET causes uninitialized warnings.
*/ memset(ti, 0, sizeof(*ti)); if (udp_tunnel_nic_ops) udp_tunnel_nic_ops->get_port(dev, table, idx, ti); } static inline void udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table, unsigned int idx, u8 priv) { if (udp_tunnel_nic_ops) udp_tunnel_nic_ops->set_port_priv(dev, table, idx, priv); } static inline void udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti) { if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) return; if (udp_tunnel_nic_ops) udp_tunnel_nic_ops->add_port(dev, ti); } static inline void udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti) { if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) return; if (udp_tunnel_nic_ops) udp_tunnel_nic_ops->del_port(dev, ti); } /** * udp_tunnel_nic_reset_ntf() - device-originating reset notification * @dev: network interface device structure * * Called by the driver to inform the core that the entire UDP tunnel port * state has been lost, usually due to device reset. Core will assume device * forgot all the ports and issue .set_port and .sync_table callbacks as * necessary. * * This function must be called with rtnl lock held, and will issue all * the callbacks before returning. */ static inline void udp_tunnel_nic_reset_ntf(struct net_device *dev) { if (udp_tunnel_nic_ops) udp_tunnel_nic_ops->reset_ntf(dev); } static inline size_t udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table) { if (!udp_tunnel_nic_ops) return 0; return udp_tunnel_nic_ops->dump_size(dev, table); } static inline int udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table, struct sk_buff *skb) { if (!udp_tunnel_nic_ops) return 0; return udp_tunnel_nic_ops->dump_write(dev, table, skb); } #endif
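/*
 * Example (hypothetical driver, illustrative only): a NIC implementing the
 * udp_tunnel_nic_info contract described above with one 4-entry table that
 * accepts VXLAN and GENEVE ports. All exd_* names are placeholders.
 */
static int exd_udp_tunnel_set_port(struct net_device *dev, unsigned int table,
				   unsigned int entry, struct udp_tunnel_info *ti)
{
	/* program ti->port / ti->type into hardware filter slot 'entry' */
	return 0;
}

static int exd_udp_tunnel_unset_port(struct net_device *dev, unsigned int table,
				     unsigned int entry, struct udp_tunnel_info *ti)
{
	/* clear hardware filter slot 'entry' */
	return 0;
}

static const struct udp_tunnel_nic_info exd_udp_tunnels = {
	.set_port	= exd_udp_tunnel_set_port,
	.unset_port	= exd_udp_tunnel_unset_port,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
	.tables		= {
		{
			.n_entries	= 4,
			.tunnel_types	= UDP_TUNNEL_TYPE_VXLAN |
					  UDP_TUNNEL_TYPE_GENEVE,
		},
	},
};
/* a driver would point its netdev's udp_tunnel_nic_info at exd_udp_tunnels */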
// SPDX-License-Identifier: GPL-2.0-only /* * Accelerated GHASH implementation with Intel PCLMULQDQ-NI * instructions. This file contains glue code. * * Copyright (c) 2009 Intel Corp. * Author: Huang Ying <ying.huang@intel.com> */ #include <linux/err.h> #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/crypto.h> #include <crypto/algapi.h> #include <crypto/cryptd.h> #include <crypto/gf128mul.h> #include <crypto/internal/hash.h> #include <crypto/internal/simd.h> #include <asm/cpu_device_id.h> #include <asm/simd.h> #include <asm/unaligned.h> #define GHASH_BLOCK_SIZE 16 #define GHASH_DIGEST_SIZE 16 void clmul_ghash_mul(char *dst, const le128 *shash); void clmul_ghash_update(char *dst, const char *src, unsigned int srclen, const le128 *shash); struct ghash_async_ctx { struct cryptd_ahash *cryptd_tfm; }; struct ghash_ctx { le128 shash; }; struct ghash_desc_ctx { u8 buffer[GHASH_BLOCK_SIZE]; u32 bytes; }; static int ghash_init(struct shash_desc *desc) { struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); memset(dctx, 0, sizeof(*dctx)); return 0; } static int ghash_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { struct ghash_ctx *ctx = crypto_shash_ctx(tfm); u64 a, b; if (keylen != GHASH_BLOCK_SIZE) return -EINVAL; /* * GHASH maps bits to polynomial coefficients backwards, which makes it * hard to implement. But it can be shown that the GHASH multiplication * * D * K (mod x^128 + x^7 + x^2 + x + 1) * * (where D is a data block and K is the key) is equivalent to: * * bitreflect(D) * bitreflect(K) * x^(-127) * (mod x^128 + x^127 + x^126 + x^121 + 1) * * So, the code below precomputes: * * bitreflect(K) * x^(-127) (mod x^128 + x^127 + x^126 + x^121 + 1) * * ... but in Montgomery form (so that Montgomery multiplication can be * used), i.e. with an extra x^128 factor, which means actually: * * bitreflect(K) * x (mod x^128 + x^127 + x^126 + x^121 + 1) * * The within-a-byte part of bitreflect() cancels out GHASH's built-in * reflection, and thus bitreflect() is actually a byteswap.
*/ a = get_unaligned_be64(key); b = get_unaligned_be64(key + 8); ctx->shash.a = cpu_to_le64((a << 1) | (b >> 63)); ctx->shash.b = cpu_to_le64((b << 1) | (a >> 63)); if (a >> 63) ctx->shash.a ^= cpu_to_le64((u64)0xc2 << 56); return 0; } static int ghash_update(struct shash_desc *desc, const u8 *src, unsigned int srclen) { struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); u8 *dst = dctx->buffer; kernel_fpu_begin(); if (dctx->bytes) { int n = min(srclen, dctx->bytes); u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes); dctx->bytes -= n; srclen -= n; while (n--) *pos++ ^= *src++; if (!dctx->bytes) clmul_ghash_mul(dst, &ctx->shash); } clmul_ghash_update(dst, src, srclen, &ctx->shash); kernel_fpu_end(); if (srclen & 0xf) { src += srclen - (srclen & 0xf); srclen &= 0xf; dctx->bytes = GHASH_BLOCK_SIZE - srclen; while (srclen--) *dst++ ^= *src++; } return 0; } static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx) { u8 *dst = dctx->buffer; if (dctx->bytes) { u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes); while (dctx->bytes--) *tmp++ ^= 0; kernel_fpu_begin(); clmul_ghash_mul(dst, &ctx->shash); kernel_fpu_end(); } dctx->bytes = 0; } static int ghash_final(struct shash_desc *desc, u8 *dst) { struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); u8 *buf = dctx->buffer; ghash_flush(ctx, dctx); memcpy(dst, buf, GHASH_BLOCK_SIZE); return 0; } static struct shash_alg ghash_alg = { .digestsize = GHASH_DIGEST_SIZE, .init = ghash_init, .update = ghash_update, .final = ghash_final, .setkey = ghash_setkey, .descsize = sizeof(struct ghash_desc_ctx), .base = { .cra_name = "__ghash", .cra_driver_name = "__ghash-pclmulqdqni", .cra_priority = 0, .cra_flags = CRYPTO_ALG_INTERNAL, .cra_blocksize = GHASH_BLOCK_SIZE, .cra_ctxsize = sizeof(struct ghash_ctx), .cra_module = THIS_MODULE, }, }; static int ghash_async_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); struct ahash_request *cryptd_req = ahash_request_ctx(req); struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; struct shash_desc *desc = cryptd_shash_desc(cryptd_req); struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm); desc->tfm = child; return crypto_shash_init(desc); } static int ghash_async_update(struct ahash_request *req) { struct ahash_request *cryptd_req = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; if (!crypto_simd_usable() || (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) { memcpy(cryptd_req, req, sizeof(*req)); ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); return crypto_ahash_update(cryptd_req); } else { struct shash_desc *desc = cryptd_shash_desc(cryptd_req); return shash_ahash_update(req, desc); } } static int ghash_async_final(struct ahash_request *req) { struct ahash_request *cryptd_req = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; if (!crypto_simd_usable() || (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) { memcpy(cryptd_req, req, sizeof(*req)); ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); return crypto_ahash_final(cryptd_req); } else { struct shash_desc *desc = cryptd_shash_desc(cryptd_req); return crypto_shash_final(desc, 
req->result); } } static int ghash_async_import(struct ahash_request *req, const void *in) { struct ahash_request *cryptd_req = ahash_request_ctx(req); struct shash_desc *desc = cryptd_shash_desc(cryptd_req); struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); ghash_async_init(req); memcpy(dctx, in, sizeof(*dctx)); return 0; } static int ghash_async_export(struct ahash_request *req, void *out) { struct ahash_request *cryptd_req = ahash_request_ctx(req); struct shash_desc *desc = cryptd_shash_desc(cryptd_req); struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); memcpy(out, dctx, sizeof(*dctx)); return 0; } static int ghash_async_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); struct ahash_request *cryptd_req = ahash_request_ctx(req); struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; if (!crypto_simd_usable() || (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) { memcpy(cryptd_req, req, sizeof(*req)); ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base); return crypto_ahash_digest(cryptd_req); } else { struct shash_desc *desc = cryptd_shash_desc(cryptd_req); struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm); desc->tfm = child; return shash_ahash_digest(req, desc); } } static int ghash_async_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); struct crypto_ahash *child = &ctx->cryptd_tfm->base; crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_ahash_set_flags(child, crypto_ahash_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); return crypto_ahash_setkey(child, key, keylen); } static int ghash_async_init_tfm(struct crypto_tfm *tfm) { struct cryptd_ahash *cryptd_tfm; struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm); cryptd_tfm = cryptd_alloc_ahash("__ghash-pclmulqdqni", CRYPTO_ALG_INTERNAL, CRYPTO_ALG_INTERNAL); if (IS_ERR(cryptd_tfm)) return PTR_ERR(cryptd_tfm); ctx->cryptd_tfm = cryptd_tfm; crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct ahash_request) + crypto_ahash_reqsize(&cryptd_tfm->base)); return 0; } static void ghash_async_exit_tfm(struct crypto_tfm *tfm) { struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm); cryptd_free_ahash(ctx->cryptd_tfm); } static struct ahash_alg ghash_async_alg = { .init = ghash_async_init, .update = ghash_async_update, .final = ghash_async_final, .setkey = ghash_async_setkey, .digest = ghash_async_digest, .export = ghash_async_export, .import = ghash_async_import, .halg = { .digestsize = GHASH_DIGEST_SIZE, .statesize = sizeof(struct ghash_desc_ctx), .base = { .cra_name = "ghash", .cra_driver_name = "ghash-clmulni", .cra_priority = 400, .cra_ctxsize = sizeof(struct ghash_async_ctx), .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = GHASH_BLOCK_SIZE, .cra_module = THIS_MODULE, .cra_init = ghash_async_init_tfm, .cra_exit = ghash_async_exit_tfm, }, }, }; static const struct x86_cpu_id pcmul_cpu_id[] = { X86_MATCH_FEATURE(X86_FEATURE_PCLMULQDQ, NULL), /* Pickle-Mickle-Duck */ {} }; MODULE_DEVICE_TABLE(x86cpu, pcmul_cpu_id); static int __init ghash_pclmulqdqni_mod_init(void) { int err; if (!x86_match_cpu(pcmul_cpu_id)) return -ENODEV; err = crypto_register_shash(&ghash_alg); if (err) goto err_out; err = crypto_register_ahash(&ghash_async_alg); if (err) goto err_shash; return 0; err_shash: crypto_unregister_shash(&ghash_alg); err_out: return err; } static void __exit ghash_pclmulqdqni_mod_exit(void) { crypto_unregister_ahash(&ghash_async_alg); 
	crypto_unregister_shash(&ghash_alg);
}

module_init(ghash_pclmulqdqni_mod_init);
module_exit(ghash_pclmulqdqni_mod_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GHASH hash function, accelerated by PCLMULQDQ-NI");
MODULE_ALIAS_CRYPTO("ghash");
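The long comment in ghash_setkey() reduces the whole key schedule to one 128-bit multiply-by-x: byte-swap the key, shift it left by one bit across both 64-bit halves, and fold the carried-out top bit back in with the reduction constant 0xc2 << 56. Below is a stand-alone sketch of exactly that arithmetic (the final little-endian store is omitted); it is illustrative only and assumes nothing beyond the lines of ghash_setkey() shown above.

/*
 * Stand-alone sketch of the key transformation done by ghash_setkey():
 * read the key as two big-endian u64 halves, compute K << 1, and reduce
 * with 0xc2 << 56 when a bit shifts out of the top. Illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static void ghash_reflect_key(const uint8_t key[16], uint64_t out[2])
{
	uint64_t a = 0, b = 0;
	int i;

	/* get_unaligned_be64() equivalents */
	for (i = 0; i < 8; i++) {
		a = (a << 8) | key[i];
		b = (b << 8) | key[8 + i];
	}

	out[0] = (a << 1) | (b >> 63);	/* shash.a before the le64 store */
	out[1] = (b << 1) | (a >> 63);	/* shash.b */
	if (a >> 63)			/* bit carried out: fold it back */
		out[0] ^= (uint64_t)0xc2 << 56;
}

int main(void)
{
	uint8_t key[16] = { 0x80 };	/* top bit set: exercises reduction */
	uint64_t h[2];

	ghash_reflect_key(key, h);
	printf("shash.a=%016llx shash.b=%016llx\n",
	       (unsigned long long)h[0], (unsigned long long)h[1]);
	return 0;
}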
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * BlueZ - Bluetooth protocol stack for Linux
 *
 * Copyright (C) 2021 Intel Corporation
 */

#include <asm/unaligned.h>

void eir_create(struct hci_dev *hdev, u8 *data);

u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr);
u8 eir_create_scan_rsp(struct hci_dev *hdev, u8 instance, u8 *ptr);
u8 eir_create_per_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr);

u8 eir_append_local_name(struct hci_dev *hdev, u8 *eir, u8 ad_len);
u8 eir_append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len);
u8 eir_append_service_data(u8 *eir, u16 eir_len, u16 uuid, u8 *data,
			   u8 data_len);

static inline u16 eir_precalc_len(u8 data_len)
{
	return sizeof(u8) * 2 + data_len;
}

static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type,
				  u8 *data, u8 data_len)
{
	eir[eir_len++] = sizeof(type) + data_len;
	eir[eir_len++] = type;
	memcpy(&eir[eir_len], data, data_len);
	eir_len += data_len;

	return eir_len;
}

static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data)
{
	eir[eir_len++] = sizeof(type) + sizeof(data);
	eir[eir_len++] = type;
	put_unaligned_le16(data, &eir[eir_len]);
	eir_len += sizeof(data);

	return eir_len;
}

static inline u16 eir_skb_put_data(struct sk_buff *skb, u8 type,
				   u8 *data, u8 data_len)
{
	u8 *eir;
	u16 eir_len;

	eir_len = eir_precalc_len(data_len);
	eir = skb_put(skb, eir_len);
	WARN_ON(sizeof(type) + data_len > U8_MAX);
	eir[0] = sizeof(type) + data_len;
	eir[1] = type;
	memcpy(&eir[2], data, data_len);

	return eir_len;
}

static inline void *eir_get_data(u8 *eir, size_t eir_len, u8 type,
				 size_t *data_len)
{
	size_t parsed = 0;

	if (eir_len < 2)
		return NULL;

	while (parsed < eir_len - 1) {
		u8 field_len = eir[0];

		if (field_len == 0)
			break;

		parsed += field_len + 1;

		if (parsed > eir_len)
			break;

		if (eir[1] != type) {
			eir += field_len + 1;
			continue;
		}

		/* Zero length data */
		if (field_len == 1)
			return NULL;

		if (data_len)
			*data_len = field_len - 1;

		return &eir[2];
	}

	return NULL;
}

void *eir_get_service_data(u8 *eir, size_t eir_len, u16 uuid, size_t *len);
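Each EIR field emitted by these helpers is framed as one length byte (counting the type byte plus the payload), one type byte, then the payload, and eir_get_data() walks exactly that framing. The user-space sketch below round-trips a Complete Local Name (type 0x09 per the Bluetooth Core Specification) through simplified encode/decode helpers; the helper names and buffer handling are invented for the example, and the zero-length-field special case is omitted for brevity.

/*
 * User-space sketch of the EIR length|type|value framing used above.
 * eir_append() mirrors eir_append_data(); eir_find() mirrors the walk
 * in eir_get_data(). Illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EIR_NAME_COMPLETE 0x09	/* Complete Local Name, Bluetooth spec */

static uint16_t eir_append(uint8_t *eir, uint16_t len, uint8_t type,
			   const uint8_t *data, uint8_t data_len)
{
	eir[len++] = 1 + data_len;	/* length covers type + payload */
	eir[len++] = type;
	memcpy(&eir[len], data, data_len);
	return len + data_len;
}

static const uint8_t *eir_find(const uint8_t *eir, size_t eir_len,
			       uint8_t type, size_t *data_len)
{
	size_t off = 0;

	while (off + 1 < eir_len) {
		uint8_t field_len = eir[off];

		if (!field_len || off + 1 + field_len > eir_len)
			break;
		if (eir[off + 1] == type) {
			*data_len = field_len - 1;
			return &eir[off + 2];
		}
		off += field_len + 1;
	}
	return NULL;
}

int main(void)
{
	uint8_t eir[31] = {0};		/* legacy advertising PDU size */
	uint16_t len = 0;
	size_t name_len;
	const uint8_t *name;

	len = eir_append(eir, len, EIR_NAME_COMPLETE,
			 (const uint8_t *)"devboard", 8);
	name = eir_find(eir, len, EIR_NAME_COMPLETE, &name_len);
	if (name)
		printf("name: %.*s\n", (int)name_len, name);
	return 0;
}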
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
* Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com> */ #include "devl_internal.h" static const struct devlink_param devlink_param_generic[] = { { .id = DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET, .name = DEVLINK_PARAM_GENERIC_INT_ERR_RESET_NAME, .type = DEVLINK_PARAM_GENERIC_INT_ERR_RESET_TYPE, }, { .id = DEVLINK_PARAM_GENERIC_ID_MAX_MACS, .name = DEVLINK_PARAM_GENERIC_MAX_MACS_NAME, .type = DEVLINK_PARAM_GENERIC_MAX_MACS_TYPE, }, { .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, .name = DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_NAME, .type = DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_TYPE, }, { .id = DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT, .name = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME, .type = DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE, }, { .id = DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, .name = DEVLINK_PARAM_GENERIC_IGNORE_ARI_NAME, .type = DEVLINK_PARAM_GENERIC_IGNORE_ARI_TYPE, }, { .id = DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX, .name = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_NAME, .type = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MAX_TYPE, }, { .id = DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN, .name = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_NAME, .type = DEVLINK_PARAM_GENERIC_MSIX_VEC_PER_PF_MIN_TYPE, }, { .id = DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, .name = DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_NAME, .type = DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_TYPE, }, { .id = DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE, .name = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_NAME, .type = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_TYPE, }, { .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, .name = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME, .type = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE, }, { .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_REMOTE_DEV_RESET, .name = DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_NAME, .type = DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_TYPE, }, { .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH, .name = DEVLINK_PARAM_GENERIC_ENABLE_ETH_NAME, .type = DEVLINK_PARAM_GENERIC_ENABLE_ETH_TYPE, }, { .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA, .name = DEVLINK_PARAM_GENERIC_ENABLE_RDMA_NAME, .type = DEVLINK_PARAM_GENERIC_ENABLE_RDMA_TYPE, }, { .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET, .name = DEVLINK_PARAM_GENERIC_ENABLE_VNET_NAME, .type = DEVLINK_PARAM_GENERIC_ENABLE_VNET_TYPE, }, { .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP, .name = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_NAME, .type = DEVLINK_PARAM_GENERIC_ENABLE_IWARP_TYPE, }, { .id = DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE, .name = DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_NAME, .type = DEVLINK_PARAM_GENERIC_IO_EQ_SIZE_TYPE, }, { .id = DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE, .name = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_NAME, .type = DEVLINK_PARAM_GENERIC_EVENT_EQ_SIZE_TYPE, }, }; static int devlink_param_generic_verify(const struct devlink_param *param) { /* verify it match generic parameter by id and name */ if (param->id > DEVLINK_PARAM_GENERIC_ID_MAX) return -EINVAL; if (strcmp(param->name, devlink_param_generic[param->id].name)) return -ENOENT; WARN_ON(param->type != devlink_param_generic[param->id].type); return 0; } static int devlink_param_driver_verify(const struct devlink_param *param) { int i; if (param->id <= DEVLINK_PARAM_GENERIC_ID_MAX) return -EINVAL; /* verify no such name in generic params */ for (i = 0; i <= DEVLINK_PARAM_GENERIC_ID_MAX; i++) if (!strcmp(param->name, devlink_param_generic[i].name)) return -EEXIST; return 0; } static struct devlink_param_item * devlink_param_find_by_name(struct xarray *params, const char *param_name) { struct 
devlink_param_item *param_item; unsigned long param_id; xa_for_each(params, param_id, param_item) { if (!strcmp(param_item->param->name, param_name)) return param_item; } return NULL; } static struct devlink_param_item * devlink_param_find_by_id(struct xarray *params, u32 param_id) { return xa_load(params, param_id); } static bool devlink_param_cmode_is_supported(const struct devlink_param *param, enum devlink_param_cmode cmode) { return test_bit(cmode, ¶m->supported_cmodes); } static int devlink_param_get(struct devlink *devlink, const struct devlink_param *param, struct devlink_param_gset_ctx *ctx) { if (!param->get) return -EOPNOTSUPP; return param->get(devlink, param->id, ctx); } static int devlink_param_set(struct devlink *devlink, const struct devlink_param *param, struct devlink_param_gset_ctx *ctx, struct netlink_ext_ack *extack) { if (!param->set) return -EOPNOTSUPP; return param->set(devlink, param->id, ctx, extack); } static int devlink_param_type_to_nla_type(enum devlink_param_type param_type) { switch (param_type) { case DEVLINK_PARAM_TYPE_U8: return NLA_U8; case DEVLINK_PARAM_TYPE_U16: return NLA_U16; case DEVLINK_PARAM_TYPE_U32: return NLA_U32; case DEVLINK_PARAM_TYPE_STRING: return NLA_STRING; case DEVLINK_PARAM_TYPE_BOOL: return NLA_FLAG; default: return -EINVAL; } } static int devlink_nl_param_value_fill_one(struct sk_buff *msg, enum devlink_param_type type, enum devlink_param_cmode cmode, union devlink_param_value val) { struct nlattr *param_value_attr; param_value_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_PARAM_VALUE); if (!param_value_attr) goto nla_put_failure; if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_CMODE, cmode)) goto value_nest_cancel; switch (type) { case DEVLINK_PARAM_TYPE_U8: if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu8)) goto value_nest_cancel; break; case DEVLINK_PARAM_TYPE_U16: if (nla_put_u16(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu16)) goto value_nest_cancel; break; case DEVLINK_PARAM_TYPE_U32: if (nla_put_u32(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vu32)) goto value_nest_cancel; break; case DEVLINK_PARAM_TYPE_STRING: if (nla_put_string(msg, DEVLINK_ATTR_PARAM_VALUE_DATA, val.vstr)) goto value_nest_cancel; break; case DEVLINK_PARAM_TYPE_BOOL: if (val.vbool && nla_put_flag(msg, DEVLINK_ATTR_PARAM_VALUE_DATA)) goto value_nest_cancel; break; } nla_nest_end(msg, param_value_attr); return 0; value_nest_cancel: nla_nest_cancel(msg, param_value_attr); nla_put_failure: return -EMSGSIZE; } static int devlink_nl_param_fill(struct sk_buff *msg, struct devlink *devlink, unsigned int port_index, struct devlink_param_item *param_item, enum devlink_command cmd, u32 portid, u32 seq, int flags) { union devlink_param_value param_value[DEVLINK_PARAM_CMODE_MAX + 1]; bool param_value_set[DEVLINK_PARAM_CMODE_MAX + 1] = {}; const struct devlink_param *param = param_item->param; struct devlink_param_gset_ctx ctx; struct nlattr *param_values_list; struct nlattr *param_attr; int nla_type; void *hdr; int err; int i; /* Get value from driver part to driverinit configuration mode */ for (i = 0; i <= DEVLINK_PARAM_CMODE_MAX; i++) { if (!devlink_param_cmode_is_supported(param, i)) continue; if (i == DEVLINK_PARAM_CMODE_DRIVERINIT) { if (param_item->driverinit_value_new_valid) param_value[i] = param_item->driverinit_value_new; else if (param_item->driverinit_value_valid) param_value[i] = param_item->driverinit_value; else return -EOPNOTSUPP; } else { ctx.cmode = i; err = devlink_param_get(devlink, param, &ctx); if (err) return err; param_value[i] = ctx.val; } 
param_value_set[i] = true; } hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); if (!hdr) return -EMSGSIZE; if (devlink_nl_put_handle(msg, devlink)) goto genlmsg_cancel; if (cmd == DEVLINK_CMD_PORT_PARAM_GET || cmd == DEVLINK_CMD_PORT_PARAM_NEW || cmd == DEVLINK_CMD_PORT_PARAM_DEL) if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, port_index)) goto genlmsg_cancel; param_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_PARAM); if (!param_attr) goto genlmsg_cancel; if (nla_put_string(msg, DEVLINK_ATTR_PARAM_NAME, param->name)) goto param_nest_cancel; if (param->generic && nla_put_flag(msg, DEVLINK_ATTR_PARAM_GENERIC)) goto param_nest_cancel; nla_type = devlink_param_type_to_nla_type(param->type); if (nla_type < 0) goto param_nest_cancel; if (nla_put_u8(msg, DEVLINK_ATTR_PARAM_TYPE, nla_type)) goto param_nest_cancel; param_values_list = nla_nest_start_noflag(msg, DEVLINK_ATTR_PARAM_VALUES_LIST); if (!param_values_list) goto param_nest_cancel; for (i = 0; i <= DEVLINK_PARAM_CMODE_MAX; i++) { if (!param_value_set[i]) continue; err = devlink_nl_param_value_fill_one(msg, param->type, i, param_value[i]); if (err) goto values_list_nest_cancel; } nla_nest_end(msg, param_values_list); nla_nest_end(msg, param_attr); genlmsg_end(msg, hdr); return 0; values_list_nest_cancel: nla_nest_end(msg, param_values_list); param_nest_cancel: nla_nest_cancel(msg, param_attr); genlmsg_cancel: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static void devlink_param_notify(struct devlink *devlink, unsigned int port_index, struct devlink_param_item *param_item, enum devlink_command cmd) { struct sk_buff *msg; int err; WARN_ON(cmd != DEVLINK_CMD_PARAM_NEW && cmd != DEVLINK_CMD_PARAM_DEL && cmd != DEVLINK_CMD_PORT_PARAM_NEW && cmd != DEVLINK_CMD_PORT_PARAM_DEL); /* devlink_notify_register() / devlink_notify_unregister() * will replay the notifications if the params are added/removed * outside of the lifetime of the instance. 
*/ if (!devl_is_registered(devlink) || !devlink_nl_notify_need(devlink)) return; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return; err = devlink_nl_param_fill(msg, devlink, port_index, param_item, cmd, 0, 0, 0); if (err) { nlmsg_free(msg); return; } devlink_nl_notify_send(devlink, msg); } static void devlink_params_notify(struct devlink *devlink, enum devlink_command cmd) { struct devlink_param_item *param_item; unsigned long param_id; xa_for_each(&devlink->params, param_id, param_item) devlink_param_notify(devlink, 0, param_item, cmd); } void devlink_params_notify_register(struct devlink *devlink) { devlink_params_notify(devlink, DEVLINK_CMD_PARAM_NEW); } void devlink_params_notify_unregister(struct devlink *devlink) { devlink_params_notify(devlink, DEVLINK_CMD_PARAM_DEL); } static int devlink_nl_param_get_dump_one(struct sk_buff *msg, struct devlink *devlink, struct netlink_callback *cb, int flags) { struct devlink_nl_dump_state *state = devlink_dump_state(cb); struct devlink_param_item *param_item; unsigned long param_id; int err = 0; xa_for_each_start(&devlink->params, param_id, param_item, state->idx) { err = devlink_nl_param_fill(msg, devlink, 0, param_item, DEVLINK_CMD_PARAM_GET, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, flags); if (err == -EOPNOTSUPP) { err = 0; } else if (err) { state->idx = param_id; break; } } return err; } int devlink_nl_param_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { return devlink_nl_dumpit(skb, cb, devlink_nl_param_get_dump_one); } static int devlink_param_type_get_from_info(struct genl_info *info, enum devlink_param_type *param_type) { if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PARAM_TYPE)) return -EINVAL; switch (nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_TYPE])) { case NLA_U8: *param_type = DEVLINK_PARAM_TYPE_U8; break; case NLA_U16: *param_type = DEVLINK_PARAM_TYPE_U16; break; case NLA_U32: *param_type = DEVLINK_PARAM_TYPE_U32; break; case NLA_STRING: *param_type = DEVLINK_PARAM_TYPE_STRING; break; case NLA_FLAG: *param_type = DEVLINK_PARAM_TYPE_BOOL; break; default: return -EINVAL; } return 0; } static int devlink_param_value_get_from_info(const struct devlink_param *param, struct genl_info *info, union devlink_param_value *value) { struct nlattr *param_data; int len; param_data = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]; if (param->type != DEVLINK_PARAM_TYPE_BOOL && !param_data) return -EINVAL; switch (param->type) { case DEVLINK_PARAM_TYPE_U8: if (nla_len(param_data) != sizeof(u8)) return -EINVAL; value->vu8 = nla_get_u8(param_data); break; case DEVLINK_PARAM_TYPE_U16: if (nla_len(param_data) != sizeof(u16)) return -EINVAL; value->vu16 = nla_get_u16(param_data); break; case DEVLINK_PARAM_TYPE_U32: if (nla_len(param_data) != sizeof(u32)) return -EINVAL; value->vu32 = nla_get_u32(param_data); break; case DEVLINK_PARAM_TYPE_STRING: len = strnlen(nla_data(param_data), nla_len(param_data)); if (len == nla_len(param_data) || len >= __DEVLINK_PARAM_MAX_STRING_VALUE) return -EINVAL; strcpy(value->vstr, nla_data(param_data)); break; case DEVLINK_PARAM_TYPE_BOOL: if (param_data && nla_len(param_data)) return -EINVAL; value->vbool = nla_get_flag(param_data); break; } return 0; } static struct devlink_param_item * devlink_param_get_from_info(struct xarray *params, struct genl_info *info) { char *param_name; if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PARAM_NAME)) return NULL; param_name = nla_data(info->attrs[DEVLINK_ATTR_PARAM_NAME]); return devlink_param_find_by_name(params, param_name); } int 
devlink_nl_param_get_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; struct devlink_param_item *param_item; struct sk_buff *msg; int err; param_item = devlink_param_get_from_info(&devlink->params, info); if (!param_item) return -EINVAL; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; err = devlink_nl_param_fill(msg, devlink, 0, param_item, DEVLINK_CMD_PARAM_GET, info->snd_portid, info->snd_seq, 0); if (err) { nlmsg_free(msg); return err; } return genlmsg_reply(msg, info); } static int __devlink_nl_cmd_param_set_doit(struct devlink *devlink, unsigned int port_index, struct xarray *params, struct genl_info *info, enum devlink_command cmd) { enum devlink_param_type param_type; struct devlink_param_gset_ctx ctx; enum devlink_param_cmode cmode; struct devlink_param_item *param_item; const struct devlink_param *param; union devlink_param_value value; int err = 0; param_item = devlink_param_get_from_info(params, info); if (!param_item) return -EINVAL; param = param_item->param; err = devlink_param_type_get_from_info(info, ¶m_type); if (err) return err; if (param_type != param->type) return -EINVAL; err = devlink_param_value_get_from_info(param, info, &value); if (err) return err; if (param->validate) { err = param->validate(devlink, param->id, value, info->extack); if (err) return err; } if (GENL_REQ_ATTR_CHECK(info, DEVLINK_ATTR_PARAM_VALUE_CMODE)) return -EINVAL; cmode = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_CMODE]); if (!devlink_param_cmode_is_supported(param, cmode)) return -EOPNOTSUPP; if (cmode == DEVLINK_PARAM_CMODE_DRIVERINIT) { param_item->driverinit_value_new = value; param_item->driverinit_value_new_valid = true; } else { if (!param->set) return -EOPNOTSUPP; ctx.val = value; ctx.cmode = cmode; err = devlink_param_set(devlink, param, &ctx, info->extack); if (err) return err; } devlink_param_notify(devlink, port_index, param_item, cmd); return 0; } int devlink_nl_param_set_doit(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; return __devlink_nl_cmd_param_set_doit(devlink, 0, &devlink->params, info, DEVLINK_CMD_PARAM_NEW); } int devlink_nl_port_param_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb) { NL_SET_ERR_MSG(cb->extack, "Port params are not supported"); return msg->len; } int devlink_nl_port_param_get_doit(struct sk_buff *skb, struct genl_info *info) { NL_SET_ERR_MSG(info->extack, "Port params are not supported"); return -EINVAL; } int devlink_nl_port_param_set_doit(struct sk_buff *skb, struct genl_info *info) { NL_SET_ERR_MSG(info->extack, "Port params are not supported"); return -EINVAL; } static int devlink_param_verify(const struct devlink_param *param) { if (!param || !param->name || !param->supported_cmodes) return -EINVAL; if (param->generic) return devlink_param_generic_verify(param); else return devlink_param_driver_verify(param); } static int devlink_param_register(struct devlink *devlink, const struct devlink_param *param) { struct devlink_param_item *param_item; int err; WARN_ON(devlink_param_verify(param)); WARN_ON(devlink_param_find_by_name(&devlink->params, param->name)); if (param->supported_cmodes == BIT(DEVLINK_PARAM_CMODE_DRIVERINIT)) WARN_ON(param->get || param->set); else WARN_ON(!param->get || !param->set); param_item = kzalloc(sizeof(*param_item), GFP_KERNEL); if (!param_item) return -ENOMEM; param_item->param = param; err = xa_insert(&devlink->params, param->id, param_item, GFP_KERNEL); if (err) goto err_xa_insert; 
devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_NEW); return 0; err_xa_insert: kfree(param_item); return err; } static void devlink_param_unregister(struct devlink *devlink, const struct devlink_param *param) { struct devlink_param_item *param_item; param_item = devlink_param_find_by_id(&devlink->params, param->id); if (WARN_ON(!param_item)) return; devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_DEL); xa_erase(&devlink->params, param->id); kfree(param_item); } /** * devl_params_register - register configuration parameters * * @devlink: devlink * @params: configuration parameters array * @params_count: number of parameters provided * * Register the configuration parameters supported by the driver. */ int devl_params_register(struct devlink *devlink, const struct devlink_param *params, size_t params_count) { const struct devlink_param *param = params; int i, err; lockdep_assert_held(&devlink->lock); for (i = 0; i < params_count; i++, param++) { err = devlink_param_register(devlink, param); if (err) goto rollback; } return 0; rollback: if (!i) return err; for (param--; i > 0; i--, param--) devlink_param_unregister(devlink, param); return err; } EXPORT_SYMBOL_GPL(devl_params_register); int devlink_params_register(struct devlink *devlink, const struct devlink_param *params, size_t params_count) { int err; devl_lock(devlink); err = devl_params_register(devlink, params, params_count); devl_unlock(devlink); return err; } EXPORT_SYMBOL_GPL(devlink_params_register); /** * devl_params_unregister - unregister configuration parameters * @devlink: devlink * @params: configuration parameters to unregister * @params_count: number of parameters provided */ void devl_params_unregister(struct devlink *devlink, const struct devlink_param *params, size_t params_count) { const struct devlink_param *param = params; int i; lockdep_assert_held(&devlink->lock); for (i = 0; i < params_count; i++, param++) devlink_param_unregister(devlink, param); } EXPORT_SYMBOL_GPL(devl_params_unregister); void devlink_params_unregister(struct devlink *devlink, const struct devlink_param *params, size_t params_count) { devl_lock(devlink); devl_params_unregister(devlink, params, params_count); devl_unlock(devlink); } EXPORT_SYMBOL_GPL(devlink_params_unregister); /** * devl_param_driverinit_value_get - get configuration parameter * value for driver initializing * * @devlink: devlink * @param_id: parameter ID * @val: pointer to store the value of parameter in driverinit * configuration mode * * This function should be used by the driver to get driverinit * configuration for initialization after reload command. * * Note that lockless call of this function relies on the * driver to maintain following basic sane behavior: * 1) Driver ensures a call to this function cannot race with * registering/unregistering the parameter with the same parameter ID. * 2) Driver ensures a call to this function cannot race with * devl_param_driverinit_value_set() call with the same parameter ID. * 3) Driver ensures a call to this function cannot race with * reload operation. * If the driver is not able to comply, it has to take the devlink->lock * while calling this. 
 */
int devl_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
				    union devlink_param_value *val)
{
	struct devlink_param_item *param_item;

	if (WARN_ON(!devlink_reload_supported(devlink->ops)))
		return -EOPNOTSUPP;

	param_item = devlink_param_find_by_id(&devlink->params, param_id);
	if (!param_item)
		return -EINVAL;

	if (!param_item->driverinit_value_valid)
		return -EOPNOTSUPP;

	if (WARN_ON(!devlink_param_cmode_is_supported(param_item->param,
						      DEVLINK_PARAM_CMODE_DRIVERINIT)))
		return -EOPNOTSUPP;

	*val = param_item->driverinit_value;

	return 0;
}
EXPORT_SYMBOL_GPL(devl_param_driverinit_value_get);

/**
 * devl_param_driverinit_value_set - set value of configuration
 *                                   parameter for driverinit
 *                                   configuration mode
 *
 * @devlink: devlink
 * @param_id: parameter ID
 * @init_val: value of parameter to set for driverinit configuration mode
 *
 * This function should be used by the driver to set driverinit
 * configuration mode default value.
 */
void devl_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
				     union devlink_param_value init_val)
{
	struct devlink_param_item *param_item;

	devl_assert_locked(devlink);

	param_item = devlink_param_find_by_id(&devlink->params, param_id);
	if (WARN_ON(!param_item))
		return;

	if (WARN_ON(!devlink_param_cmode_is_supported(param_item->param,
						      DEVLINK_PARAM_CMODE_DRIVERINIT)))
		return;

	param_item->driverinit_value = init_val;
	param_item->driverinit_value_valid = true;

	devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_NEW);
}
EXPORT_SYMBOL_GPL(devl_param_driverinit_value_set);

void devlink_params_driverinit_load_new(struct devlink *devlink)
{
	struct devlink_param_item *param_item;
	unsigned long param_id;

	xa_for_each(&devlink->params, param_id, param_item) {
		if (!devlink_param_cmode_is_supported(param_item->param,
						      DEVLINK_PARAM_CMODE_DRIVERINIT) ||
		    !param_item->driverinit_value_new_valid)
			continue;
		param_item->driverinit_value =
			param_item->driverinit_value_new;
		param_item->driverinit_value_valid = true;
		param_item->driverinit_value_new_valid = false;
	}
}

/**
 * devl_param_value_changed - notify devlink on a parameter's value
 *                            change. Should be called by the driver
 *                            right after the change.
 *
 * @devlink: devlink
 * @param_id: parameter ID
 *
 * This function should be used by the driver to notify devlink on value
 * change, excluding driverinit configuration mode.
 * For driverinit configuration mode the driver should use
 * devl_param_driverinit_value_set() instead.
 */
void devl_param_value_changed(struct devlink *devlink, u32 param_id)
{
	struct devlink_param_item *param_item;

	param_item = devlink_param_find_by_id(&devlink->params, param_id);
	WARN_ON(!param_item);

	devlink_param_notify(devlink, 0, param_item, DEVLINK_CMD_PARAM_NEW);
}
EXPORT_SYMBOL_GPL(devl_param_value_changed);
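Taken together, a driver consumes the API above roughly as follows: register its parameter table under the instance lock, then seed driverinit defaults that userspace may override across a reload. The sketch below is a hedged illustration, not code from this file: example_params and example_params_init() are invented names, only the devl_*() calls and the DEVLINK_PARAM_GENERIC() initializer are real kernel API, and driverinit values are only meaningful when the driver supports devlink reload.

/*
 * Driver-side sketch (hypothetical driver): one generic parameter,
 * driverinit-only, so the get/set callbacks must be NULL.
 */
#include <linux/kernel.h>
#include <net/devlink.h>

static const struct devlink_param example_params[] = {
	DEVLINK_PARAM_GENERIC(MAX_MACS, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, NULL),
};

static int example_params_init(struct devlink *devlink)
{
	union devlink_param_value val;
	int err;

	devl_lock(devlink);
	err = devl_params_register(devlink, example_params,
				   ARRAY_SIZE(example_params));
	if (err)
		goto out;

	/* Default until userspace sets a new value and reloads. */
	val.vu32 = 128;
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
					val);
out:
	devl_unlock(devlink);
	return err;
}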
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ppp_deflate.c - interface the zlib procedures for Deflate compression
 * and decompression (as used by gzip) to the PPP code.
 *
 * Copyright 1994-1998 Paul Mackerras.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/string.h>

#include <linux/ppp_defs.h>
#include <linux/ppp-comp.h>

#include <linux/zlib.h>
#include <asm/unaligned.h>

/*
 * State for a Deflate (de)compressor.
 */
struct ppp_deflate_state {
	int		seqno;
	int		w_size;
	int		unit;
	int		mru;
	int		debug;
	z_stream	strm;
	struct compstat stats;
};

#define DEFLATE_OVHD	2		/* Deflate overhead/packet */

static void	*z_comp_alloc(unsigned char *options, int opt_len);
static void	*z_decomp_alloc(unsigned char *options, int opt_len);
static void	z_comp_free(void *state);
static void	z_decomp_free(void *state);
static int	z_comp_init(void *state, unsigned char *options,
			    int opt_len, int unit, int hdrlen, int debug);
static int	z_decomp_init(void *state, unsigned char *options,
			      int opt_len, int unit, int hdrlen, int mru,
			      int debug);
static int	z_compress(void *state, unsigned char *rptr,
			   unsigned char *obuf, int isize, int osize);
static void	z_incomp(void *state, unsigned char *ibuf, int icnt);
static int	z_decompress(void *state, unsigned char *ibuf, int isize,
			     unsigned char *obuf, int osize);
static void	z_comp_reset(void *state);
static void	z_decomp_reset(void *state);
static void	z_comp_stats(void *state, struct compstat *stats);

/**
 * z_comp_free - free the memory used by a compressor
 * @arg: pointer to the private state for the compressor.
 */
static void z_comp_free(void *arg)
{
	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;

	if (state) {
		zlib_deflateEnd(&state->strm);
		vfree(state->strm.workspace);
		kfree(state);
	}
}

/**
 * z_comp_alloc - allocate space for a compressor.
 * @options: pointer to CCP option data
 * @opt_len: length of the CCP option at @options.
 *
 * The @options pointer points to a buffer containing the
 * CCP option data for the compression being negotiated. It is
 * formatted according to RFC1979, and describes the window
 * size that the peer is requesting that we use in compressing
 * data to be sent to it.
 *
 * Returns the pointer to the private state for the compressor,
 * or NULL if we could not allocate enough memory.
 */
static void *z_comp_alloc(unsigned char *options, int opt_len)
{
	struct ppp_deflate_state *state;
	int w_size;

	if (opt_len != CILEN_DEFLATE ||
	    (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
	    options[1] != CILEN_DEFLATE ||
	    DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
	    options[3] != DEFLATE_CHK_SEQUENCE)
		return NULL;
	w_size = DEFLATE_SIZE(options[2]);
	if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
		return NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return NULL;

	state->strm.next_in   = NULL;
	state->w_size         = w_size;
	state->strm.workspace = vmalloc(zlib_deflate_workspacesize(-w_size, 8));
	if (state->strm.workspace == NULL)
		goto out_free;

	if (zlib_deflateInit2(&state->strm, Z_DEFAULT_COMPRESSION,
			      DEFLATE_METHOD_VAL, -w_size, 8,
			      Z_DEFAULT_STRATEGY) != Z_OK)
		goto out_free;
	return (void *) state;

out_free:
	z_comp_free(state);
	return NULL;
}

/**
 * z_comp_init - initialize a previously-allocated compressor.
 * @arg: pointer to the private state for the compressor
 * @options: pointer to the CCP option data describing the
 *	compression that was negotiated with the peer
 * @opt_len: length of the CCP option data at @options
 * @unit: PPP unit number for diagnostic messages
 * @hdrlen: ignored (present for backwards compatibility)
 * @debug: debug flag; if non-zero, debug messages are printed.
 *
 * The CCP options described by @options must match the options
 * specified when the compressor was allocated. The compressor
 * history is reset. Returns 0 for failure (CCP options don't
 * match) or 1 for success.
*/ static int z_comp_init(void *arg, unsigned char *options, int opt_len, int unit, int hdrlen, int debug) { struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg; if (opt_len < CILEN_DEFLATE || (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) || options[1] != CILEN_DEFLATE || DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL || DEFLATE_SIZE(options[2]) != state->w_size || options[3] != DEFLATE_CHK_SEQUENCE) return 0; state->seqno = 0; state->unit = unit; state->debug = debug; zlib_deflateReset(&state->strm); return 1; } /** * z_comp_reset - reset a previously-allocated compressor. * @arg: pointer to private state for the compressor. * * This clears the history for the compressor and makes it * ready to start emitting a new compressed stream. */ static void z_comp_reset(void *arg) { struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg; state->seqno = 0; zlib_deflateReset(&state->strm); } /** * z_compress - compress a PPP packet with Deflate compression. * @arg: pointer to private state for the compressor * @rptr: uncompressed packet (input) * @obuf: compressed packet (output) * @isize: size of uncompressed packet * @osize: space available at @obuf * * Returns the length of the compressed packet, or 0 if the * packet is incompressible. */ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf, int isize, int osize) { struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg; int r, proto, off, olen, oavail; unsigned char *wptr; /* * Check that the protocol is in the range we handle. */ proto = PPP_PROTOCOL(rptr); if (proto > 0x3fff || proto == 0xfd || proto == 0xfb) return 0; /* Don't generate compressed packets which are larger than the uncompressed packet. */ if (osize > isize) osize = isize; wptr = obuf; /* * Copy over the PPP header and store the 2-byte sequence number. */ wptr[0] = PPP_ADDRESS(rptr); wptr[1] = PPP_CONTROL(rptr); put_unaligned_be16(PPP_COMP, wptr + 2); wptr += PPP_HDRLEN; put_unaligned_be16(state->seqno, wptr); wptr += DEFLATE_OVHD; olen = PPP_HDRLEN + DEFLATE_OVHD; state->strm.next_out = wptr; state->strm.avail_out = oavail = osize - olen; ++state->seqno; off = (proto > 0xff) ? 2 : 3; /* skip 1st proto byte if 0 */ rptr += off; state->strm.next_in = rptr; state->strm.avail_in = (isize - off); for (;;) { r = zlib_deflate(&state->strm, Z_PACKET_FLUSH); if (r != Z_OK) { if (state->debug) printk(KERN_ERR "z_compress: deflate returned %d\n", r); break; } if (state->strm.avail_out == 0) { olen += oavail; state->strm.next_out = NULL; state->strm.avail_out = oavail = 1000000; } else { break; /* all done */ } } olen += oavail - state->strm.avail_out; /* * See if we managed to reduce the size of the packet. */ if (olen < isize && olen <= osize) { state->stats.comp_bytes += olen; state->stats.comp_packets++; } else { state->stats.inc_bytes += isize; state->stats.inc_packets++; olen = 0; } state->stats.unc_bytes += isize; state->stats.unc_packets++; return olen; } /** * z_comp_stats - return compression statistics for a compressor * or decompressor. * @arg: pointer to private space for the (de)compressor * @stats: pointer to a struct compstat to receive the result. */ static void z_comp_stats(void *arg, struct compstat *stats) { struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg; *stats = state->stats; } /** * z_decomp_free - Free the memory used by a decompressor. * @arg: pointer to private space for the decompressor. 
 */
static void z_decomp_free(void *arg)
{
	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;

	if (state) {
		vfree(state->strm.workspace);
		kfree(state);
	}
}

/**
 * z_decomp_alloc - allocate space for a decompressor.
 * @options: pointer to CCP option data
 * @opt_len: length of the CCP option at @options.
 *
 * The @options pointer points to a buffer containing the
 * CCP option data for the compression being negotiated. It is
 * formatted according to RFC1979, and describes the window
 * size that we are requesting the peer to use in compressing
 * data to be sent to us.
 *
 * Returns the pointer to the private state for the decompressor,
 * or NULL if we could not allocate enough memory.
 */
static void *z_decomp_alloc(unsigned char *options, int opt_len)
{
	struct ppp_deflate_state *state;
	int w_size;

	if (opt_len != CILEN_DEFLATE ||
	    (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
	    options[1] != CILEN_DEFLATE ||
	    DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
	    options[3] != DEFLATE_CHK_SEQUENCE)
		return NULL;
	w_size = DEFLATE_SIZE(options[2]);
	if (w_size < DEFLATE_MIN_SIZE || w_size > DEFLATE_MAX_SIZE)
		return NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return NULL;

	state->w_size         = w_size;
	state->strm.next_out  = NULL;
	state->strm.workspace = vmalloc(zlib_inflate_workspacesize());
	if (state->strm.workspace == NULL)
		goto out_free;

	if (zlib_inflateInit2(&state->strm, -w_size) != Z_OK)
		goto out_free;
	return (void *) state;

out_free:
	z_decomp_free(state);
	return NULL;
}

/**
 * z_decomp_init - initialize a previously-allocated decompressor.
 * @arg: pointer to the private state for the decompressor
 * @options: pointer to the CCP option data describing the
 *	compression that was negotiated with the peer
 * @opt_len: length of the CCP option data at @options
 * @unit: PPP unit number for diagnostic messages
 * @hdrlen: ignored (present for backwards compatibility)
 * @mru: maximum length of decompressed packets
 * @debug: debug flag; if non-zero, debug messages are printed.
 *
 * The CCP options described by @options must match the options
 * specified when the decompressor was allocated. The decompressor
 * history is reset. Returns 0 for failure (CCP options don't
 * match) or 1 for success.
 */
static int z_decomp_init(void *arg, unsigned char *options, int opt_len,
			 int unit, int hdrlen, int mru, int debug)
{
	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;

	if (opt_len < CILEN_DEFLATE ||
	    (options[0] != CI_DEFLATE && options[0] != CI_DEFLATE_DRAFT) ||
	    options[1] != CILEN_DEFLATE ||
	    DEFLATE_METHOD(options[2]) != DEFLATE_METHOD_VAL ||
	    DEFLATE_SIZE(options[2]) != state->w_size ||
	    options[3] != DEFLATE_CHK_SEQUENCE)
		return 0;

	state->seqno = 0;
	state->unit  = unit;
	state->debug = debug;
	state->mru   = mru;

	zlib_inflateReset(&state->strm);

	return 1;
}

/**
 * z_decomp_reset - reset a previously-allocated decompressor.
 * @arg: pointer to private state for the decompressor.
 *
 * This clears the history for the decompressor and makes it
 * ready to receive a new compressed stream.
 */
static void z_decomp_reset(void *arg)
{
	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;

	state->seqno = 0;
	zlib_inflateReset(&state->strm);
}

/**
 * z_decompress - decompress a Deflate-compressed packet.
* @arg: pointer to private state for the decompressor * @ibuf: pointer to input (compressed) packet data * @isize: length of input packet * @obuf: pointer to space for output (decompressed) packet * @osize: amount of space available at @obuf * * Because of patent problems, we return DECOMP_ERROR for errors * found by inspecting the input data and for system problems, but * DECOMP_FATALERROR for any errors which could possibly be said to * be being detected "after" decompression. For DECOMP_ERROR, * we can issue a CCP reset-request; for DECOMP_FATALERROR, we may be * infringing a patent of Motorola's if we do, so we take CCP down * instead. * * Given that the frame has the correct sequence number and a good FCS, * errors such as invalid codes in the input most likely indicate a * bug, so we return DECOMP_FATALERROR for them in order to turn off * compression, even though they are detected by inspecting the input. */ static int z_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, int osize) { struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg; int olen, seq, r; int decode_proto, overflow; unsigned char overflow_buf[1]; if (isize <= PPP_HDRLEN + DEFLATE_OVHD) { if (state->debug) printk(KERN_DEBUG "z_decompress%d: short pkt (%d)\n", state->unit, isize); return DECOMP_ERROR; } /* Check the sequence number. */ seq = get_unaligned_be16(ibuf + PPP_HDRLEN); if (seq != (state->seqno & 0xffff)) { if (state->debug) printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n", state->unit, seq, state->seqno & 0xffff); return DECOMP_ERROR; } ++state->seqno; /* * Fill in the first part of the PPP header. The protocol field * comes from the decompressed data. */ obuf[0] = PPP_ADDRESS(ibuf); obuf[1] = PPP_CONTROL(ibuf); obuf[2] = 0; /* * Set up to call inflate. We set avail_out to 1 initially so we can * look at the first byte of the output and decide whether we have * a 1-byte or 2-byte protocol field. */ state->strm.next_in = ibuf + PPP_HDRLEN + DEFLATE_OVHD; state->strm.avail_in = isize - (PPP_HDRLEN + DEFLATE_OVHD); state->strm.next_out = obuf + 3; state->strm.avail_out = 1; decode_proto = 1; overflow = 0; /* * Call inflate, supplying more input or output as needed. */ for (;;) { r = zlib_inflate(&state->strm, Z_PACKET_FLUSH); if (r != Z_OK) { if (state->debug) printk(KERN_DEBUG "z_decompress%d: inflate returned %d (%s)\n", state->unit, r, (state->strm.msg? state->strm.msg: "")); return DECOMP_FATALERROR; } if (state->strm.avail_out != 0) break; /* all done */ if (decode_proto) { state->strm.avail_out = osize - PPP_HDRLEN; if ((obuf[3] & 1) == 0) { /* 2-byte protocol field */ obuf[2] = obuf[3]; --state->strm.next_out; ++state->strm.avail_out; } decode_proto = 0; } else if (!overflow) { /* * We've filled up the output buffer; the only way to * find out whether inflate has any more characters * left is to give it another byte of output space. */ state->strm.next_out = overflow_buf; state->strm.avail_out = 1; overflow = 1; } else { if (state->debug) printk(KERN_DEBUG "z_decompress%d: ran out of mru\n", state->unit); return DECOMP_FATALERROR; } } if (decode_proto) { if (state->debug) printk(KERN_DEBUG "z_decompress%d: didn't get proto\n", state->unit); return DECOMP_ERROR; } olen = osize + overflow - state->strm.avail_out; state->stats.unc_bytes += olen; state->stats.unc_packets++; state->stats.comp_bytes += isize; state->stats.comp_packets++; return olen; } /** * z_incomp - add incompressible input data to the history. 
 * @arg: pointer to private state for the decompressor
 * @ibuf: pointer to input packet data
 * @icnt: length of input data.
 */
static void z_incomp(void *arg, unsigned char *ibuf, int icnt)
{
	struct ppp_deflate_state *state = (struct ppp_deflate_state *) arg;
	int proto, r;

	/*
	 * Check that the protocol is one we handle.
	 */
	proto = PPP_PROTOCOL(ibuf);
	if (proto > 0x3fff || proto == 0xfd || proto == 0xfb)
		return;

	++state->seqno;

	/*
	 * We start at either the 1st or 2nd byte of the protocol field,
	 * depending on whether the protocol value is compressible.
	 */
	state->strm.next_in = ibuf + 3;
	state->strm.avail_in = icnt - 3;
	if (proto > 0xff) {
		--state->strm.next_in;
		++state->strm.avail_in;
	}

	r = zlib_inflateIncomp(&state->strm);
	if (r != Z_OK) {
		/* gak! */
		if (state->debug) {
			printk(KERN_DEBUG "z_incomp%d: inflateIncomp returned %d (%s)\n",
			       state->unit, r,
			       (state->strm.msg? state->strm.msg: ""));
		}
		return;
	}

	/*
	 * Update stats.
	 */
	state->stats.inc_bytes += icnt;
	state->stats.inc_packets++;
	state->stats.unc_bytes += icnt;
	state->stats.unc_packets++;
}

/*************************************************************
 * Module interface table
 *************************************************************/

/* These are in ppp_generic.c */
extern int  ppp_register_compressor   (struct compressor *cp);
extern void ppp_unregister_compressor (struct compressor *cp);

/*
 * Procedures exported to if_ppp.c.
 */
static struct compressor ppp_deflate = {
	.compress_proto =	CI_DEFLATE,
	.comp_alloc =		z_comp_alloc,
	.comp_free =		z_comp_free,
	.comp_init =		z_comp_init,
	.comp_reset =		z_comp_reset,
	.compress =		z_compress,
	.comp_stat =		z_comp_stats,
	.decomp_alloc =		z_decomp_alloc,
	.decomp_free =		z_decomp_free,
	.decomp_init =		z_decomp_init,
	.decomp_reset =		z_decomp_reset,
	.decompress =		z_decompress,
	.incomp =		z_incomp,
	.decomp_stat =		z_comp_stats,
	.owner =		THIS_MODULE
};

static struct compressor ppp_deflate_draft = {
	.compress_proto =	CI_DEFLATE_DRAFT,
	.comp_alloc =		z_comp_alloc,
	.comp_free =		z_comp_free,
	.comp_init =		z_comp_init,
	.comp_reset =		z_comp_reset,
	.compress =		z_compress,
	.comp_stat =		z_comp_stats,
	.decomp_alloc =		z_decomp_alloc,
	.decomp_free =		z_decomp_free,
	.decomp_init =		z_decomp_init,
	.decomp_reset =		z_decomp_reset,
	.decompress =		z_decompress,
	.incomp =		z_incomp,
	.decomp_stat =		z_comp_stats,
	.owner =		THIS_MODULE
};

static int __init deflate_init(void)
{
	int rc;

	rc = ppp_register_compressor(&ppp_deflate);
	if (rc)
		return rc;

	rc = ppp_register_compressor(&ppp_deflate_draft);
	if (rc) {
		ppp_unregister_compressor(&ppp_deflate);
		return rc;
	}

	pr_info("PPP Deflate Compression module registered\n");
	return 0;
}

static void __exit deflate_cleanup(void)
{
	ppp_unregister_compressor(&ppp_deflate);
	ppp_unregister_compressor(&ppp_deflate_draft);
}

module_init(deflate_init);
module_exit(deflate_cleanup);
MODULE_DESCRIPTION("PPP Deflate compression module");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE));
MODULE_ALIAS("ppp-compress-" __stringify(CI_DEFLATE_DRAFT));
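One detail worth calling out: both allocators pass a negative window size (-w_size) into zlib, which selects a raw deflate stream with no zlib header, trailer, or checksum, so only the PPP sequence number and the deflate data go on the wire. The user-space sketch below shows the same raw-deflate configuration with stock zlib (the kernel uses its own zlib_* wrappers); compile with -lz. The buffer sizes and sample string are arbitrary.

/*
 * Raw-deflate round trip with negative windowBits, mirroring the
 * zlib_deflateInit2(..., -w_size, ...) call above. Illustrative only.
 */
#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
	unsigned char in[] = "the quick brown fox jumps over the lazy dog";
	unsigned char comp[256], out[256];
	z_stream c = {0}, d = {0};

	/* 15 = largest window; negative => no zlib header or trailer */
	deflateInit2(&c, Z_DEFAULT_COMPRESSION, Z_DEFLATED, -15, 8,
		     Z_DEFAULT_STRATEGY);
	c.next_in  = in;   c.avail_in  = sizeof(in);
	c.next_out = comp; c.avail_out = sizeof(comp);
	deflate(&c, Z_FINISH);

	inflateInit2(&d, -15);
	d.next_in  = comp; d.avail_in  = sizeof(comp) - c.avail_out;
	d.next_out = out;  d.avail_out = sizeof(out);
	inflate(&d, Z_FINISH);

	printf("%zu -> %lu -> %lu bytes\n", sizeof(in),
	       (unsigned long)(sizeof(comp) - c.avail_out),
	       (unsigned long)(sizeof(out) - d.avail_out));
	deflateEnd(&c);
	inflateEnd(&d);
	return 0;
}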
// SPDX-License-Identifier: GPL-2.0-or-later
/*
    i2c-dev.c - i2c-bus driver, char device interface

    Copyright (C) 1995-97 Simon G.
Vogl Copyright (C) 1998-99 Frodo Looijaard <frodol@dds.nl> Copyright (C) 2003 Greg Kroah-Hartman <greg@kroah.com> */ /* Note that this is a complete rewrite of Simon Vogl's i2c-dev module. But I have used so much of his original code and ideas that it seems only fair to recognize him as co-author -- Frodo */ /* The I2C_RDWR ioctl code is written by Kolja Waschk <waschk@telos.de> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/cdev.h> #include <linux/compat.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/i2c-dev.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/notifier.h> #include <linux/slab.h> #include <linux/uaccess.h> /* * An i2c_dev represents an i2c_adapter ... an I2C or SMBus master, not a * slave (i2c_client) with which messages will be exchanged. It's coupled * with a character special file which is accessed by user mode drivers. * * The list of i2c_dev structures is parallel to the i2c_adapter lists * maintained by the driver model, and is updated using bus notifications. */ struct i2c_dev { struct list_head list; struct i2c_adapter *adap; struct device dev; struct cdev cdev; }; #define I2C_MINORS (MINORMASK + 1) static LIST_HEAD(i2c_dev_list); static DEFINE_SPINLOCK(i2c_dev_list_lock); static struct i2c_dev *i2c_dev_get_by_minor(unsigned index) { struct i2c_dev *i2c_dev; spin_lock(&i2c_dev_list_lock); list_for_each_entry(i2c_dev, &i2c_dev_list, list) { if (i2c_dev->adap->nr == index) goto found; } i2c_dev = NULL; found: spin_unlock(&i2c_dev_list_lock); return i2c_dev; } static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap) { struct i2c_dev *i2c_dev; if (adap->nr >= I2C_MINORS) { pr_err("Out of device minors (%d)\n", adap->nr); return ERR_PTR(-ENODEV); } i2c_dev = kzalloc(sizeof(*i2c_dev), GFP_KERNEL); if (!i2c_dev) return ERR_PTR(-ENOMEM); i2c_dev->adap = adap; spin_lock(&i2c_dev_list_lock); list_add_tail(&i2c_dev->list, &i2c_dev_list); spin_unlock(&i2c_dev_list_lock); return i2c_dev; } static void put_i2c_dev(struct i2c_dev *i2c_dev, bool del_cdev) { spin_lock(&i2c_dev_list_lock); list_del(&i2c_dev->list); spin_unlock(&i2c_dev_list_lock); if (del_cdev) cdev_device_del(&i2c_dev->cdev, &i2c_dev->dev); put_device(&i2c_dev->dev); } static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_dev *i2c_dev = i2c_dev_get_by_minor(MINOR(dev->devt)); if (!i2c_dev) return -ENODEV; return sysfs_emit(buf, "%s\n", i2c_dev->adap->name); } static DEVICE_ATTR_RO(name); static struct attribute *i2c_attrs[] = { &dev_attr_name.attr, NULL, }; ATTRIBUTE_GROUPS(i2c); /* ------------------------------------------------------------------------- */ /* * After opening an instance of this character special file, a file * descriptor starts out associated only with an i2c_adapter (and bus). * * Using the I2C_RDWR ioctl(), you can then *immediately* issue i2c_msg * traffic to any devices on the bus used by that adapter. That's because * the i2c_msg vectors embed all the addressing information they need, and * are submitted directly to an i2c_adapter. However, SMBus-only adapters * don't support that interface. * * To use read()/write() system calls on that file descriptor, or to use * SMBus interfaces (and work with SMBus-only hosts!), you must first issue * an I2C_SLAVE (or I2C_SLAVE_FORCE) ioctl. 
That configures an anonymous * (never registered) i2c_client so it holds the addressing information * needed by those system calls and by this SMBus interface. */ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { char *tmp; int ret; struct i2c_client *client = file->private_data; /* Adapter must support I2C transfers */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -EOPNOTSUPP; if (count > 8192) count = 8192; tmp = kzalloc(count, GFP_KERNEL); if (tmp == NULL) return -ENOMEM; pr_debug("i2c-%d reading %zu bytes.\n", iminor(file_inode(file)), count); ret = i2c_master_recv(client, tmp, count); if (ret >= 0) if (copy_to_user(buf, tmp, ret)) ret = -EFAULT; kfree(tmp); return ret; } static ssize_t i2cdev_write(struct file *file, const char __user *buf, size_t count, loff_t *offset) { int ret; char *tmp; struct i2c_client *client = file->private_data; /* Adapter must support I2C transfers */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -EOPNOTSUPP; if (count > 8192) count = 8192; tmp = memdup_user(buf, count); if (IS_ERR(tmp)) return PTR_ERR(tmp); pr_debug("i2c-%d writing %zu bytes.\n", iminor(file_inode(file)), count); ret = i2c_master_send(client, tmp, count); kfree(tmp); return ret; } static int i2cdev_check(struct device *dev, void *addrp) { struct i2c_client *client = i2c_verify_client(dev); if (!client || client->addr != *(unsigned int *)addrp) return 0; return dev->driver ? -EBUSY : 0; } /* walk up mux tree */ static int i2cdev_check_mux_parents(struct i2c_adapter *adapter, int addr) { struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter); int result; result = device_for_each_child(&adapter->dev, &addr, i2cdev_check); if (!result && parent) result = i2cdev_check_mux_parents(parent, addr); return result; } /* recurse down mux tree */ static int i2cdev_check_mux_children(struct device *dev, void *addrp) { int result; if (dev->type == &i2c_adapter_type) result = device_for_each_child(dev, addrp, i2cdev_check_mux_children); else result = i2cdev_check(dev, addrp); return result; } /* This address checking function differs from the one in i2c-core in that it considers an address with a registered device, but no driver bound to it, as NOT busy. 
*/ static int i2cdev_check_addr(struct i2c_adapter *adapter, unsigned int addr) { struct i2c_adapter *parent = i2c_parent_is_i2c_adapter(adapter); int result = 0; if (parent) result = i2cdev_check_mux_parents(parent, addr); if (!result) result = device_for_each_child(&adapter->dev, &addr, i2cdev_check_mux_children); return result; } static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client, unsigned nmsgs, struct i2c_msg *msgs) { u8 __user **data_ptrs; int i, res; /* Adapter must support I2C transfers */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -EOPNOTSUPP; data_ptrs = kmalloc_array(nmsgs, sizeof(u8 __user *), GFP_KERNEL); if (data_ptrs == NULL) { kfree(msgs); return -ENOMEM; } res = 0; for (i = 0; i < nmsgs; i++) { /* Limit the size of the message to a sane amount */ if (msgs[i].len > 8192) { res = -EINVAL; break; } data_ptrs[i] = (u8 __user *)msgs[i].buf; msgs[i].buf = memdup_user(data_ptrs[i], msgs[i].len); if (IS_ERR(msgs[i].buf)) { res = PTR_ERR(msgs[i].buf); break; } /* memdup_user allocates with GFP_KERNEL, so DMA is ok */ msgs[i].flags |= I2C_M_DMA_SAFE; /* * If the message length is received from the slave (similar * to SMBus block read), we must ensure that the buffer will * be large enough to cope with a message length of * I2C_SMBUS_BLOCK_MAX as this is the maximum underlying bus * drivers allow. The first byte in the buffer must be * pre-filled with the number of extra bytes, which must be * at least one to hold the message length, but can be * greater (for example to account for a checksum byte at * the end of the message.) */ if (msgs[i].flags & I2C_M_RECV_LEN) { if (!(msgs[i].flags & I2C_M_RD) || msgs[i].len < 1 || msgs[i].buf[0] < 1 || msgs[i].len < msgs[i].buf[0] + I2C_SMBUS_BLOCK_MAX) { i++; res = -EINVAL; break; } msgs[i].len = msgs[i].buf[0]; } } if (res < 0) { int j; for (j = 0; j < i; ++j) kfree(msgs[j].buf); kfree(data_ptrs); kfree(msgs); return res; } res = i2c_transfer(client->adapter, msgs, nmsgs); while (i-- > 0) { if (res >= 0 && (msgs[i].flags & I2C_M_RD)) { if (copy_to_user(data_ptrs[i], msgs[i].buf, msgs[i].len)) res = -EFAULT; } kfree(msgs[i].buf); } kfree(data_ptrs); kfree(msgs); return res; } static noinline int i2cdev_ioctl_smbus(struct i2c_client *client, u8 read_write, u8 command, u32 size, union i2c_smbus_data __user *data) { union i2c_smbus_data temp = {}; int datasize, res; if ((size != I2C_SMBUS_BYTE) && (size != I2C_SMBUS_QUICK) && (size != I2C_SMBUS_BYTE_DATA) && (size != I2C_SMBUS_WORD_DATA) && (size != I2C_SMBUS_PROC_CALL) && (size != I2C_SMBUS_BLOCK_DATA) && (size != I2C_SMBUS_I2C_BLOCK_BROKEN) && (size != I2C_SMBUS_I2C_BLOCK_DATA) && (size != I2C_SMBUS_BLOCK_PROC_CALL)) { dev_dbg(&client->adapter->dev, "size out of range (%x) in ioctl I2C_SMBUS.\n", size); return -EINVAL; } /* Note that I2C_SMBUS_READ and I2C_SMBUS_WRITE are 0 and 1, so the check is valid if size==I2C_SMBUS_QUICK too. */ if ((read_write != I2C_SMBUS_READ) && (read_write != I2C_SMBUS_WRITE)) { dev_dbg(&client->adapter->dev, "read_write out of range (%x) in ioctl I2C_SMBUS.\n", read_write); return -EINVAL; } /* Note that command values are always valid! 
*/ if ((size == I2C_SMBUS_QUICK) || ((size == I2C_SMBUS_BYTE) && (read_write == I2C_SMBUS_WRITE))) /* These are special: we do not use data */ return i2c_smbus_xfer(client->adapter, client->addr, client->flags, read_write, command, size, NULL); if (data == NULL) { dev_dbg(&client->adapter->dev, "data is NULL pointer in ioctl I2C_SMBUS.\n"); return -EINVAL; } if ((size == I2C_SMBUS_BYTE_DATA) || (size == I2C_SMBUS_BYTE)) datasize = sizeof(data->byte); else if ((size == I2C_SMBUS_WORD_DATA) || (size == I2C_SMBUS_PROC_CALL)) datasize = sizeof(data->word); else /* size == smbus block, i2c block, or block proc. call */ datasize = sizeof(data->block); if ((size == I2C_SMBUS_PROC_CALL) || (size == I2C_SMBUS_BLOCK_PROC_CALL) || (size == I2C_SMBUS_I2C_BLOCK_DATA) || (read_write == I2C_SMBUS_WRITE)) { if (copy_from_user(&temp, data, datasize)) return -EFAULT; } if (size == I2C_SMBUS_I2C_BLOCK_BROKEN) { /* Convert old I2C block commands to the new convention. This preserves binary compatibility. */ size = I2C_SMBUS_I2C_BLOCK_DATA; if (read_write == I2C_SMBUS_READ) temp.block[0] = I2C_SMBUS_BLOCK_MAX; } res = i2c_smbus_xfer(client->adapter, client->addr, client->flags, read_write, command, size, &temp); if (!res && ((size == I2C_SMBUS_PROC_CALL) || (size == I2C_SMBUS_BLOCK_PROC_CALL) || (read_write == I2C_SMBUS_READ))) { if (copy_to_user(data, &temp, datasize)) return -EFAULT; } return res; } static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct i2c_client *client = file->private_data; unsigned long funcs; dev_dbg(&client->adapter->dev, "ioctl, cmd=0x%02x, arg=0x%02lx\n", cmd, arg); switch (cmd) { case I2C_SLAVE: case I2C_SLAVE_FORCE: if ((arg > 0x3ff) || (((client->flags & I2C_M_TEN) == 0) && arg > 0x7f)) return -EINVAL; if (cmd == I2C_SLAVE && i2cdev_check_addr(client->adapter, arg)) return -EBUSY; /* REVISIT: address could become busy later */ client->addr = arg; return 0; case I2C_TENBIT: if (arg) client->flags |= I2C_M_TEN; else client->flags &= ~I2C_M_TEN; return 0; case I2C_PEC: /* * Setting the PEC flag here won't affect kernel drivers, * which will be using the i2c_client node registered with * the driver model core. Likewise, when that client has * the PEC flag already set, the i2c-dev driver won't see * (or use) this setting. 
*/ if (arg) client->flags |= I2C_CLIENT_PEC; else client->flags &= ~I2C_CLIENT_PEC; return 0; case I2C_FUNCS: funcs = i2c_get_functionality(client->adapter); return put_user(funcs, (unsigned long __user *)arg); case I2C_RDWR: { struct i2c_rdwr_ioctl_data rdwr_arg; struct i2c_msg *rdwr_pa; if (copy_from_user(&rdwr_arg, (struct i2c_rdwr_ioctl_data __user *)arg, sizeof(rdwr_arg))) return -EFAULT; if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0) return -EINVAL; /* * Put an arbitrary limit on the number of messages that can * be sent at once */ if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS) return -EINVAL; rdwr_pa = memdup_array_user(rdwr_arg.msgs, rdwr_arg.nmsgs, sizeof(struct i2c_msg)); if (IS_ERR(rdwr_pa)) return PTR_ERR(rdwr_pa); return i2cdev_ioctl_rdwr(client, rdwr_arg.nmsgs, rdwr_pa); } case I2C_SMBUS: { struct i2c_smbus_ioctl_data data_arg; if (copy_from_user(&data_arg, (struct i2c_smbus_ioctl_data __user *) arg, sizeof(struct i2c_smbus_ioctl_data))) return -EFAULT; return i2cdev_ioctl_smbus(client, data_arg.read_write, data_arg.command, data_arg.size, data_arg.data); } case I2C_RETRIES: if (arg > INT_MAX) return -EINVAL; client->adapter->retries = arg; break; case I2C_TIMEOUT: if (arg > INT_MAX) return -EINVAL; /* For historical reasons, user-space sets the timeout * value in units of 10 ms. */ client->adapter->timeout = msecs_to_jiffies(arg * 10); break; default: /* NOTE: returning a fault code here could cause trouble * in buggy userspace code. Some old kernel bugs returned * zero in this case, and userspace code might accidentally * have depended on that bug. */ return -ENOTTY; } return 0; } #ifdef CONFIG_COMPAT struct i2c_smbus_ioctl_data32 { u8 read_write; u8 command; u32 size; compat_caddr_t data; /* union i2c_smbus_data *data */ }; struct i2c_msg32 { u16 addr; u16 flags; u16 len; compat_caddr_t buf; }; struct i2c_rdwr_ioctl_data32 { compat_caddr_t msgs; /* struct i2c_msg __user *msgs */ u32 nmsgs; }; static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct i2c_client *client = file->private_data; unsigned long funcs; switch (cmd) { case I2C_FUNCS: funcs = i2c_get_functionality(client->adapter); return put_user(funcs, (compat_ulong_t __user *)arg); case I2C_RDWR: { struct i2c_rdwr_ioctl_data32 rdwr_arg; struct i2c_msg32 __user *p; struct i2c_msg *rdwr_pa; int i; if (copy_from_user(&rdwr_arg, (struct i2c_rdwr_ioctl_data32 __user *)arg, sizeof(rdwr_arg))) return -EFAULT; if (!rdwr_arg.msgs || rdwr_arg.nmsgs == 0) return -EINVAL; if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS) return -EINVAL; rdwr_pa = kmalloc_array(rdwr_arg.nmsgs, sizeof(struct i2c_msg), GFP_KERNEL); if (!rdwr_pa) return -ENOMEM; p = compat_ptr(rdwr_arg.msgs); for (i = 0; i < rdwr_arg.nmsgs; i++) { struct i2c_msg32 umsg; if (copy_from_user(&umsg, p + i, sizeof(umsg))) { kfree(rdwr_pa); return -EFAULT; } rdwr_pa[i] = (struct i2c_msg) { .addr = umsg.addr, .flags = umsg.flags, .len = umsg.len, .buf = (__force __u8 *)compat_ptr(umsg.buf), }; } return i2cdev_ioctl_rdwr(client, rdwr_arg.nmsgs, rdwr_pa); } case I2C_SMBUS: { struct i2c_smbus_ioctl_data32 data32; if (copy_from_user(&data32, (void __user *) arg, sizeof(data32))) return -EFAULT; return i2cdev_ioctl_smbus(client, data32.read_write, data32.command, data32.size, compat_ptr(data32.data)); } default: return i2cdev_ioctl(file, cmd, arg); } } #else #define compat_i2cdev_ioctl NULL #endif static int i2cdev_open(struct inode *inode, struct file *file) { unsigned int minor = iminor(inode); struct i2c_client *client; struct i2c_adapter 
*adap; adap = i2c_get_adapter(minor); if (!adap) return -ENODEV; /* This creates an anonymous i2c_client, which may later be * pointed to some address using I2C_SLAVE or I2C_SLAVE_FORCE. * * This client is ** NEVER REGISTERED ** with the driver model * or I2C core code!! It just holds private copies of addressing * information and maybe a PEC flag. */ client = kzalloc(sizeof(*client), GFP_KERNEL); if (!client) { i2c_put_adapter(adap); return -ENOMEM; } snprintf(client->name, I2C_NAME_SIZE, "i2c-dev %d", adap->nr); client->adapter = adap; file->private_data = client; return 0; } static int i2cdev_release(struct inode *inode, struct file *file) { struct i2c_client *client = file->private_data; i2c_put_adapter(client->adapter); kfree(client); file->private_data = NULL; return 0; } static const struct file_operations i2cdev_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = i2cdev_read, .write = i2cdev_write, .unlocked_ioctl = i2cdev_ioctl, .compat_ioctl = compat_i2cdev_ioctl, .open = i2cdev_open, .release = i2cdev_release, }; /* ------------------------------------------------------------------------- */ static const struct class i2c_dev_class = { .name = "i2c-dev", .dev_groups = i2c_groups, }; static void i2cdev_dev_release(struct device *dev) { struct i2c_dev *i2c_dev; i2c_dev = container_of(dev, struct i2c_dev, dev); kfree(i2c_dev); } static int i2cdev_attach_adapter(struct device *dev) { struct i2c_adapter *adap; struct i2c_dev *i2c_dev; int res; if (dev->type != &i2c_adapter_type) return NOTIFY_DONE; adap = to_i2c_adapter(dev); i2c_dev = get_free_i2c_dev(adap); if (IS_ERR(i2c_dev)) return NOTIFY_DONE; cdev_init(&i2c_dev->cdev, &i2cdev_fops); i2c_dev->cdev.owner = THIS_MODULE; device_initialize(&i2c_dev->dev); i2c_dev->dev.devt = MKDEV(I2C_MAJOR, adap->nr); i2c_dev->dev.class = &i2c_dev_class; i2c_dev->dev.parent = &adap->dev; i2c_dev->dev.release = i2cdev_dev_release; res = dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr); if (res) goto err_put_i2c_dev; res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev); if (res) goto err_put_i2c_dev; pr_debug("adapter [%s] registered as minor %d\n", adap->name, adap->nr); return NOTIFY_OK; err_put_i2c_dev: put_i2c_dev(i2c_dev, false); return NOTIFY_DONE; } static int i2cdev_detach_adapter(struct device *dev) { struct i2c_adapter *adap; struct i2c_dev *i2c_dev; if (dev->type != &i2c_adapter_type) return NOTIFY_DONE; adap = to_i2c_adapter(dev); i2c_dev = i2c_dev_get_by_minor(adap->nr); if (!i2c_dev) /* attach_adapter must have failed */ return NOTIFY_DONE; put_i2c_dev(i2c_dev, true); pr_debug("adapter [%s] unregistered\n", adap->name); return NOTIFY_OK; } static int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; switch (action) { case BUS_NOTIFY_ADD_DEVICE: return i2cdev_attach_adapter(dev); case BUS_NOTIFY_DEL_DEVICE: return i2cdev_detach_adapter(dev); } return NOTIFY_DONE; } static struct notifier_block i2cdev_notifier = { .notifier_call = i2cdev_notifier_call, }; /* ------------------------------------------------------------------------- */ static int __init i2c_dev_attach_adapter(struct device *dev, void *dummy) { i2cdev_attach_adapter(dev); return 0; } static int __exit i2c_dev_detach_adapter(struct device *dev, void *dummy) { i2cdev_detach_adapter(dev); return 0; } /* * module load/unload record keeping */ static int __init i2c_dev_init(void) { int res; pr_info("i2c /dev entries driver\n"); res = register_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS, "i2c"); if (res) goto 
out; res = class_register(&i2c_dev_class); if (res) goto out_unreg_chrdev; /* Keep track of adapters which will be added or removed later */ res = bus_register_notifier(&i2c_bus_type, &i2cdev_notifier); if (res) goto out_unreg_class; /* Bind to already existing adapters right away */ i2c_for_each_dev(NULL, i2c_dev_attach_adapter); return 0; out_unreg_class: class_unregister(&i2c_dev_class); out_unreg_chrdev: unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS); out: pr_err("Driver Initialisation failed\n"); return res; } static void __exit i2c_dev_exit(void) { bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier); i2c_for_each_dev(NULL, i2c_dev_detach_adapter); class_unregister(&i2c_dev_class); unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS); } MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>"); MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); MODULE_DESCRIPTION("I2C /dev entries driver"); MODULE_LICENSE("GPL"); module_init(i2c_dev_init); module_exit(i2c_dev_exit);
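The comment block in the middle of this driver describes two userspace access styles: I2C_RDWR works immediately after open(), while read()/write() and the SMBus ioctls require an I2C_SLAVE binding first. A minimal userspace sketch of both styles follows (not part of the driver; the bus number 0 and slave address 0x50 are hypothetical, and error handling is trimmed):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int i2cdev_usage_example(void)
{
        unsigned char reg = 0x00, val = 0;

        int fd = open("/dev/i2c-0", O_RDWR);
        if (fd < 0)
                return -1;

        /* Style 1: I2C_RDWR works right away, because each i2c_msg
         * carries its own slave address. Write a register index, then
         * read one byte back in a single combined transaction. */
        struct i2c_msg msgs[2] = {
                { .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
                { .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
        };
        struct i2c_rdwr_ioctl_data rdwr = { .msgs = msgs, .nmsgs = 2 };
        ioctl(fd, I2C_RDWR, &rdwr);

        /* Style 2: read()/write() need the anonymous client bound to a
         * slave address first via I2C_SLAVE (or I2C_SLAVE_FORCE). */
        ioctl(fd, I2C_SLAVE, 0x50);
        write(fd, &reg, 1);
        read(fd, &val, 1);

        close(fd);
        return val;
}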
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
*/ #ifndef _EXFAT_FS_H #define _EXFAT_FS_H #include <linux/fs.h> #include <linux/ratelimit.h> #include <linux/nls.h> #include <linux/blkdev.h> #define EXFAT_ROOT_INO 1 #define EXFAT_CLUSTERS_UNTRACKED (~0u) /* * exfat error flags */ enum exfat_error_mode { EXFAT_ERRORS_CONT, /* ignore error and continue */ EXFAT_ERRORS_PANIC, /* panic on error */ EXFAT_ERRORS_RO, /* remount r/o on error */ }; /* * exfat nls lossy flag */ enum { NLS_NAME_NO_LOSSY = 0, /* no lossy */ NLS_NAME_LOSSY = 1 << 0, /* just detected incorrect filename(s) */ NLS_NAME_OVERLEN = 1 << 1, /* the length exceeds its limit */ }; #define EXFAT_HASH_BITS 8 #define EXFAT_HASH_SIZE (1UL << EXFAT_HASH_BITS) /* * Type Definitions */ #define ES_2_ENTRIES 2 #define ES_ALL_ENTRIES 0 #define ES_IDX_FILE 0 #define ES_IDX_STREAM 1 #define ES_IDX_FIRST_FILENAME 2 #define EXFAT_FILENAME_ENTRY_NUM(name_len) \ DIV_ROUND_UP(name_len, EXFAT_FILE_NAME_LEN) #define ES_IDX_LAST_FILENAME(name_len) \ (ES_IDX_FIRST_FILENAME + EXFAT_FILENAME_ENTRY_NUM(name_len) - 1) #define DIR_DELETED 0xFFFFFFF7 /* type values */ #define TYPE_UNUSED 0x0000 #define TYPE_DELETED 0x0001 #define TYPE_INVALID 0x0002 #define TYPE_CRITICAL_PRI 0x0100 #define TYPE_BITMAP 0x0101 #define TYPE_UPCASE 0x0102 #define TYPE_VOLUME 0x0103 #define TYPE_DIR 0x0104 #define TYPE_FILE 0x011F #define TYPE_CRITICAL_SEC 0x0200 #define TYPE_STREAM 0x0201 #define TYPE_EXTEND 0x0202 #define TYPE_ACL 0x0203 #define TYPE_BENIGN_PRI 0x0400 #define TYPE_GUID 0x0401 #define TYPE_PADDING 0x0402 #define TYPE_ACLTAB 0x0403 #define TYPE_BENIGN_SEC 0x0800 #define TYPE_VENDOR_EXT 0x0801 #define TYPE_VENDOR_ALLOC 0x0802 #define MAX_CHARSET_SIZE 6 /* max size of multi-byte character */ #define MAX_NAME_LENGTH 255 /* max len of file name excluding NULL */ #define MAX_VFSNAME_BUF_SIZE ((MAX_NAME_LENGTH + 1) * MAX_CHARSET_SIZE) #define EXFAT_HINT_NONE -1 #define EXFAT_MIN_SUBDIR 2 /* * helpers for cluster size to byte conversion. */ #define EXFAT_CLU_TO_B(b, sbi) ((b) << (sbi)->cluster_size_bits) #define EXFAT_B_TO_CLU(b, sbi) ((b) >> (sbi)->cluster_size_bits) #define EXFAT_B_TO_CLU_ROUND_UP(b, sbi) \ (((b - 1) >> (sbi)->cluster_size_bits) + 1) #define EXFAT_CLU_OFFSET(off, sbi) ((off) & ((sbi)->cluster_size - 1)) /* * helpers for block size to byte conversion. */ #define EXFAT_BLK_TO_B(b, sb) ((b) << (sb)->s_blocksize_bits) #define EXFAT_B_TO_BLK(b, sb) ((b) >> (sb)->s_blocksize_bits) #define EXFAT_B_TO_BLK_ROUND_UP(b, sb) \ (((b - 1) >> (sb)->s_blocksize_bits) + 1) #define EXFAT_BLK_OFFSET(off, sb) ((off) & ((sb)->s_blocksize - 1)) /* * helpers for block size to dentry size conversion. */ #define EXFAT_B_TO_DEN(b) ((b) >> DENTRY_SIZE_BITS) #define EXFAT_DEN_TO_B(b) ((b) << DENTRY_SIZE_BITS) /* * helpers for cluster size to dentry size conversion. */ #define EXFAT_CLU_TO_DEN(clu, sbi) \ ((clu) << ((sbi)->cluster_size_bits - DENTRY_SIZE_BITS)) #define EXFAT_DEN_TO_CLU(dentry, sbi) \ ((dentry) >> ((sbi)->cluster_size_bits - DENTRY_SIZE_BITS)) /* * helpers for fat entry. */ #define FAT_ENT_SIZE (4) #define FAT_ENT_SIZE_BITS (2) #define FAT_ENT_OFFSET_SECTOR(sb, loc) (EXFAT_SB(sb)->FAT1_start_sector + \ (((u64)loc << FAT_ENT_SIZE_BITS) >> sb->s_blocksize_bits)) #define FAT_ENT_OFFSET_BYTE_IN_SECTOR(sb, loc) \ ((loc << FAT_ENT_SIZE_BITS) & (sb->s_blocksize - 1)) /* * helpers for bitmap.
*/ #define CLUSTER_TO_BITMAP_ENT(clu) ((clu) - EXFAT_RESERVED_CLUSTERS) #define BITMAP_ENT_TO_CLUSTER(ent) ((ent) + EXFAT_RESERVED_CLUSTERS) #define BITS_PER_SECTOR(sb) ((sb)->s_blocksize * BITS_PER_BYTE) #define BITS_PER_SECTOR_MASK(sb) (BITS_PER_SECTOR(sb) - 1) #define BITMAP_OFFSET_SECTOR_INDEX(sb, ent) \ ((ent / BITS_PER_BYTE) >> (sb)->s_blocksize_bits) #define BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent) (ent & BITS_PER_SECTOR_MASK(sb)) #define BITMAP_OFFSET_BYTE_IN_SECTOR(sb, ent) \ ((ent / BITS_PER_BYTE) & ((sb)->s_blocksize - 1)) #define IGNORED_BITS_REMAINED(clu, clu_base) ((1UL << ((clu) - (clu_base))) - 1) #define ES_ENTRY_NUM(name_len) (ES_IDX_LAST_FILENAME(name_len) + 1) /* 19 entries = 1 file entry + 1 stream entry + 17 filename entries */ #define ES_MAX_ENTRY_NUM ES_ENTRY_NUM(MAX_NAME_LENGTH) /* * 19 entries x 32 bytes/entry = 608 bytes. * The 608 bytes are in 3 sectors at most (even 512 Byte sector). */ #define DIR_CACHE_SIZE \ (DIV_ROUND_UP(EXFAT_DEN_TO_B(ES_MAX_ENTRY_NUM), SECTOR_SIZE) + 1) struct exfat_dentry_namebuf { char *lfn; int lfnbuf_len; /* usually MAX_UNINAME_BUF_SIZE */ }; /* unicode name structure */ struct exfat_uni_name { /* +3 for null and for converting */ unsigned short name[MAX_NAME_LENGTH + 3]; u16 name_hash; unsigned char name_len; }; /* directory structure */ struct exfat_chain { unsigned int dir; unsigned int size; unsigned char flags; }; /* first empty entry hint information */ struct exfat_hint_femp { /* entry index of a directory */ int eidx; /* count of continuous empty entry */ int count; /* the cluster that first empty slot exists in */ struct exfat_chain cur; }; /* hint structure */ struct exfat_hint { unsigned int clu; union { unsigned int off; /* cluster offset */ int eidx; /* entry index */ }; }; struct exfat_entry_set_cache { struct super_block *sb; unsigned int start_off; int num_bh; struct buffer_head *__bh[DIR_CACHE_SIZE]; struct buffer_head **bh; unsigned int num_entries; bool modified; }; #define IS_DYNAMIC_ES(es) ((es)->__bh != (es)->bh) struct exfat_dir_entry { struct exfat_chain dir; int entry; unsigned int type; unsigned int start_clu; unsigned char flags; unsigned short attr; loff_t size; loff_t valid_size; unsigned int num_subdirs; struct timespec64 atime; struct timespec64 mtime; struct timespec64 crtime; struct exfat_dentry_namebuf namebuf; }; /* * exfat mount in-memory data */ struct exfat_mount_options { kuid_t fs_uid; kgid_t fs_gid; unsigned short fs_fmask; unsigned short fs_dmask; /* permission for setting the [am]time */ unsigned short allow_utime; /* charset for filename input/display */ char *iocharset; /* on error: continue, panic, remount-ro */ enum exfat_error_mode errors; unsigned utf8:1, /* Use of UTF-8 character set */ sys_tz:1, /* Use local timezone */ discard:1, /* Issue discard requests on deletions */ keep_last_dots:1; /* Keep trailing periods in paths */ int time_offset; /* Offset of timestamps from UTC (in minutes) */ /* Support creating zero-size directory, default: false */ bool zero_size_dir; }; /* * EXFAT file system superblock in-memory data */ struct exfat_sb_info { unsigned long long num_sectors; /* num of sectors in volume */ unsigned int num_clusters; /* num of clusters in volume */ unsigned int cluster_size; /* cluster size in bytes */ unsigned int cluster_size_bits; unsigned int sect_per_clus; /* cluster size in sectors */ unsigned int sect_per_clus_bits; unsigned long long FAT1_start_sector; /* FAT1 start sector */ unsigned long long FAT2_start_sector; /* FAT2 start sector */ unsigned long long 
data_start_sector; /* data area start sector */ unsigned int num_FAT_sectors; /* num of FAT sectors */ unsigned int root_dir; /* root dir cluster */ unsigned int dentries_per_clu; /* num of dentries per cluster */ unsigned int vol_flags; /* volume flags */ unsigned int vol_flags_persistent; /* volume flags to retain */ struct buffer_head *boot_bh; /* buffer_head of BOOT sector */ unsigned int map_clu; /* allocation bitmap start cluster */ unsigned int map_sectors; /* num of allocation bitmap sectors */ struct buffer_head **vol_amap; /* allocation bitmap */ unsigned short *vol_utbl; /* upcase table */ unsigned int clu_srch_ptr; /* cluster search pointer */ unsigned int used_clusters; /* number of used clusters */ struct mutex s_lock; /* superblock lock */ struct mutex bitmap_lock; /* bitmap lock */ struct exfat_mount_options options; struct nls_table *nls_io; /* Charset used for input and display */ struct ratelimit_state ratelimit; spinlock_t inode_hash_lock; struct hlist_head inode_hashtable[EXFAT_HASH_SIZE]; struct rcu_head rcu; }; #define EXFAT_CACHE_VALID 0 /* * EXFAT file system inode in-memory data */ struct exfat_inode_info { struct exfat_chain dir; int entry; unsigned int type; unsigned short attr; unsigned int start_clu; unsigned char flags; /* * the copy of low 32bit of i_version to check * the validation of hint_stat. */ unsigned int version; /* hint for cluster last accessed */ struct exfat_hint hint_bmap; /* hint for entry index we try to lookup next time */ struct exfat_hint hint_stat; /* hint for first empty entry */ struct exfat_hint_femp hint_femp; spinlock_t cache_lru_lock; struct list_head cache_lru; int nr_caches; /* for avoiding the race between alloc and free */ unsigned int cache_valid_id; /* * NOTE: i_size_ondisk is 64bits, so must hold ->inode_lock to access. * physically allocated size. */ loff_t i_size_ondisk; /* block-aligned i_size (used in cont_write_begin) */ loff_t i_size_aligned; /* on-disk position of directory entry or 0 */ loff_t i_pos; loff_t valid_size; /* hash by i_location */ struct hlist_node i_hash_fat; /* protect bmap against truncate */ struct rw_semaphore truncate_lock; struct inode vfs_inode; /* File creation time */ struct timespec64 i_crtime; }; static inline struct exfat_sb_info *EXFAT_SB(struct super_block *sb) { return sb->s_fs_info; } static inline struct exfat_inode_info *EXFAT_I(struct inode *inode) { return container_of(inode, struct exfat_inode_info, vfs_inode); } /* * If ->i_mode can't hold 0222 (i.e. ATTR_RO), we use ->i_attrs to * save ATTR_RO instead of ->i_mode. * * If it's directory and !sbi->options.rodir, ATTR_RO isn't read-only * bit, it's just used as flag for app. */ static inline int exfat_mode_can_hold_ro(struct inode *inode) { struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb); if (S_ISDIR(inode->i_mode)) return 0; if ((~sbi->options.fs_fmask) & 0222) return 1; return 0; } /* Convert attribute bits and a mask to the UNIX mode. 
*/ static inline mode_t exfat_make_mode(struct exfat_sb_info *sbi, unsigned short attr, mode_t mode) { if ((attr & EXFAT_ATTR_READONLY) && !(attr & EXFAT_ATTR_SUBDIR)) mode &= ~0222; if (attr & EXFAT_ATTR_SUBDIR) return (mode & ~sbi->options.fs_dmask) | S_IFDIR; return (mode & ~sbi->options.fs_fmask) | S_IFREG; } /* Return the FAT attribute byte for this inode */ static inline unsigned short exfat_make_attr(struct inode *inode) { unsigned short attr = EXFAT_I(inode)->attr; if (S_ISDIR(inode->i_mode)) attr |= EXFAT_ATTR_SUBDIR; if (exfat_mode_can_hold_ro(inode) && !(inode->i_mode & 0222)) attr |= EXFAT_ATTR_READONLY; return attr; } static inline void exfat_save_attr(struct inode *inode, unsigned short attr) { if (exfat_mode_can_hold_ro(inode)) EXFAT_I(inode)->attr = attr & (EXFAT_ATTR_RWMASK | EXFAT_ATTR_READONLY); else EXFAT_I(inode)->attr = attr & EXFAT_ATTR_RWMASK; } static inline bool exfat_is_last_sector_in_cluster(struct exfat_sb_info *sbi, sector_t sec) { return ((sec - sbi->data_start_sector + 1) & ((1 << sbi->sect_per_clus_bits) - 1)) == 0; } static inline sector_t exfat_cluster_to_sector(struct exfat_sb_info *sbi, unsigned int clus) { return ((sector_t)(clus - EXFAT_RESERVED_CLUSTERS) << sbi->sect_per_clus_bits) + sbi->data_start_sector; } static inline unsigned int exfat_sector_to_cluster(struct exfat_sb_info *sbi, sector_t sec) { return ((sec - sbi->data_start_sector) >> sbi->sect_per_clus_bits) + EXFAT_RESERVED_CLUSTERS; } static inline bool is_valid_cluster(struct exfat_sb_info *sbi, unsigned int clus) { return clus >= EXFAT_FIRST_CLUSTER && clus < sbi->num_clusters; } /* super.c */ int exfat_set_volume_dirty(struct super_block *sb); int exfat_clear_volume_dirty(struct super_block *sb); /* fatent.c */ #define exfat_get_next_cluster(sb, pclu) exfat_ent_get(sb, *(pclu), pclu) int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc, struct exfat_chain *p_chain, bool sync_bmap); int exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain); int exfat_ent_get(struct super_block *sb, unsigned int loc, unsigned int *content); int exfat_ent_set(struct super_block *sb, unsigned int loc, unsigned int content); int exfat_chain_cont_cluster(struct super_block *sb, unsigned int chain, unsigned int len); int exfat_zeroed_cluster(struct inode *dir, unsigned int clu); int exfat_find_last_cluster(struct super_block *sb, struct exfat_chain *p_chain, unsigned int *ret_clu); int exfat_count_num_clusters(struct super_block *sb, struct exfat_chain *p_chain, unsigned int *ret_count); /* balloc.c */ int exfat_load_bitmap(struct super_block *sb); void exfat_free_bitmap(struct exfat_sb_info *sbi); int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync); void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync); unsigned int exfat_find_free_bitmap(struct super_block *sb, unsigned int clu); int exfat_count_used_clusters(struct super_block *sb, unsigned int *ret_count); int exfat_trim_fs(struct inode *inode, struct fstrim_range *range); /* file.c */ extern const struct file_operations exfat_file_operations; int __exfat_truncate(struct inode *inode); void exfat_truncate(struct inode *inode); int exfat_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr); int exfat_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, unsigned int request_mask, unsigned int query_flags); int exfat_file_fsync(struct file *file, loff_t start, loff_t end, int datasync); long exfat_ioctl(struct file *filp, unsigned int cmd, 
unsigned long arg); long exfat_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); /* namei.c */ extern const struct dentry_operations exfat_dentry_ops; extern const struct dentry_operations exfat_utf8_dentry_ops; /* cache.c */ int exfat_cache_init(void); void exfat_cache_shutdown(void); void exfat_cache_inval_inode(struct inode *inode); int exfat_get_cluster(struct inode *inode, unsigned int cluster, unsigned int *fclus, unsigned int *dclus, unsigned int *last_dclus, int allow_eof); /* dir.c */ extern const struct inode_operations exfat_dir_inode_operations; extern const struct file_operations exfat_dir_operations; unsigned int exfat_get_entry_type(struct exfat_dentry *p_entry); void exfat_init_dir_entry(struct exfat_entry_set_cache *es, unsigned int type, unsigned int start_clu, unsigned long long size, struct timespec64 *ts); void exfat_init_ext_entry(struct exfat_entry_set_cache *es, int num_entries, struct exfat_uni_name *p_uniname); void exfat_remove_entries(struct inode *inode, struct exfat_entry_set_cache *es, int order); void exfat_update_dir_chksum(struct exfat_entry_set_cache *es); int exfat_calc_num_entries(struct exfat_uni_name *p_uniname); int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei, struct exfat_chain *p_dir, struct exfat_uni_name *p_uniname, struct exfat_hint *hint_opt); int exfat_alloc_new_dir(struct inode *inode, struct exfat_chain *clu); struct exfat_dentry *exfat_get_dentry(struct super_block *sb, struct exfat_chain *p_dir, int entry, struct buffer_head **bh); struct exfat_dentry *exfat_get_dentry_cached(struct exfat_entry_set_cache *es, int num); int exfat_get_dentry_set(struct exfat_entry_set_cache *es, struct super_block *sb, struct exfat_chain *p_dir, int entry, unsigned int num_entries); int exfat_get_empty_dentry_set(struct exfat_entry_set_cache *es, struct super_block *sb, struct exfat_chain *p_dir, int entry, unsigned int num_entries); int exfat_put_dentry_set(struct exfat_entry_set_cache *es, int sync); int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir); /* inode.c */ extern const struct inode_operations exfat_file_inode_operations; void exfat_sync_inode(struct inode *inode); struct inode *exfat_build_inode(struct super_block *sb, struct exfat_dir_entry *info, loff_t i_pos); void exfat_hash_inode(struct inode *inode, loff_t i_pos); void exfat_unhash_inode(struct inode *inode); struct inode *exfat_iget(struct super_block *sb, loff_t i_pos); int __exfat_write_inode(struct inode *inode, int sync); int exfat_write_inode(struct inode *inode, struct writeback_control *wbc); void exfat_evict_inode(struct inode *inode); int exfat_block_truncate_page(struct inode *inode, loff_t from); /* exfat/nls.c */ unsigned short exfat_toupper(struct super_block *sb, unsigned short a); int exfat_uniname_ncmp(struct super_block *sb, unsigned short *a, unsigned short *b, unsigned int len); int exfat_utf16_to_nls(struct super_block *sb, struct exfat_uni_name *uniname, unsigned char *p_cstring, int len); int exfat_nls_to_utf16(struct super_block *sb, const unsigned char *p_cstring, const int len, struct exfat_uni_name *uniname, int *p_lossy); int exfat_create_upcase_table(struct super_block *sb); void exfat_free_upcase_table(struct exfat_sb_info *sbi); /* exfat/misc.c */ void __exfat_fs_error(struct super_block *sb, int report, const char *fmt, ...) __printf(3, 4) __cold; #define exfat_fs_error(sb, fmt, args...) \ __exfat_fs_error(sb, 1, fmt, ## args) #define exfat_fs_error_ratelimit(sb, fmt, args...) 
\ __exfat_fs_error(sb, __ratelimit(&EXFAT_SB(sb)->ratelimit), \ fmt, ## args) /* expand to pr_*() with prefix */ #define exfat_err(sb, fmt, ...) \ pr_err("exFAT-fs (%s): " fmt "\n", (sb)->s_id, ##__VA_ARGS__) #define exfat_warn(sb, fmt, ...) \ pr_warn("exFAT-fs (%s): " fmt "\n", (sb)->s_id, ##__VA_ARGS__) #define exfat_info(sb, fmt, ...) \ pr_info("exFAT-fs (%s): " fmt "\n", (sb)->s_id, ##__VA_ARGS__) #define exfat_debug(sb, fmt, ...) \ pr_debug("exFAT-fs (%s): " fmt "\n", (sb)->s_id, ##__VA_ARGS__) void exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts, u8 tz, __le16 time, __le16 date, u8 time_cs); void exfat_truncate_atime(struct timespec64 *ts); void exfat_truncate_inode_atime(struct inode *inode); void exfat_set_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts, u8 *tz, __le16 *time, __le16 *date, u8 *time_cs); u16 exfat_calc_chksum16(void *data, int len, u16 chksum, int type); u32 exfat_calc_chksum32(void *data, int len, u32 chksum, int type); void exfat_update_bh(struct buffer_head *bh, int sync); int exfat_update_bhs(struct buffer_head **bhs, int nr_bhs, int sync); void exfat_chain_set(struct exfat_chain *ec, unsigned int dir, unsigned int size, unsigned char flags); void exfat_chain_dup(struct exfat_chain *dup, struct exfat_chain *ec); #endif /* !_EXFAT_FS_H */
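As a worked example of the cluster/sector helpers declared above, here is a self-contained sketch using hypothetical geometry (8 sectors per cluster, data area at sector 128, and 2 reserved clusters, so the first data cluster is cluster 2); the function name and values are illustrative only:

#include <assert.h>

static void exfat_geometry_example(void)
{
        unsigned int sect_per_clus_bits = 3;            /* 8 sectors/cluster */
        unsigned long long data_start_sector = 128;
        unsigned int reserved_clusters = 2;             /* EXFAT_RESERVED_CLUSTERS */
        unsigned int clu = 5;

        /* Same arithmetic as exfat_cluster_to_sector():
         * cluster 5 -> sector (5 - 2) * 8 + 128 = 152 */
        unsigned long long sec =
                ((unsigned long long)(clu - reserved_clusters)
                 << sect_per_clus_bits) + data_start_sector;
        assert(sec == 152);

        /* Same arithmetic as exfat_sector_to_cluster():
         * sector 152 -> cluster (152 - 128) / 8 + 2 = 5 */
        unsigned int back = (unsigned int)((sec - data_start_sector)
                                           >> sect_per_clus_bits)
                            + reserved_clusters;
        assert(back == clu);
}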
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_ICMPV6_H #define _LINUX_ICMPV6_H #include <linux/skbuff.h> #include <linux/ipv6.h> #include <uapi/linux/icmpv6.h> static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) { return (struct icmp6hdr *)skb_transport_header(skb); } #include <linux/netdevice.h> #if IS_ENABLED(CONFIG_IPV6) typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info, const struct in6_addr *force_saddr, const struct inet6_skb_parm *parm); void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, const struct in6_addr *force_saddr, const struct inet6_skb_parm *parm); #if IS_BUILTIN(CONFIG_IPV6) static inline void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, const struct inet6_skb_parm *parm) { icmp6_send(skb, type, code, info, NULL, parm); } static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn) { BUILD_BUG_ON(fn != icmp6_send); return 0; } static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn) { BUILD_BUG_ON(fn != icmp6_send); return 0; } #else extern void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, const struct inet6_skb_parm *parm); extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn); extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn); #endif static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) { __icmpv6_send(skb, type, code, info, IP6CB(skb)); } int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, unsigned int data_len); #if IS_ENABLED(CONFIG_NF_NAT) void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info); #else static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) { struct inet6_skb_parm parm = { 0 }; __icmpv6_send(skb_in, type, code, info, &parm); } #endif #else static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) { } static inline void icmpv6_ndo_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) { } #endif extern int icmpv6_init(void); extern int icmpv6_err_convert(u8 type, u8 code, int *err); extern void icmpv6_cleanup(void); extern void icmpv6_param_prob_reason(struct sk_buff *skb, u8 code, int pos, enum skb_drop_reason reason); struct flowi6; struct in6_addr; void icmpv6_flow_init(const struct sock *sk, struct flowi6 *fl6, u8 type, const struct in6_addr *saddr, const struct in6_addr *daddr, int oif); static inline void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos) { icmpv6_param_prob_reason(skb, code, pos, SKB_DROP_REASON_NOT_SPECIFIED); } static inline bool icmpv6_is_err(int type) { switch (type) { case ICMPV6_DEST_UNREACH: case ICMPV6_PKT_TOOBIG: case ICMPV6_TIME_EXCEED: case ICMPV6_PARAMPROB: return true; } return false; } #endif
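A minimal sketch of a hypothetical caller (not part of this header) showing how the helpers above combine: icmp6_hdr() locates the ICMPv6 header in a received skb, and icmpv6_is_err() classifies the message by the error types this header knows about (per RFC 4443, error messages have type values below 128):

static bool icmpv6_example_is_error(const struct sk_buff *skb)
{
        const struct icmp6hdr *hdr = icmp6_hdr(skb);

        /* True for destination-unreachable, packet-too-big,
         * time-exceeded, and parameter-problem messages. */
        return icmpv6_is_err(hdr->icmp6_type);
}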
3636 3637 3638 3639 3640 3641 3642 3643 3644 3645 3646 3647 3648 3649 3650 3651 3652 3653 3654 3655 3656 3657 3658 3659 3660 3661 3662 3663 3664 3665 3666 3667 3668 3669 3670 3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695 3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2007	Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014  Intel Mobile Communications GmbH
 * Copyright (C) 2015-2017	Intel Deutschland GmbH
 * Copyright (C) 2018-2024 Intel Corporation
 *
 * utilities for mac80211
 */

#include <net/mac80211.h>
#include <linux/netdevice.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <net/net_namespace.h>
#include <net/cfg80211.h>
#include <net/rtnetlink.h>
#include <kunit/visibility.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
#include "mesh.h"
#include "wme.h"
#include "led.h"
#include "wep.h"

/* privid for wiphys to determine whether they belong to us or not */
const void *const mac80211_wiphy_privid = &mac80211_wiphy_privid;

struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy)
{
	struct ieee80211_local *local;

	local = wiphy_priv(wiphy);
	return &local->hw;
}
EXPORT_SYMBOL(wiphy_to_ieee80211_hw);

const struct ieee80211_conn_settings ieee80211_conn_settings_unlimited = {
	.mode = IEEE80211_CONN_MODE_EHT,
	.bw_limit = IEEE80211_CONN_BW_LIMIT_320,
};

u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
			enum nl80211_iftype type)
{
	__le16 fc = hdr->frame_control;

	if (ieee80211_is_data(fc)) {
		if (len < 24) /* drop incorrect hdr len (data) */
			return NULL;

		if (ieee80211_has_a4(fc))
			return NULL;
		if (ieee80211_has_tods(fc))
			return hdr->addr1;
		if (ieee80211_has_fromds(fc))
			return hdr->addr2;

		return hdr->addr3;
	}

	if (ieee80211_is_s1g_beacon(fc)) {
		struct ieee80211_ext *ext = (void *) hdr;

		return ext->u.s1g_beacon.sa;
	}

	if (ieee80211_is_mgmt(fc)) {
		if (len < 24) /* drop incorrect hdr len (mgmt) */
			return NULL;
		return hdr->addr3;
	}

	if (ieee80211_is_ctl(fc)) {
		if (ieee80211_is_pspoll(fc))
			return hdr->addr1;

		if (ieee80211_is_back_req(fc)) {
			switch (type) {
			case NL80211_IFTYPE_STATION:
				return hdr->addr2;
			case NL80211_IFTYPE_AP:
			case NL80211_IFTYPE_AP_VLAN:
				return hdr->addr1;
			default:
				break; /* fall through to the return */
			}
		}
	}

	return NULL;
}
EXPORT_SYMBOL(ieee80211_get_bssid);

void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;

	skb_queue_walk(&tx->skbs, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
	}
}

int ieee80211_frame_duration(enum nl80211_band band, size_t len,
			     int rate, int erp, int short_preamble)
{
	int dur;

	/* calculate duration (in microseconds, rounded up to next higher
	 * integer if it includes a fractional microsecond) to send frame of
	 * len bytes (does not include FCS) at the given rate. Duration will
	 * also include SIFS.
	 *
	 * rate is in 100 kbps, so the dividend is multiplied by 10 in the
	 * DIV_ROUND_UP() operations.
	 */

	if (band == NL80211_BAND_5GHZ || erp) {
		/*
		 * OFDM:
		 *
		 * N_DBPS = DATARATE x 4
		 * N_SYM = Ceiling((16+8xLENGTH+6) / N_DBPS)
		 *	(16 = SIGNAL time, 6 = tail bits)
		 * TXTIME = T_PREAMBLE + T_SIGNAL + T_SYM x N_SYM + Signal Ext
		 *
		 * T_SYM = 4 usec
		 * 802.11a - 18.5.2: aSIFSTime = 16 usec
		 * 802.11g - 19.8.4: aSIFSTime = 10 usec +
		 *	signal ext = 6 usec
		 */
		dur = 16; /* SIFS + signal ext */
		dur += 16; /* IEEE 802.11-2012 18.3.2.4: T_PREAMBLE = 16 usec */
		dur += 4; /* IEEE 802.11-2012 18.3.2.4: T_SIGNAL = 4 usec */

		/* rates should already consider the channel bandwidth,
		 * don't apply divisor again.
		 */
		dur += 4 * DIV_ROUND_UP((16 + 8 * (len + 4) + 6) * 10,
					4 * rate); /* T_SYM x N_SYM */
	} else {
		/*
		 * 802.11b or 802.11g with 802.11b compatibility:
		 * 18.3.4: TXTIME = PreambleLength + PLCPHeaderTime +
		 * Ceiling(((LENGTH+PBCC)x8)/DATARATE). PBCC=0.
		 *
		 * 802.11 (DS): 15.3.3, 802.11b: 18.3.4
		 * aSIFSTime = 10 usec
		 * aPreambleLength = 144 usec or 72 usec with short preamble
		 * aPLCPHeaderLength = 48 usec or 24 usec with short preamble
		 */
		dur = 10; /* aSIFSTime = 10 usec */
		dur += short_preamble ? (72 + 24) : (144 + 48);

		dur += DIV_ROUND_UP(8 * (len + 4) * 10, rate);
	}

	return dur;
}
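/*
 * Worked example for the OFDM branch above (added commentary, not part
 * of the original file): a 100-byte frame at 54 Mbps (rate = 540 in
 * 100 kbps units) gives
 *
 *	dur = 16 + 16 + 4
 *	    + 4 * DIV_ROUND_UP((16 + 8 * (100 + 4) + 6) * 10, 4 * 540)
 *	    = 36 + 4 * DIV_ROUND_UP(8540, 2160)
 *	    = 36 + 4 * 4 = 52 usec
 *
 * i.e. four payload symbols plus preamble/SIGNAL plus SIFS and signal
 * extension. Note the (len + 4): frame_duration adds the FCS itself.
 */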
/* Exported duration function for driver use */
__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					enum nl80211_band band,
					size_t frame_len,
					struct ieee80211_rate *rate)
{
	struct ieee80211_sub_if_data *sdata;
	u16 dur;
	int erp;
	bool short_preamble = false;

	erp = 0;
	if (vif) {
		sdata = vif_to_sdata(vif);
		short_preamble = sdata->vif.bss_conf.use_short_preamble;
		if (sdata->deflink.operating_11g_mode)
			erp = rate->flags & IEEE80211_RATE_ERP_G;
	}

	dur = ieee80211_frame_duration(band, frame_len, rate->bitrate, erp,
				       short_preamble);

	return cpu_to_le16(dur);
}
EXPORT_SYMBOL(ieee80211_generic_frame_duration);

__le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif, size_t frame_len,
			      const struct ieee80211_tx_info *frame_txctl)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_rate *rate;
	struct ieee80211_sub_if_data *sdata;
	bool short_preamble;
	int erp, bitrate;
	u16 dur;
	struct ieee80211_supported_band *sband;

	sband = local->hw.wiphy->bands[frame_txctl->band];

	short_preamble = false;

	rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx];

	erp = 0;
	if (vif) {
		sdata = vif_to_sdata(vif);
		short_preamble = sdata->vif.bss_conf.use_short_preamble;
		if (sdata->deflink.operating_11g_mode)
			erp = rate->flags & IEEE80211_RATE_ERP_G;
	}

	bitrate = rate->bitrate;

	/* CTS duration */
	dur = ieee80211_frame_duration(sband->band, 10, bitrate,
				       erp, short_preamble);
	/* Data frame duration */
	dur += ieee80211_frame_duration(sband->band, frame_len, bitrate,
					erp, short_preamble);
	/* ACK duration */
	dur += ieee80211_frame_duration(sband->band, 10, bitrate,
					erp, short_preamble);

	return cpu_to_le16(dur);
}
EXPORT_SYMBOL(ieee80211_rts_duration);

__le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    size_t frame_len,
				    const struct ieee80211_tx_info *frame_txctl)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_rate *rate;
	struct ieee80211_sub_if_data *sdata;
	bool short_preamble;
	int erp, bitrate;
	u16 dur;
	struct ieee80211_supported_band *sband;

	sband = local->hw.wiphy->bands[frame_txctl->band];

	short_preamble = false;

	rate = &sband->bitrates[frame_txctl->control.rts_cts_rate_idx];

	erp = 0;
	if (vif) {
		sdata = vif_to_sdata(vif);
		short_preamble = sdata->vif.bss_conf.use_short_preamble;
		if (sdata->deflink.operating_11g_mode)
			erp = rate->flags & IEEE80211_RATE_ERP_G;
	}

	bitrate = rate->bitrate;

	/* Data frame duration */
	dur = ieee80211_frame_duration(sband->band, frame_len, bitrate,
				       erp, short_preamble);
	if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) {
		/* ACK duration */
		dur += ieee80211_frame_duration(sband->band, 10, bitrate,
						erp, short_preamble);
	}

	return cpu_to_le16(dur);
}
EXPORT_SYMBOL(ieee80211_ctstoself_duration);
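/*
 * Added commentary (not in the original file): the RTS NAV above covers
 * three frame_duration terms - CTS + data + ACK - while the CTS-to-self
 * NAV covers only data + ACK, and drops the ACK term entirely for
 * no-ACK frames. The "10" passed for CTS/ACK is the body length of
 * those control frames in bytes, excluding the FCS that
 * ieee80211_frame_duration() adds itself.
 */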
static void wake_tx_push_queue(struct ieee80211_local *local,
			       struct ieee80211_sub_if_data *sdata,
			       struct ieee80211_txq *queue)
{
	struct ieee80211_tx_control control = {
		.sta = queue->sta,
	};
	struct sk_buff *skb;

	while (1) {
		skb = ieee80211_tx_dequeue(&local->hw, queue);
		if (!skb)
			break;

		drv_tx(local, &control, skb);
	}
}

/* wake_tx_queue handler for drivers that do not implement a custom one */
void ieee80211_handle_wake_tx_queue(struct ieee80211_hw *hw,
				    struct ieee80211_txq *txq)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif);
	struct ieee80211_txq *queue;

	spin_lock(&local->handle_wake_tx_queue_lock);

	/* Use ieee80211_next_txq() for airtime fairness accounting */
	ieee80211_txq_schedule_start(hw, txq->ac);
	while ((queue = ieee80211_next_txq(hw, txq->ac))) {
		wake_tx_push_queue(local, sdata, queue);
		ieee80211_return_txq(hw, queue, false);
	}
	ieee80211_txq_schedule_end(hw, txq->ac);

	spin_unlock(&local->handle_wake_tx_queue_lock);
}
EXPORT_SYMBOL(ieee80211_handle_wake_tx_queue);
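/*
 * Usage sketch (added, not part of the original file): a driver without
 * its own TX queue handling can simply point its ops at this helper:
 *
 *	static const struct ieee80211_ops foo_ops = {
 *		...
 *		.wake_tx_queue = ieee80211_handle_wake_tx_queue,
 *		...
 *	};
 *
 * mac80211 will then dequeue frames in airtime-fairness order and hand
 * them to the driver's ->tx() callback whenever a queue is woken.
 */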
static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_vif *vif = &sdata->vif;
	struct fq *fq = &local->fq;
	struct ps_data *ps = NULL;
	struct txq_info *txqi;
	struct sta_info *sta;
	int i;

	local_bh_disable();
	spin_lock(&fq->lock);

	if (!test_bit(SDATA_STATE_RUNNING, &sdata->state))
		goto out;

	if (sdata->vif.type == NL80211_IFTYPE_AP)
		ps = &sdata->bss->ps;

	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		if (sdata != sta->sdata)
			continue;

		for (i = 0; i < ARRAY_SIZE(sta->sta.txq); i++) {
			struct ieee80211_txq *txq = sta->sta.txq[i];

			if (!txq)
				continue;

			txqi = to_txq_info(txq);

			if (ac != txq->ac)
				continue;

			if (!test_and_clear_bit(IEEE80211_TXQ_DIRTY,
						&txqi->flags))
				continue;

			spin_unlock(&fq->lock);
			drv_wake_tx_queue(local, txqi);
			spin_lock(&fq->lock);
		}
	}

	if (!vif->txq)
		goto out;

	txqi = to_txq_info(vif->txq);

	if (!test_and_clear_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) ||
	    (ps && atomic_read(&ps->num_sta_ps)) || ac != vif->txq->ac)
		goto out;

	spin_unlock(&fq->lock);

	drv_wake_tx_queue(local, txqi);
	local_bh_enable();
	return;
out:
	spin_unlock(&fq->lock);
	local_bh_enable();
}

static void
__releases(&local->queue_stop_reason_lock)
__acquires(&local->queue_stop_reason_lock)
_ieee80211_wake_txqs(struct ieee80211_local *local, unsigned long *flags)
{
	struct ieee80211_sub_if_data *sdata;
	int n_acs = IEEE80211_NUM_ACS;
	int i;

	rcu_read_lock();

	if (local->hw.queues < IEEE80211_NUM_ACS)
		n_acs = 1;

	for (i = 0; i < local->hw.queues; i++) {
		if (local->queue_stop_reasons[i])
			continue;

		spin_unlock_irqrestore(&local->queue_stop_reason_lock, *flags);
		list_for_each_entry_rcu(sdata, &local->interfaces, list) {
			int ac;

			for (ac = 0; ac < n_acs; ac++) {
				int ac_queue = sdata->vif.hw_queue[ac];

				if (ac_queue == i ||
				    sdata->vif.cab_queue == i)
					__ieee80211_wake_txqs(sdata, ac);
			}
		}
		spin_lock_irqsave(&local->queue_stop_reason_lock, *flags);
	}

	rcu_read_unlock();
}

void ieee80211_wake_txqs(struct tasklet_struct *t)
{
	struct ieee80211_local *local = from_tasklet(local, t,
						     wake_txqs_tasklet);
	unsigned long flags;

	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
	_ieee80211_wake_txqs(local, &flags);
	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}

static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
				   enum queue_stop_reason reason,
				   bool refcounted,
				   unsigned long *flags)
{
	struct ieee80211_local *local = hw_to_local(hw);

	trace_wake_queue(local, queue, reason);

	if (WARN_ON(queue >= hw->queues))
		return;

	if (!test_bit(reason, &local->queue_stop_reasons[queue]))
		return;

	if (!refcounted) {
		local->q_stop_reasons[queue][reason] = 0;
	} else {
		local->q_stop_reasons[queue][reason]--;
		if (WARN_ON(local->q_stop_reasons[queue][reason] < 0))
			local->q_stop_reasons[queue][reason] = 0;
	}

	if (local->q_stop_reasons[queue][reason] == 0)
		__clear_bit(reason, &local->queue_stop_reasons[queue]);

	if (local->queue_stop_reasons[queue] != 0)
		/* someone still has this queue stopped */
		return;

	if (!skb_queue_empty(&local->pending[queue]))
		tasklet_schedule(&local->tx_pending_tasklet);

	/*
	 * Calling _ieee80211_wake_txqs here can be a problem because it may
	 * release queue_stop_reason_lock which has been taken by
	 * __ieee80211_wake_queue's caller. It is certainly not very nice to
	 * release someone's lock, but it is fine because all the callers of
	 * __ieee80211_wake_queue call it right before releasing the lock.
	 */
	if (reason == IEEE80211_QUEUE_STOP_REASON_DRIVER)
		tasklet_schedule(&local->wake_txqs_tasklet);
	else
		_ieee80211_wake_txqs(local, flags);
}

void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
				    enum queue_stop_reason reason,
				    bool refcounted)
{
	struct ieee80211_local *local = hw_to_local(hw);
	unsigned long flags;

	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
	__ieee80211_wake_queue(hw, queue, reason, refcounted, &flags);
	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}

void ieee80211_wake_queue(struct ieee80211_hw *hw, int queue)
{
	ieee80211_wake_queue_by_reason(hw, queue,
				       IEEE80211_QUEUE_STOP_REASON_DRIVER,
				       false);
}
EXPORT_SYMBOL(ieee80211_wake_queue);

static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
				   enum queue_stop_reason reason,
				   bool refcounted)
{
	struct ieee80211_local *local = hw_to_local(hw);

	trace_stop_queue(local, queue, reason);

	if (WARN_ON(queue >= hw->queues))
		return;

	if (!refcounted)
		local->q_stop_reasons[queue][reason] = 1;
	else
		local->q_stop_reasons[queue][reason]++;

	set_bit(reason, &local->queue_stop_reasons[queue]);
}

void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
				    enum queue_stop_reason reason,
				    bool refcounted)
{
	struct ieee80211_local *local = hw_to_local(hw);
	unsigned long flags;

	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
	__ieee80211_stop_queue(hw, queue, reason, refcounted);
	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}

void ieee80211_stop_queue(struct ieee80211_hw *hw, int queue)
{
	ieee80211_stop_queue_by_reason(hw, queue,
				       IEEE80211_QUEUE_STOP_REASON_DRIVER,
				       false);
}
EXPORT_SYMBOL(ieee80211_stop_queue);
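/*
 * Added commentary (not in the original file): stop/wake pairs come in
 * two flavours. Non-refcounted reasons act as a simple flag: a single
 * wake clears the reason no matter how many times the queue was
 * stopped. Refcounted reasons keep a per-queue counter in
 * q_stop_reasons[][], so e.g. two refcounted stop calls for the same
 * reason need two matching wake calls before the reason bit is cleared
 * and the queue can actually run again.
 */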
void ieee80211_add_pending_skb(struct ieee80211_local *local,
			       struct sk_buff *skb)
{
	struct ieee80211_hw *hw = &local->hw;
	unsigned long flags;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int queue = info->hw_queue;

	if (WARN_ON(!info->control.vif)) {
		ieee80211_free_txskb(&local->hw, skb);
		return;
	}

	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
	__ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
			       false);
	__skb_queue_tail(&local->pending[queue], skb);
	__ieee80211_wake_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
			       false, &flags);
	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}

void ieee80211_add_pending_skbs(struct ieee80211_local *local,
				struct sk_buff_head *skbs)
{
	struct ieee80211_hw *hw = &local->hw;
	struct sk_buff *skb;
	unsigned long flags;
	int queue, i;

	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
	while ((skb = skb_dequeue(skbs))) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (WARN_ON(!info->control.vif)) {
			ieee80211_free_txskb(&local->hw, skb);
			continue;
		}

		queue = info->hw_queue;

		__ieee80211_stop_queue(hw, queue,
				       IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
				       false);

		__skb_queue_tail(&local->pending[queue], skb);
	}

	for (i = 0; i < hw->queues; i++)
		__ieee80211_wake_queue(hw, i,
				       IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
				       false, &flags);
	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}

void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
				     unsigned long queues,
				     enum queue_stop_reason reason,
				     bool refcounted)
{
	struct ieee80211_local *local = hw_to_local(hw);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);

	for_each_set_bit(i, &queues, hw->queues)
		__ieee80211_stop_queue(hw, i, reason, refcounted);

	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}

void ieee80211_stop_queues(struct ieee80211_hw *hw)
{
	ieee80211_stop_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
					IEEE80211_QUEUE_STOP_REASON_DRIVER,
					false);
}
EXPORT_SYMBOL(ieee80211_stop_queues);

int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue)
{
	struct ieee80211_local *local = hw_to_local(hw);
	unsigned long flags;
	int ret;

	if (WARN_ON(queue >= hw->queues))
		return true;

	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
	ret = test_bit(IEEE80211_QUEUE_STOP_REASON_DRIVER,
		       &local->queue_stop_reasons[queue]);
	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ieee80211_queue_stopped);

void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
				     unsigned long queues,
				     enum queue_stop_reason reason,
				     bool refcounted)
{
	struct ieee80211_local *local = hw_to_local(hw);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);

	for_each_set_bit(i, &queues, hw->queues)
		__ieee80211_wake_queue(hw, i, reason, refcounted, &flags);

	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}

void ieee80211_wake_queues(struct ieee80211_hw *hw)
{
	ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
					IEEE80211_QUEUE_STOP_REASON_DRIVER,
					false);
}
EXPORT_SYMBOL(ieee80211_wake_queues);

static unsigned int
ieee80211_get_vif_queues(struct ieee80211_local *local,
			 struct ieee80211_sub_if_data *sdata)
{
	unsigned int queues;

	if (sdata && ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) {
		int ac;

		queues = 0;

		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
			queues |= BIT(sdata->vif.hw_queue[ac]);
		if (sdata->vif.cab_queue != IEEE80211_INVAL_HW_QUEUE)
			queues |= BIT(sdata->vif.cab_queue);
	} else {
		/* all queues */
		queues = BIT(local->hw.queues) - 1;
	}

	return queues;
}
void __ieee80211_flush_queues(struct ieee80211_local *local,
			      struct ieee80211_sub_if_data *sdata,
			      unsigned int queues, bool drop)
{
	if (!local->ops->flush)
		return;

	/*
	 * If no queue was set, or if the HW doesn't support
	 * IEEE80211_HW_QUEUE_CONTROL - flush all queues
	 */
	if (!queues || !ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
		queues = ieee80211_get_vif_queues(local, sdata);

	ieee80211_stop_queues_by_reason(&local->hw, queues,
					IEEE80211_QUEUE_STOP_REASON_FLUSH,
					false);

	if (drop) {
		struct sta_info *sta;

		/* Purge the queues, so the frames on them won't be
		 * sent during __ieee80211_wake_queue()
		 */
		list_for_each_entry(sta, &local->sta_list, list) {
			if (sdata != sta->sdata)
				continue;
			ieee80211_purge_sta_txqs(sta);
		}
	}

	drv_flush(local, sdata, queues, drop);

	ieee80211_wake_queues_by_reason(&local->hw, queues,
					IEEE80211_QUEUE_STOP_REASON_FLUSH,
					false);
}

void ieee80211_flush_queues(struct ieee80211_local *local,
			    struct ieee80211_sub_if_data *sdata, bool drop)
{
	__ieee80211_flush_queues(local, sdata, 0, drop);
}

void ieee80211_stop_vif_queues(struct ieee80211_local *local,
			       struct ieee80211_sub_if_data *sdata,
			       enum queue_stop_reason reason)
{
	ieee80211_stop_queues_by_reason(&local->hw,
					ieee80211_get_vif_queues(local, sdata),
					reason, true);
}

void ieee80211_wake_vif_queues(struct ieee80211_local *local,
			       struct ieee80211_sub_if_data *sdata,
			       enum queue_stop_reason reason)
{
	ieee80211_wake_queues_by_reason(&local->hw,
					ieee80211_get_vif_queues(local, sdata),
					reason, true);
}

static void __iterate_interfaces(struct ieee80211_local *local,
				 u32 iter_flags,
				 void (*iterator)(void *data, u8 *mac,
						  struct ieee80211_vif *vif),
				 void *data)
{
	struct ieee80211_sub_if_data *sdata;
	bool active_only = iter_flags & IEEE80211_IFACE_ITER_ACTIVE;

	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		switch (sdata->vif.type) {
		case NL80211_IFTYPE_MONITOR:
			if (!(sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE))
				continue;
			break;
		case NL80211_IFTYPE_AP_VLAN:
			continue;
		default:
			break;
		}
		if (!(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL) &&
		    active_only && !(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
			continue;
		if ((iter_flags & IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER) &&
		    !(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
			continue;
		if (ieee80211_sdata_running(sdata) || !active_only)
			iterator(data, sdata->vif.addr, &sdata->vif);
	}

	sdata = rcu_dereference_check(local->monitor_sdata,
				      lockdep_is_held(&local->iflist_mtx) ||
				      lockdep_is_held(&local->hw.wiphy->mtx));
	if (sdata &&
	    ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF) &&
	    (iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL || !active_only ||
	     sdata->flags & IEEE80211_SDATA_IN_DRIVER))
		iterator(data, sdata->vif.addr, &sdata->vif);
}

void ieee80211_iterate_interfaces(
	struct ieee80211_hw *hw, u32 iter_flags,
	void (*iterator)(void *data, u8 *mac,
			 struct ieee80211_vif *vif),
	void *data)
{
	struct ieee80211_local *local = hw_to_local(hw);

	mutex_lock(&local->iflist_mtx);
	__iterate_interfaces(local, iter_flags, iterator, data);
	mutex_unlock(&local->iflist_mtx);
}
EXPORT_SYMBOL_GPL(ieee80211_iterate_interfaces);

void ieee80211_iterate_active_interfaces_atomic(
	struct ieee80211_hw *hw, u32 iter_flags,
	void (*iterator)(void *data, u8 *mac,
			 struct ieee80211_vif *vif),
	void *data)
{
	struct ieee80211_local *local = hw_to_local(hw);

	rcu_read_lock();
	__iterate_interfaces(local, iter_flags | IEEE80211_IFACE_ITER_ACTIVE,
			     iterator, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);

void ieee80211_iterate_active_interfaces_mtx(
	struct ieee80211_hw *hw, u32 iter_flags,
	void (*iterator)(void *data, u8 *mac,
			 struct ieee80211_vif *vif),
	void *data)
{
	struct ieee80211_local *local = hw_to_local(hw);

	lockdep_assert_wiphy(hw->wiphy);

	__iterate_interfaces(local, iter_flags | IEEE80211_IFACE_ITER_ACTIVE,
			     iterator, data);
}
EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_mtx);
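/*
 * Usage sketch (illustrative, not in the original file): drivers pass a
 * callback that is invoked once per interface. For example, counting
 * active station-mode vifs:
 *
 *	static void count_sta_vifs(void *data, u8 *mac,
 *				   struct ieee80211_vif *vif)
 *	{
 *		int *count = data;
 *
 *		if (vif->type == NL80211_IFTYPE_STATION)
 *			(*count)++;
 *	}
 *
 *	int n = 0;
 *	ieee80211_iterate_active_interfaces_atomic(hw,
 *			IEEE80211_IFACE_ITER_NORMAL, count_sta_vifs, &n);
 *
 * The _atomic variant runs under RCU, so the callback must not sleep;
 * the _mtx variant may sleep but requires the wiphy mutex to be held.
 */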
static void __iterate_stations(struct ieee80211_local *local,
			       void (*iterator)(void *data,
						struct ieee80211_sta *sta),
			       void *data)
{
	struct sta_info *sta;

	list_for_each_entry_rcu(sta, &local->sta_list, list) {
		if (!sta->uploaded)
			continue;

		iterator(data, &sta->sta);
	}
}

void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw,
				       void (*iterator)(void *data,
							struct ieee80211_sta *sta),
				       void *data)
{
	struct ieee80211_local *local = hw_to_local(hw);

	rcu_read_lock();
	__iterate_stations(local, iterator, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ieee80211_iterate_stations_atomic);

struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);

	if (!ieee80211_sdata_running(sdata) ||
	    !(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
		return NULL;

	return &sdata->vif;
}
EXPORT_SYMBOL_GPL(wdev_to_ieee80211_vif);

struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif)
{
	if (!vif)
		return NULL;

	return &vif_to_sdata(vif)->wdev;
}
EXPORT_SYMBOL_GPL(ieee80211_vif_to_wdev);

/*
 * Nothing should have been stuffed into the workqueue during
 * the suspend->resume cycle. Since we can't check each caller
 * of this function if we are already quiescing / suspended,
 * check here and don't WARN since this can actually happen when
 * the rx path (for example) is racing against __ieee80211_suspend
 * and suspending / quiescing was set after the rx path checked
 * them.
 */
static bool ieee80211_can_queue_work(struct ieee80211_local *local)
{
	if (local->quiescing || (local->suspended && !local->resuming)) {
		pr_warn("queueing ieee80211 work while going to suspend\n");
		return false;
	}

	return true;
}

void ieee80211_queue_work(struct ieee80211_hw *hw, struct work_struct *work)
{
	struct ieee80211_local *local = hw_to_local(hw);

	if (!ieee80211_can_queue_work(local))
		return;

	queue_work(local->workqueue, work);
}
EXPORT_SYMBOL(ieee80211_queue_work);

void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
				  struct delayed_work *dwork,
				  unsigned long delay)
{
	struct ieee80211_local *local = hw_to_local(hw);

	if (!ieee80211_can_queue_work(local))
		return;

	queue_delayed_work(local->workqueue, dwork, delay);
}
EXPORT_SYMBOL(ieee80211_queue_delayed_work);

void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
					   struct ieee80211_tx_queue_params
					   *qparam, int ac)
{
	struct ieee80211_chanctx_conf *chanctx_conf;
	const struct ieee80211_reg_rule *rrule;
	const struct ieee80211_wmm_ac *wmm_ac;
	u16 center_freq = 0;

	if (sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_STATION)
		return;

	rcu_read_lock();
	chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
	if (chanctx_conf)
		center_freq = chanctx_conf->def.chan->center_freq;

	if (!center_freq) {
		rcu_read_unlock();
		return;
	}

	rrule = freq_reg_info(sdata->wdev.wiphy, MHZ_TO_KHZ(center_freq));

	if (IS_ERR_OR_NULL(rrule) || !rrule->has_wmm) {
		rcu_read_unlock();
		return;
	}

	if (sdata->vif.type == NL80211_IFTYPE_AP)
		wmm_ac = &rrule->wmm_rule.ap[ac];
	else
		wmm_ac = &rrule->wmm_rule.client[ac];
	qparam->cw_min = max_t(u16, qparam->cw_min, wmm_ac->cw_min);
	qparam->cw_max = max_t(u16, qparam->cw_max, wmm_ac->cw_max);
	qparam->aifs = max_t(u8, qparam->aifs, wmm_ac->aifsn);
	qparam->txop = min_t(u16, qparam->txop, wmm_ac->cot / 32);
	rcu_read_unlock();
}
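/*
 * Added note (not in the original file): the regulatory clamp above
 * only tightens EDCA parameters - CWmin/CWmax/AIFSN may only grow and
 * TXOP may only shrink. wmm_ac->cot is in microseconds while
 * qparam->txop is in 32-usec units, hence the division by 32: e.g. a
 * regulatory cap of 2048 usec becomes a txop limit of 64.
 */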
void ieee80211_set_wmm_default(struct ieee80211_link_data *link,
			       bool bss_notify, bool enable_qos)
{
	struct ieee80211_sub_if_data *sdata = link->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_tx_queue_params qparam;
	struct ieee80211_chanctx_conf *chanctx_conf;
	int ac;
	bool use_11b;
	bool is_ocb; /* Use another set of EDCA parameters if dot11OCBActivated=true */
	int aCWmin, aCWmax;

	if (!local->ops->conf_tx)
		return;

	if (local->hw.queues < IEEE80211_NUM_ACS)
		return;

	memset(&qparam, 0, sizeof(qparam));

	rcu_read_lock();
	chanctx_conf = rcu_dereference(link->conf->chanctx_conf);
	use_11b = (chanctx_conf &&
		   chanctx_conf->def.chan->band == NL80211_BAND_2GHZ) &&
		 !link->operating_11g_mode;
	rcu_read_unlock();

	is_ocb = (sdata->vif.type == NL80211_IFTYPE_OCB);

	/* Set defaults according to 802.11-2007 Table 7-37 */
	aCWmax = 1023;
	if (use_11b)
		aCWmin = 31;
	else
		aCWmin = 15;

	/* Configure old 802.11b/g medium access rules. */
	qparam.cw_max = aCWmax;
	qparam.cw_min = aCWmin;
	qparam.txop = 0;
	qparam.aifs = 2;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		/* Update if QoS is enabled. */
		if (enable_qos) {
			switch (ac) {
			case IEEE80211_AC_BK:
				qparam.cw_max = aCWmax;
				qparam.cw_min = aCWmin;
				qparam.txop = 0;
				if (is_ocb)
					qparam.aifs = 9;
				else
					qparam.aifs = 7;
				break;
			/* never happens but let's not leave undefined */
			default:
			case IEEE80211_AC_BE:
				qparam.cw_max = aCWmax;
				qparam.cw_min = aCWmin;
				qparam.txop = 0;
				if (is_ocb)
					qparam.aifs = 6;
				else
					qparam.aifs = 3;
				break;
			case IEEE80211_AC_VI:
				qparam.cw_max = aCWmin;
				qparam.cw_min = (aCWmin + 1) / 2 - 1;
				if (is_ocb)
					qparam.txop = 0;
				else if (use_11b)
					qparam.txop = 6016/32;
				else
					qparam.txop = 3008/32;
				if (is_ocb)
					qparam.aifs = 3;
				else
					qparam.aifs = 2;
				break;
			case IEEE80211_AC_VO:
				qparam.cw_max = (aCWmin + 1) / 2 - 1;
				qparam.cw_min = (aCWmin + 1) / 4 - 1;
				if (is_ocb)
					qparam.txop = 0;
				else if (use_11b)
					qparam.txop = 3264/32;
				else
					qparam.txop = 1504/32;
				qparam.aifs = 2;
				break;
			}
		}
		ieee80211_regulatory_limit_wmm_params(sdata, &qparam, ac);

		qparam.uapsd = false;

		link->tx_conf[ac] = qparam;
		drv_conf_tx(local, link, ac, &qparam);
	}

	if (sdata->vif.type != NL80211_IFTYPE_MONITOR &&
	    sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
	    sdata->vif.type != NL80211_IFTYPE_NAN) {
		link->conf->qos = enable_qos;
		if (bss_notify)
			ieee80211_link_info_change_notify(sdata, link,
							  BSS_CHANGED_QOS);
	}
}
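/*
 * Worked illustration (added, not in the original file): with QoS
 * enabled on a non-OCB, non-11b link, aCWmin = 15 and the loop above
 * yields:
 *
 *	AC	CWmin	CWmax	AIFSN	TXOP (32-usec units)
 *	BK	15	1023	7	0
 *	BE	15	1023	3	0
 *	VI	7	15	2	94   (3008/32)
 *	VO	3	7	2	47   (1504/32)
 *
 * i.e. the higher-priority ACs get shorter contention windows and a
 * bounded TXOP, matching the EDCA defaults for OFDM PHYs.
 */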
void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
			 u16 transaction, u16 auth_alg, u16 status,
			 const u8 *extra, size_t extra_len, const u8 *da,
			 const u8 *bssid, const u8 *key, u8 key_len, u8 key_idx,
			 u32 tx_flags)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	bool multi_link = ieee80211_vif_is_mld(&sdata->vif);
	struct {
		u8 id;
		u8 len;
		u8 ext_id;
		struct ieee80211_multi_link_elem ml;
		struct ieee80211_mle_basic_common_info basic;
	} __packed mle = {
		.id = WLAN_EID_EXTENSION,
		.len = sizeof(mle) - 2,
		.ext_id = WLAN_EID_EXT_EHT_MULTI_LINK,
		.ml.control = cpu_to_le16(IEEE80211_ML_CONTROL_TYPE_BASIC),
		.basic.len = sizeof(mle.basic),
	};
	int err;

	memcpy(mle.basic.mld_mac_addr, sdata->vif.addr, ETH_ALEN);

	/* 24 + 6 = header + auth_algo + auth_transaction + status_code */
	skb = dev_alloc_skb(local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN +
			    24 + 6 + extra_len + IEEE80211_WEP_ICV_LEN +
			    multi_link * sizeof(mle));
	if (!skb)
		return;

	skb_reserve(skb, local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN);

	mgmt = skb_put_zero(skb, 24 + 6);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_AUTH);
	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	memcpy(mgmt->bssid, bssid, ETH_ALEN);
	mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg);
	mgmt->u.auth.auth_transaction = cpu_to_le16(transaction);
	mgmt->u.auth.status_code = cpu_to_le16(status);
	if (extra)
		skb_put_data(skb, extra, extra_len);
	if (multi_link)
		skb_put_data(skb, &mle, sizeof(mle));

	if (auth_alg == WLAN_AUTH_SHARED_KEY && transaction == 3) {
		mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
		err = ieee80211_wep_encrypt(local, skb, key, key_len, key_idx);
		if (WARN_ON(err)) {
			kfree_skb(skb);
			return;
		}
	}

	IEEE80211_SKB_CB(skb)->flags |=
		IEEE80211_TX_INTFL_DONT_ENCRYPT | tx_flags;
	ieee80211_tx_skb(sdata, skb);
}

void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
				    const u8 *da, const u8 *bssid,
				    u16 stype, u16 reason,
				    bool send_frame, u8 *frame_buf)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt = (void *)frame_buf;

	/* build frame */
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
	mgmt->duration = 0; /* initialize only */
	mgmt->seq_ctrl = 0; /* initialize only */
	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	memcpy(mgmt->bssid, bssid, ETH_ALEN);
	/* u.deauth.reason_code == u.disassoc.reason_code */
	mgmt->u.deauth.reason_code = cpu_to_le16(reason);

	if (send_frame) {
		skb = dev_alloc_skb(local->hw.extra_tx_headroom +
				    IEEE80211_DEAUTH_FRAME_LEN);
		if (!skb)
			return;

		skb_reserve(skb, local->hw.extra_tx_headroom);

		/* copy in frame */
		skb_put_data(skb, mgmt, IEEE80211_DEAUTH_FRAME_LEN);

		if (sdata->vif.type != NL80211_IFTYPE_STATION ||
		    !(sdata->u.mgd.flags & IEEE80211_STA_MFP_ENABLED))
			IEEE80211_SKB_CB(skb)->flags |=
				IEEE80211_TX_INTFL_DONT_ENCRYPT;

		ieee80211_tx_skb(sdata, skb);
	}
}

static int ieee80211_put_s1g_cap(struct sk_buff *skb,
				 struct ieee80211_sta_s1g_cap *s1g_cap)
{
	if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_s1g_cap))
		return -ENOBUFS;

	skb_put_u8(skb, WLAN_EID_S1G_CAPABILITIES);
	skb_put_u8(skb, sizeof(struct ieee80211_s1g_cap));

	skb_put_data(skb, &s1g_cap->cap, sizeof(s1g_cap->cap));
	skb_put_data(skb, &s1g_cap->nss_mcs, sizeof(s1g_cap->nss_mcs));

	return 0;
}
static int ieee80211_put_preq_ies_band(struct sk_buff *skb,
				       struct ieee80211_sub_if_data *sdata,
				       const u8 *ie, size_t ie_len,
				       size_t *offset,
				       enum nl80211_band band,
				       u32 rate_mask,
				       struct cfg80211_chan_def *chandef,
				       u32 flags)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_supported_band *sband;
	int i, err;
	size_t noffset;
	u32 rate_flags;
	bool have_80mhz = false;

	*offset = 0;

	sband = local->hw.wiphy->bands[band];
	if (WARN_ON_ONCE(!sband))
		return 0;

	rate_flags = ieee80211_chandef_rate_flags(chandef);

	/* For direct scan add S1G IE and consider its override bits */
	if (band == NL80211_BAND_S1GHZ)
		return ieee80211_put_s1g_cap(skb, &sband->s1g_cap);

	err = ieee80211_put_srates_elem(skb, sband, 0, rate_flags,
					~rate_mask, WLAN_EID_SUPP_RATES);
	if (err)
		return err;

	/* insert "request information" if in custom IEs */
	if (ie && ie_len) {
		static const u8 before_extrates[] = {
			WLAN_EID_SSID,
			WLAN_EID_SUPP_RATES,
			WLAN_EID_REQUEST,
		};
		noffset = ieee80211_ie_split(ie, ie_len,
					     before_extrates,
					     ARRAY_SIZE(before_extrates),
					     *offset);
		if (skb_tailroom(skb) < noffset - *offset)
			return -ENOBUFS;
		skb_put_data(skb, ie + *offset, noffset - *offset);
		*offset = noffset;
	}

	err = ieee80211_put_srates_elem(skb, sband, 0, rate_flags,
					~rate_mask, WLAN_EID_EXT_SUPP_RATES);
	if (err)
		return err;

	if (chandef->chan && sband->band == NL80211_BAND_2GHZ) {
		if (skb_tailroom(skb) < 3)
			return -ENOBUFS;
		skb_put_u8(skb, WLAN_EID_DS_PARAMS);
		skb_put_u8(skb, 1);
		skb_put_u8(skb,
			   ieee80211_frequency_to_channel(
					chandef->chan->center_freq));
	}

	if (flags & IEEE80211_PROBE_FLAG_MIN_CONTENT)
		return 0;

	/* insert custom IEs that go before HT */
	if (ie && ie_len) {
		static const u8 before_ht[] = {
			/*
			 * no need to list the ones split off already
			 * (or generated here)
			 */
			WLAN_EID_DS_PARAMS,
			WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
		};
		noffset = ieee80211_ie_split(ie, ie_len,
					     before_ht, ARRAY_SIZE(before_ht),
					     *offset);
		if (skb_tailroom(skb) < noffset - *offset)
			return -ENOBUFS;
		skb_put_data(skb, ie + *offset, noffset - *offset);
		*offset = noffset;
	}

	if (sband->ht_cap.ht_supported) {
		u8 *pos;

		if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_cap))
			return -ENOBUFS;

		pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_cap));
		ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
					  sband->ht_cap.cap);
	}

	/* insert custom IEs that go before VHT */
	if (ie && ie_len) {
		static const u8 before_vht[] = {
			/*
			 * no need to list the ones split off already
			 * (or generated here)
			 */
			WLAN_EID_BSS_COEX_2040,
			WLAN_EID_EXT_CAPABILITY,
			WLAN_EID_SSID_LIST,
			WLAN_EID_CHANNEL_USAGE,
			WLAN_EID_INTERWORKING,
			WLAN_EID_MESH_ID,
			/* 60 GHz (Multi-band, DMG, MMS) can't happen */
		};
		noffset = ieee80211_ie_split(ie, ie_len,
					     before_vht, ARRAY_SIZE(before_vht),
					     *offset);
		if (skb_tailroom(skb) < noffset - *offset)
			return -ENOBUFS;
		skb_put_data(skb, ie + *offset, noffset - *offset);
		*offset = noffset;
	}

	/* Check if any channel in this sband supports at least 80 MHz */
	for (i = 0; i < sband->n_channels; i++) {
		if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
						IEEE80211_CHAN_NO_80MHZ))
			continue;

		have_80mhz = true;
		break;
	}

	if (sband->vht_cap.vht_supported && have_80mhz) {
		u8 *pos;

		if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_vht_cap))
			return -ENOBUFS;

		pos = skb_put(skb, 2 + sizeof(struct ieee80211_vht_cap));
		ieee80211_ie_build_vht_cap(pos, &sband->vht_cap,
					   sband->vht_cap.cap);
	}

	/* insert custom IEs that go before HE */
	if (ie && ie_len) {
		static const u8 before_he[] = {
			/*
			 * no need to list the ones split off before VHT
			 * or generated here
			 */
			WLAN_EID_EXTENSION, WLAN_EID_EXT_FILS_REQ_PARAMS,
			WLAN_EID_AP_CSN,
			/* TODO: add 11ah/11aj/11ak elements */
		};
		noffset = ieee80211_ie_split(ie, ie_len,
					     before_he, ARRAY_SIZE(before_he),
					     *offset);
		if (skb_tailroom(skb) < noffset - *offset)
			return -ENOBUFS;
		skb_put_data(skb, ie + *offset, noffset - *offset);
		*offset = noffset;
	}

	if (cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
					 IEEE80211_CHAN_NO_HE)) {
		err = ieee80211_put_he_cap(skb, sdata, sband, NULL);
		if (err)
			return err;
	}

	if (cfg80211_any_usable_channels(local->hw.wiphy, BIT(sband->band),
					 IEEE80211_CHAN_NO_HE |
					 IEEE80211_CHAN_NO_EHT)) {
		err = ieee80211_put_eht_cap(skb, sdata, sband, NULL);
		if (err)
			return err;
	}

	err = ieee80211_put_he_6ghz_cap(skb, sdata, IEEE80211_SMPS_OFF);
	if (err)
		return err;

	/*
	 * If adding more here, adjust code in main.c
	 * that calculates local->scan_ies_len.
	 */

	return 0;
}
static int ieee80211_put_preq_ies(struct sk_buff *skb,
				  struct ieee80211_sub_if_data *sdata,
				  struct ieee80211_scan_ies *ie_desc,
				  const u8 *ie, size_t ie_len,
				  u8 bands_used, u32 *rate_masks,
				  struct cfg80211_chan_def *chandef,
				  u32 flags)
{
	size_t custom_ie_offset = 0;
	int i, err;

	memset(ie_desc, 0, sizeof(*ie_desc));

	for (i = 0; i < NUM_NL80211_BANDS; i++) {
		if (bands_used & BIT(i)) {
			ie_desc->ies[i] = skb_tail_pointer(skb);
			err = ieee80211_put_preq_ies_band(skb, sdata,
							  ie, ie_len,
							  &custom_ie_offset,
							  i, rate_masks[i],
							  chandef, flags);
			if (err)
				return err;
			ie_desc->len[i] = skb_tail_pointer(skb) -
					  ie_desc->ies[i];
		}
	}

	/* add any remaining custom IEs */
	if (ie && ie_len) {
		if (WARN_ONCE(skb_tailroom(skb) < ie_len - custom_ie_offset,
			      "not enough space for preq custom IEs\n"))
			return -ENOBUFS;
		ie_desc->common_ies = skb_tail_pointer(skb);
		skb_put_data(skb, ie + custom_ie_offset,
			     ie_len - custom_ie_offset);
		ie_desc->common_ie_len = skb_tail_pointer(skb) -
					 ie_desc->common_ies;
	}

	return 0;
}

int ieee80211_build_preq_ies(struct ieee80211_sub_if_data *sdata, u8 *buffer,
			     size_t buffer_len,
			     struct ieee80211_scan_ies *ie_desc,
			     const u8 *ie, size_t ie_len,
			     u8 bands_used, u32 *rate_masks,
			     struct cfg80211_chan_def *chandef,
			     u32 flags)
{
	struct sk_buff *skb = alloc_skb(buffer_len, GFP_KERNEL);
	uintptr_t offs;
	int ret, i;
	u8 *start;

	if (!skb)
		return -ENOMEM;

	start = skb_tail_pointer(skb);
	memset(start, 0, skb_tailroom(skb));
	ret = ieee80211_put_preq_ies(skb, sdata, ie_desc, ie, ie_len,
				     bands_used, rate_masks, chandef,
				     flags);
	if (ret < 0)
		goto out;

	if (skb->len > buffer_len) {
		ret = -ENOBUFS;
		goto out;
	}

	memcpy(buffer, start, skb->len);

	/* adjust ie_desc for copy */
	for (i = 0; i < NUM_NL80211_BANDS; i++) {
		offs = ie_desc->ies[i] - start;
		ie_desc->ies[i] = buffer + offs;
	}
	offs = ie_desc->common_ies - start;
	ie_desc->common_ies = buffer + offs;

	ret = skb->len;
out:
	consume_skb(skb);
	return ret;
}
struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
					  const u8 *src, const u8 *dst,
					  u32 ratemask,
					  struct ieee80211_channel *chan,
					  const u8 *ssid, size_t ssid_len,
					  const u8 *ie, size_t ie_len,
					  u32 flags)
{
	struct ieee80211_local *local = sdata->local;
	struct cfg80211_chan_def chandef;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u32 rate_masks[NUM_NL80211_BANDS] = {};
	struct ieee80211_scan_ies dummy_ie_desc;

	/*
	 * Do not send DS Channel parameter for directed probe requests
	 * in order to maximize the chance that we get a response. Some
	 * badly-behaved APs don't respond when this parameter is included.
	 */
	chandef.width = sdata->vif.bss_conf.chanreq.oper.width;
	if (flags & IEEE80211_PROBE_FLAG_DIRECTED)
		chandef.chan = NULL;
	else
		chandef.chan = chan;

	skb = ieee80211_probereq_get(&local->hw, src, ssid, ssid_len,
				     local->scan_ies_len + ie_len);
	if (!skb)
		return NULL;

	rate_masks[chan->band] = ratemask;
	ieee80211_put_preq_ies(skb, sdata, &dummy_ie_desc,
			       ie, ie_len, BIT(chan->band),
			       rate_masks, &chandef, flags);

	if (dst) {
		mgmt = (struct ieee80211_mgmt *) skb->data;
		memcpy(mgmt->da, dst, ETH_ALEN);
		memcpy(mgmt->bssid, dst, ETH_ALEN);
	}

	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;

	return skb;
}

u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
			    struct ieee802_11_elems *elems,
			    enum nl80211_band band, u32 *basic_rates)
{
	struct ieee80211_supported_band *sband;
	size_t num_rates;
	u32 supp_rates, rate_flags;
	int i, j;

	sband = sdata->local->hw.wiphy->bands[band];
	if (WARN_ON(!sband))
		return 1;

	rate_flags =
		ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chanreq.oper);

	num_rates = sband->n_bitrates;
	supp_rates = 0;
	for (i = 0; i < elems->supp_rates_len +
		     elems->ext_supp_rates_len; i++) {
		u8 rate = 0;
		int own_rate;
		bool is_basic;

		if (i < elems->supp_rates_len)
			rate = elems->supp_rates[i];
		else if (elems->ext_supp_rates)
			rate = elems->ext_supp_rates
				[i - elems->supp_rates_len];
		own_rate = 5 * (rate & 0x7f);
		is_basic = !!(rate & 0x80);

		if (is_basic && (rate & 0x7f) == BSS_MEMBERSHIP_SELECTOR_HT_PHY)
			continue;

		for (j = 0; j < num_rates; j++) {
			int brate;

			if ((rate_flags & sband->bitrates[j].flags)
			    != rate_flags)
				continue;

			brate = sband->bitrates[j].bitrate;

			if (brate == own_rate) {
				supp_rates |= BIT(j);
				if (basic_rates && is_basic)
					*basic_rates |= BIT(j);
			}
		}
	}
	return supp_rates;
}
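/*
 * Worked example (added, not in the original file): rates in the
 * (Extended) Supported Rates elements are encoded in 500 kbps units
 * with the high bit marking a basic rate. A byte of 0x96 therefore
 * decodes as is_basic = true and own_rate = 5 * 0x16 = 110, i.e.
 * 11 Mbps in the 100 kbps units used by sband->bitrates[].bitrate.
 */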
void ieee80211_stop_device(struct ieee80211_local *local, bool suspend)
{
	local_bh_disable();
	ieee80211_handle_queued_frames(local);
	local_bh_enable();

	ieee80211_led_radio(local, false);
	ieee80211_mod_tpt_led_trig(local, 0, IEEE80211_TPT_LEDTRIG_FL_RADIO);

	wiphy_work_cancel(local->hw.wiphy, &local->reconfig_filter);

	flush_workqueue(local->workqueue);
	wiphy_work_flush(local->hw.wiphy, NULL);
	drv_stop(local, suspend);
}

static void ieee80211_flush_completed_scan(struct ieee80211_local *local,
					   bool aborted)
{
	/* It's possible that we don't handle the scan completion in
	 * time during suspend, so if it's still marked as completed
	 * here, queue the work and flush it to clean things up.
	 * Instead of calling the worker function directly here, we
	 * really queue it to avoid potential races with other flows
	 * scheduling the same work.
	 */
	if (test_bit(SCAN_COMPLETED, &local->scanning)) {
		/* If coming from reconfiguration failure, abort the scan so
		 * we don't attempt to continue a partial HW scan - which is
		 * possible otherwise if (e.g.) the 2.4 GHz portion was the
		 * completed scan, and a 5 GHz portion is still pending.
		 */
		if (aborted)
			set_bit(SCAN_ABORTED, &local->scanning);
		wiphy_delayed_work_queue(local->hw.wiphy,
					 &local->scan_work, 0);
		wiphy_delayed_work_flush(local->hw.wiphy,
					 &local->scan_work);
	}
}

static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local)
{
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_chanctx *ctx;

	lockdep_assert_wiphy(local->hw.wiphy);

	/*
	 * We get here if during resume the device can't be restarted properly.
	 * We might also get here if this happens during HW reset, which is a
	 * slightly different situation and we need to drop all connections in
	 * the latter case.
	 *
	 * Ask cfg80211 to turn off all interfaces, this will result in more
	 * warnings but at least we'll then get into a clean stopped state.
	 */

	local->resuming = false;
	local->suspended = false;
	local->in_reconfig = false;
	local->reconfig_failure = true;

	ieee80211_flush_completed_scan(local, true);

	/* scheduled scan clearly can't be running any more, but tell
	 * cfg80211 and clear local state
	 */
	ieee80211_sched_scan_end(local);

	list_for_each_entry(sdata, &local->interfaces, list)
		sdata->flags &= ~IEEE80211_SDATA_IN_DRIVER;

	/* Mark channel contexts as not being in the driver any more to avoid
	 * removing them from the driver during the shutdown process...
	 */
	list_for_each_entry(ctx, &local->chanctx_list, list)
		ctx->driver_present = false;
}

static void ieee80211_assign_chanctx(struct ieee80211_local *local,
				     struct ieee80211_sub_if_data *sdata,
				     struct ieee80211_link_data *link)
{
	struct ieee80211_chanctx_conf *conf;
	struct ieee80211_chanctx *ctx;

	lockdep_assert_wiphy(local->hw.wiphy);

	conf = rcu_dereference_protected(link->conf->chanctx_conf,
					 lockdep_is_held(&local->hw.wiphy->mtx));
	if (conf) {
		ctx = container_of(conf, struct ieee80211_chanctx, conf);
		drv_assign_vif_chanctx(local, sdata, link->conf, ctx);
	}
}

static void ieee80211_reconfig_stations(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;

	lockdep_assert_wiphy(local->hw.wiphy);

	/* add STAs back */
	list_for_each_entry(sta, &local->sta_list, list) {
		enum ieee80211_sta_state state;

		if (!sta->uploaded || sta->sdata != sdata)
			continue;

		for (state = IEEE80211_STA_NOTEXIST;
		     state < sta->sta_state; state++)
			WARN_ON(drv_sta_state(local, sta->sdata, sta, state,
					      state + 1));
	}
}
static int ieee80211_reconfig_nan(struct ieee80211_sub_if_data *sdata)
{
	struct cfg80211_nan_func *func, **funcs;
	int res, id, i = 0;

	res = drv_start_nan(sdata->local, sdata,
			    &sdata->u.nan.conf);
	if (WARN_ON(res))
		return res;

	funcs = kcalloc(sdata->local->hw.max_nan_de_entries + 1,
			sizeof(*funcs), GFP_KERNEL);
	if (!funcs)
		return -ENOMEM;

	/* Add all the functions:
	 * This is a little bit ugly. We need to call a potentially sleeping
	 * callback for each NAN function, so we can't hold the spinlock.
	 */
	spin_lock_bh(&sdata->u.nan.func_lock);

	idr_for_each_entry(&sdata->u.nan.function_inst_ids, func, id)
		funcs[i++] = func;

	spin_unlock_bh(&sdata->u.nan.func_lock);

	for (i = 0; funcs[i]; i++) {
		res = drv_add_nan_func(sdata->local, sdata, funcs[i]);
		if (WARN_ON(res))
			ieee80211_nan_func_terminated(&sdata->vif,
						      funcs[i]->instance_id,
						      NL80211_NAN_FUNC_TERM_REASON_ERROR,
						      GFP_KERNEL);
	}

	kfree(funcs);

	return 0;
}

static void ieee80211_reconfig_ap_links(struct ieee80211_local *local,
					struct ieee80211_sub_if_data *sdata,
					u64 changed)
{
	int link_id;

	for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
		struct ieee80211_link_data *link;

		if (!(sdata->vif.active_links & BIT(link_id)))
			continue;

		link = sdata_dereference(sdata->link[link_id], sdata);
		if (!link)
			continue;

		if (rcu_access_pointer(link->u.ap.beacon))
			drv_start_ap(local, sdata, link->conf);

		if (!link->conf->enable_beacon)
			continue;

		changed |= BSS_CHANGED_BEACON |
			   BSS_CHANGED_BEACON_ENABLED;

		ieee80211_link_info_change_notify(sdata, link, changed);
	}
}

int ieee80211_reconfig(struct ieee80211_local *local)
{
	struct ieee80211_hw *hw = &local->hw;
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_chanctx *ctx;
	struct sta_info *sta;
	int res, i;
	bool reconfig_due_to_wowlan = false;
	struct ieee80211_sub_if_data *sched_scan_sdata;
	struct cfg80211_sched_scan_request *sched_scan_req;
	bool sched_scan_stopped = false;
	bool suspended = local->suspended;
	bool in_reconfig = false;

	lockdep_assert_wiphy(local->hw.wiphy);

	/* nothing to do if HW shouldn't run */
	if (!local->open_count)
		goto wake_up;

#ifdef CONFIG_PM
	if (suspended)
		local->resuming = true;

	if (local->wowlan) {
		/*
		 * In the wowlan case, both mac80211 and the device
		 * are functional when the resume op is called, so
		 * clear local->suspended so the device could operate
		 * normally (e.g. pass rx frames).
		 */
		local->suspended = false;
		res = drv_resume(local);
		local->wowlan = false;
		if (res < 0) {
			local->resuming = false;
			return res;
		}
		if (res == 0)
			goto wake_up;
		WARN_ON(res > 1);
		/*
		 * res is 1, which means the driver requested
		 * to go through a regular reset on wakeup.
		 * restore local->suspended in this case.
		 */
		reconfig_due_to_wowlan = true;
		local->suspended = true;
	}
#endif

	/*
	 * In case of hw_restart during suspend (without wowlan),
	 * cancel restart work, as we are reconfiguring the device
	 * anyway.
	 * Note that restart_work is scheduled on a frozen workqueue,
	 * so we can't deadlock in this case.
	 */
	if (suspended && local->in_reconfig && !reconfig_due_to_wowlan)
		cancel_work_sync(&local->restart_work);

	local->started = false;

	/*
	 * Upon resume hardware can sometimes be goofy due to
	 * various platform / driver / bus issues, so restarting
	 * the device may at times not work immediately. Propagate
	 * the error.
	 */
	res = drv_start(local);
	if (res) {
		if (suspended)
			WARN(1, "Hardware became unavailable upon resume. This could be a software issue prior to suspend or a hardware issue.\n");
		else
			WARN(1, "Hardware became unavailable during restart.\n");
		ieee80211_handle_reconfig_failure(local);
		return res;
	}

	/* setup fragmentation threshold */
	drv_set_frag_threshold(local, hw->wiphy->frag_threshold);

	/* setup RTS threshold */
	drv_set_rts_threshold(local, hw->wiphy->rts_threshold);

	/* reset coverage class */
	drv_set_coverage_class(local, hw->wiphy->coverage_class);

	ieee80211_led_radio(local, true);
	ieee80211_mod_tpt_led_trig(local,
				   IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);

	/* add interfaces */
	sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
	if (sdata && ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF)) {
		/* in HW restart it exists already */
		WARN_ON(local->resuming);
		res = drv_add_interface(local, sdata);
		if (WARN_ON(res)) {
			RCU_INIT_POINTER(local->monitor_sdata, NULL);
			synchronize_net();
			kfree(sdata);
		}
	}

	list_for_each_entry(sdata, &local->interfaces, list) {
		if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
		    sdata->vif.type != NL80211_IFTYPE_MONITOR &&
		    ieee80211_sdata_running(sdata)) {
			res = drv_add_interface(local, sdata);
			if (WARN_ON(res))
				break;
		}
	}

	/* If adding any of the interfaces failed above, roll back and
	 * report failure.
	 */
	if (res) {
		list_for_each_entry_continue_reverse(sdata, &local->interfaces,
						     list)
			if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
			    sdata->vif.type != NL80211_IFTYPE_MONITOR &&
			    ieee80211_sdata_running(sdata))
				drv_remove_interface(local, sdata);
		ieee80211_handle_reconfig_failure(local);
		return res;
	}

	/* add channel contexts */
	list_for_each_entry(ctx, &local->chanctx_list, list)
		if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER)
			WARN_ON(drv_add_chanctx(local, ctx));

	sdata = wiphy_dereference(local->hw.wiphy, local->monitor_sdata);
	if (sdata && ieee80211_sdata_running(sdata))
		ieee80211_assign_chanctx(local, sdata, &sdata->deflink);

	/* reconfigure hardware */
	ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_LISTEN_INTERVAL |
				   IEEE80211_CONF_CHANGE_MONITOR |
				   IEEE80211_CONF_CHANGE_PS |
				   IEEE80211_CONF_CHANGE_RETRY_LIMITS |
				   IEEE80211_CONF_CHANGE_IDLE);

	ieee80211_configure_filter(local);

	/* Finally also reconfigure all the BSS information */
	list_for_each_entry(sdata, &local->interfaces, list) {
		/* common change flags for all interface types - link only */
		u64 changed = BSS_CHANGED_ERP_CTS_PROT |
			      BSS_CHANGED_ERP_PREAMBLE |
			      BSS_CHANGED_ERP_SLOT |
			      BSS_CHANGED_HT |
			      BSS_CHANGED_BASIC_RATES |
			      BSS_CHANGED_BEACON_INT |
			      BSS_CHANGED_BSSID |
			      BSS_CHANGED_CQM |
			      BSS_CHANGED_QOS |
			      BSS_CHANGED_TXPOWER |
			      BSS_CHANGED_MCAST_RATE;
		struct ieee80211_link_data *link = NULL;
		unsigned int link_id;
		u32 active_links = 0;

		if (!ieee80211_sdata_running(sdata))
			continue;

		if (ieee80211_vif_is_mld(&sdata->vif)) {
			struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS] = {
				[0] = &sdata->vif.bss_conf,
			};

			if (sdata->vif.type == NL80211_IFTYPE_STATION) {
				/* start with a single active link */
				active_links = sdata->vif.active_links;
				link_id = ffs(active_links) - 1;
				sdata->vif.active_links = BIT(link_id);
			}

			drv_change_vif_links(local, sdata, 0,
					     sdata->vif.active_links, old);
		}

		sdata->restart_active_links = active_links;

		for (link_id = 0;
		     link_id < ARRAY_SIZE(sdata->vif.link_conf);
		     link_id++) {
			if (!ieee80211_vif_link_active(&sdata->vif, link_id))
				continue;

			link = sdata_dereference(sdata->link[link_id], sdata);
			if (!link)
				continue;

			ieee80211_assign_chanctx(local, sdata, link);
		}
		switch (sdata->vif.type) {
		case NL80211_IFTYPE_AP_VLAN:
		case NL80211_IFTYPE_MONITOR:
			break;
		case NL80211_IFTYPE_ADHOC:
			if (sdata->vif.cfg.ibss_joined)
				WARN_ON(drv_join_ibss(local, sdata));
			fallthrough;
		default:
			ieee80211_reconfig_stations(sdata);
			fallthrough;
		case NL80211_IFTYPE_AP: /* AP stations are handled later */
			for (i = 0; i < IEEE80211_NUM_ACS; i++)
				drv_conf_tx(local, &sdata->deflink, i,
					    &sdata->deflink.tx_conf[i]);
			break;
		}

		if (sdata->vif.bss_conf.mu_mimo_owner)
			changed |= BSS_CHANGED_MU_GROUPS;

		if (!ieee80211_vif_is_mld(&sdata->vif))
			changed |= BSS_CHANGED_IDLE;

		switch (sdata->vif.type) {
		case NL80211_IFTYPE_STATION:
			if (!ieee80211_vif_is_mld(&sdata->vif)) {
				changed |= BSS_CHANGED_ASSOC |
					   BSS_CHANGED_ARP_FILTER |
					   BSS_CHANGED_PS;

				/* Re-send beacon info report to the driver */
				if (sdata->deflink.u.mgd.have_beacon)
					changed |= BSS_CHANGED_BEACON_INFO;

				if (sdata->vif.bss_conf.max_idle_period ||
				    sdata->vif.bss_conf.protected_keep_alive)
					changed |= BSS_CHANGED_KEEP_ALIVE;

				ieee80211_bss_info_change_notify(sdata,
								 changed);
			} else if (!WARN_ON(!link)) {
				ieee80211_link_info_change_notify(sdata, link,
								  changed);
				changed = BSS_CHANGED_ASSOC |
					  BSS_CHANGED_IDLE |
					  BSS_CHANGED_PS |
					  BSS_CHANGED_ARP_FILTER;
				ieee80211_vif_cfg_change_notify(sdata, changed);
			}
			break;
		case NL80211_IFTYPE_OCB:
			changed |= BSS_CHANGED_OCB;
			ieee80211_bss_info_change_notify(sdata, changed);
			break;
		case NL80211_IFTYPE_ADHOC:
			changed |= BSS_CHANGED_IBSS;
			fallthrough;
		case NL80211_IFTYPE_AP:
			changed |= BSS_CHANGED_P2P_PS;

			if (ieee80211_vif_is_mld(&sdata->vif))
				ieee80211_vif_cfg_change_notify(sdata,
								BSS_CHANGED_SSID);
			else
				changed |= BSS_CHANGED_SSID;

			if (sdata->vif.bss_conf.ftm_responder == 1 &&
			    wiphy_ext_feature_isset(sdata->local->hw.wiphy,
					NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER))
				changed |= BSS_CHANGED_FTM_RESPONDER;

			if (sdata->vif.type == NL80211_IFTYPE_AP) {
				changed |= BSS_CHANGED_AP_PROBE_RESP;

				if (ieee80211_vif_is_mld(&sdata->vif)) {
					ieee80211_reconfig_ap_links(local,
								    sdata,
								    changed);
					break;
				}

				if (rcu_access_pointer(sdata->deflink.u.ap.beacon))
					drv_start_ap(local, sdata,
						     sdata->deflink.conf);
			}
			fallthrough;
		case NL80211_IFTYPE_MESH_POINT:
			if (sdata->vif.bss_conf.enable_beacon) {
				changed |= BSS_CHANGED_BEACON |
					   BSS_CHANGED_BEACON_ENABLED;
				ieee80211_bss_info_change_notify(sdata, changed);
			}
			break;
		case NL80211_IFTYPE_NAN:
			res = ieee80211_reconfig_nan(sdata);
			if (res < 0) {
				ieee80211_handle_reconfig_failure(local);
				return res;
			}
			break;
		case NL80211_IFTYPE_AP_VLAN:
		case NL80211_IFTYPE_MONITOR:
		case NL80211_IFTYPE_P2P_DEVICE:
			/* nothing to do */
			break;
		case NL80211_IFTYPE_UNSPECIFIED:
		case NUM_NL80211_IFTYPES:
		case NL80211_IFTYPE_P2P_CLIENT:
		case NL80211_IFTYPE_P2P_GO:
		case NL80211_IFTYPE_WDS:
			WARN_ON(1);
			break;
		}
	}

	ieee80211_recalc_ps(local);

	/*
	 * The sta might be in psm against the ap (e.g. because
	 * this was the state before a hw restart), so we
	 * explicitly send a null packet in order to make sure
	 * it'll sync against the ap (and get out of psm).
	 */
*/ if (!(local->hw.conf.flags & IEEE80211_CONF_PS)) { list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->vif.type != NL80211_IFTYPE_STATION) continue; if (!sdata->u.mgd.associated) continue; ieee80211_send_nullfunc(local, sdata, false); } } /* APs are now beaconing, add back stations */ list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_AP: ieee80211_reconfig_stations(sdata); break; default: break; } } /* add back keys */ list_for_each_entry(sdata, &local->interfaces, list) ieee80211_reenable_keys(sdata); /* re-enable multi-link for client interfaces */ list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->restart_active_links) ieee80211_set_active_links(&sdata->vif, sdata->restart_active_links); /* * If a link switch was scheduled before the restart, and ran * before reconfig, it will do nothing, so re-schedule. */ if (sdata->desired_active_links) wiphy_work_queue(sdata->local->hw.wiphy, &sdata->activate_links_work); } /* Reconfigure sched scan if it was interrupted by FW restart */ sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata, lockdep_is_held(&local->hw.wiphy->mtx)); sched_scan_req = rcu_dereference_protected(local->sched_scan_req, lockdep_is_held(&local->hw.wiphy->mtx)); if (sched_scan_sdata && sched_scan_req) /* * Sched scan stopped, but we don't want to report it. Instead, * we're trying to reschedule. However, if more than one scan * plan was set, we cannot reschedule since we don't know which * scan plan was currently running (and some scan plans may have * already finished). */ if (sched_scan_req->n_scan_plans > 1 || __ieee80211_request_sched_scan_start(sched_scan_sdata, sched_scan_req)) { RCU_INIT_POINTER(local->sched_scan_sdata, NULL); RCU_INIT_POINTER(local->sched_scan_req, NULL); sched_scan_stopped = true; } if (sched_scan_stopped) cfg80211_sched_scan_stopped_locked(local->hw.wiphy, 0); wake_up: if (local->monitors == local->open_count && local->monitors > 0) ieee80211_add_virtual_monitor(local); /* * Clear the WLAN_STA_BLOCK_BA flag so new aggregation * sessions can be established after a resume. * * Also tear down aggregation sessions since reconfiguring * them in a hardware restart scenario is not easily done * right now, and the hardware will have lost information * about the sessions, but we and the AP still think they * are active. This is really a workaround though. */ if (ieee80211_hw_check(hw, AMPDU_AGGREGATION)) { list_for_each_entry(sta, &local->sta_list, list) { if (!local->resuming) ieee80211_sta_tear_down_BA_sessions( sta, AGG_STOP_LOCAL_REQUEST); clear_sta_flag(sta, WLAN_STA_BLOCK_BA); } } /* * If this is for hw restart things are still running. * We may want to change that later, however. 
*/ if (local->open_count && (!suspended || reconfig_due_to_wowlan)) drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_RESTART); if (local->in_reconfig) { in_reconfig = local->in_reconfig; local->in_reconfig = false; barrier(); ieee80211_reconfig_roc(local); /* Requeue all works */ list_for_each_entry(sdata, &local->interfaces, list) wiphy_work_queue(local->hw.wiphy, &sdata->work); } ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, IEEE80211_QUEUE_STOP_REASON_SUSPEND, false); if (in_reconfig) { list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; if (sdata->vif.type == NL80211_IFTYPE_STATION) ieee80211_sta_restart(sdata); } } if (!suspended) return 0; #ifdef CONFIG_PM /* first set suspended false, then resuming */ local->suspended = false; mb(); local->resuming = false; ieee80211_flush_completed_scan(local, false); if (local->open_count && !reconfig_due_to_wowlan) drv_reconfig_complete(local, IEEE80211_RECONFIG_TYPE_SUSPEND); list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) continue; if (sdata->vif.type == NL80211_IFTYPE_STATION) ieee80211_sta_restart(sdata); } mod_timer(&local->sta_cleanup, jiffies + 1); #else WARN_ON(1); #endif return 0; } static void ieee80211_reconfig_disconnect(struct ieee80211_vif *vif, u8 flag) { struct ieee80211_sub_if_data *sdata; struct ieee80211_local *local; struct ieee80211_key *key; if (WARN_ON(!vif)) return; sdata = vif_to_sdata(vif); local = sdata->local; lockdep_assert_wiphy(local->hw.wiphy); if (WARN_ON(flag & IEEE80211_SDATA_DISCONNECT_RESUME && !local->resuming)) return; if (WARN_ON(flag & IEEE80211_SDATA_DISCONNECT_HW_RESTART && !local->in_reconfig)) return; if (WARN_ON(vif->type != NL80211_IFTYPE_STATION)) return; sdata->flags |= flag; list_for_each_entry(key, &sdata->key_list, list) key->flags |= KEY_FLAG_TAINTED; } void ieee80211_hw_restart_disconnect(struct ieee80211_vif *vif) { ieee80211_reconfig_disconnect(vif, IEEE80211_SDATA_DISCONNECT_HW_RESTART); } EXPORT_SYMBOL_GPL(ieee80211_hw_restart_disconnect); void ieee80211_resume_disconnect(struct ieee80211_vif *vif) { ieee80211_reconfig_disconnect(vif, IEEE80211_SDATA_DISCONNECT_RESUME); } EXPORT_SYMBOL_GPL(ieee80211_resume_disconnect); void ieee80211_recalc_smps(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link) { struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_chanctx *chanctx; lockdep_assert_wiphy(local->hw.wiphy); chanctx_conf = rcu_dereference_protected(link->conf->chanctx_conf, lockdep_is_held(&local->hw.wiphy->mtx)); /* * This function can be called from a work, thus it may be possible * that the chanctx_conf is removed (due to a disconnection, for * example). * So nothing should be done in such case. 
*/ if (!chanctx_conf) return; chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf); ieee80211_recalc_smps_chanctx(local, chanctx); } void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata, int link_id) { struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_chanctx *chanctx; int i; lockdep_assert_wiphy(local->hw.wiphy); for (i = 0; i < ARRAY_SIZE(sdata->vif.link_conf); i++) { struct ieee80211_bss_conf *bss_conf; if (link_id >= 0 && link_id != i) continue; rcu_read_lock(); bss_conf = rcu_dereference(sdata->vif.link_conf[i]); if (!bss_conf) { rcu_read_unlock(); continue; } chanctx_conf = rcu_dereference_protected(bss_conf->chanctx_conf, lockdep_is_held(&local->hw.wiphy->mtx)); /* * Since we hold the wiphy mutex (checked above) * we can take the chanctx_conf pointer out of the * RCU critical section, it cannot go away without * the mutex. Just the way we reached it could - in * theory - go away, but we don't really care and * it really shouldn't happen anyway. */ rcu_read_unlock(); if (!chanctx_conf) return; chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf); ieee80211_recalc_chanctx_min_def(local, chanctx, NULL, false); } } size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset) { size_t pos = offset; while (pos < ielen && ies[pos] != WLAN_EID_VENDOR_SPECIFIC) pos += 2 + ies[pos + 1]; return pos; } u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, u16 cap) { __le16 tmp; *pos++ = WLAN_EID_HT_CAPABILITY; *pos++ = sizeof(struct ieee80211_ht_cap); memset(pos, 0, sizeof(struct ieee80211_ht_cap)); /* capability flags */ tmp = cpu_to_le16(cap); memcpy(pos, &tmp, sizeof(u16)); pos += sizeof(u16); /* AMPDU parameters */ *pos++ = ht_cap->ampdu_factor | (ht_cap->ampdu_density << IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT); /* MCS set */ memcpy(pos, &ht_cap->mcs, sizeof(ht_cap->mcs)); pos += sizeof(ht_cap->mcs); /* extended capabilities */ pos += sizeof(__le16); /* BF capabilities */ pos += sizeof(__le32); /* antenna selection */ pos += sizeof(u8); return pos; } u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, u32 cap) { __le32 tmp; *pos++ = WLAN_EID_VHT_CAPABILITY; *pos++ = sizeof(struct ieee80211_vht_cap); memset(pos, 0, sizeof(struct ieee80211_vht_cap)); /* capability flags */ tmp = cpu_to_le32(cap); memcpy(pos, &tmp, sizeof(u32)); pos += sizeof(u32); /* VHT MCS set */ memcpy(pos, &vht_cap->vht_mcs, sizeof(vht_cap->vht_mcs)); pos += sizeof(vht_cap->vht_mcs); return pos; } /* this may return more than ieee80211_put_he_6ghz_cap() will need */ u8 ieee80211_ie_len_he_cap(struct ieee80211_sub_if_data *sdata) { const struct ieee80211_sta_he_cap *he_cap; struct ieee80211_supported_band *sband; u8 n; sband = ieee80211_get_sband(sdata); if (!sband) return 0; he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); if (!he_cap) return 0; n = ieee80211_he_mcs_nss_size(&he_cap->he_cap_elem); return 2 + 1 + sizeof(he_cap->he_cap_elem) + n + ieee80211_he_ppe_size(he_cap->ppe_thres[0], he_cap->he_cap_elem.phy_cap_info); } static void ieee80211_get_adjusted_he_cap(const struct ieee80211_conn_settings *conn, const struct ieee80211_sta_he_cap *he_cap, struct ieee80211_he_cap_elem *elem) { u8 ru_limit, max_ru; *elem = he_cap->he_cap_elem; switch (conn->bw_limit) { case IEEE80211_CONN_BW_LIMIT_20: ru_limit = IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242; break; case IEEE80211_CONN_BW_LIMIT_40: ru_limit = IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484; break; 
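/* wider bandwidth limits allow correspondingly larger maximum DCM RU sizes; a 996-tone RU spans roughly an 80 MHz channel */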
case IEEE80211_CONN_BW_LIMIT_80: ru_limit = IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996; break; default: ru_limit = IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996; break; } max_ru = elem->phy_cap_info[8] & IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK; max_ru = min(max_ru, ru_limit); elem->phy_cap_info[8] &= ~IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK; elem->phy_cap_info[8] |= max_ru; if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_40) { elem->phy_cap_info[0] &= ~(IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G); elem->phy_cap_info[9] &= ~IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM; } if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_160) { elem->phy_cap_info[0] &= ~(IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G); elem->phy_cap_info[5] &= ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK; elem->phy_cap_info[7] &= ~(IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ | IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ); } } int ieee80211_put_he_cap(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata, const struct ieee80211_supported_band *sband, const struct ieee80211_conn_settings *conn) { const struct ieee80211_sta_he_cap *he_cap; struct ieee80211_he_cap_elem elem; u8 *len; u8 n; u8 ie_len; if (!conn) conn = &ieee80211_conn_settings_unlimited; he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); if (!he_cap) return 0; /* modify on stack first to calculate 'n' and 'ie_len' correctly */ ieee80211_get_adjusted_he_cap(conn, he_cap, &elem); n = ieee80211_he_mcs_nss_size(&elem); ie_len = 2 + 1 + sizeof(he_cap->he_cap_elem) + n + ieee80211_he_ppe_size(he_cap->ppe_thres[0], he_cap->he_cap_elem.phy_cap_info); if (skb_tailroom(skb) < ie_len) return -ENOBUFS; skb_put_u8(skb, WLAN_EID_EXTENSION); len = skb_put(skb, 1); /* We'll set the size later below */ skb_put_u8(skb, WLAN_EID_EXT_HE_CAPABILITY); /* Fixed data */ skb_put_data(skb, &elem, sizeof(elem)); skb_put_data(skb, &he_cap->he_mcs_nss_supp, n); /* Check if PPE Threshold should be present */ if ((he_cap->he_cap_elem.phy_cap_info[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0) goto end; /* * Calculate how many PPET16/PPET8 pairs are to come. Algorithm: * (NSS_M1 + 1) x (num of 1 bits in RU_INDEX_BITMASK) */ n = hweight8(he_cap->ppe_thres[0] & IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK); n *= (1 + ((he_cap->ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) >> IEEE80211_PPE_THRES_NSS_POS)); /* * Each pair is 6 bits, and we need to add the 7 "header" bits to the * total size. 
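* For example (illustrative values): with NSS_M1 = 1, i.e. two spatial streams, and three bits set in RU_INDEX_BITMASK, there are 2 * 3 = 6 pairs, giving 6 * 6 + 7 = 43 bits, rounded up to 6 octets below.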
*/ n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7; n = DIV_ROUND_UP(n, 8); /* Copy PPE Thresholds */ skb_put_data(skb, &he_cap->ppe_thres, n); end: *len = skb_tail_pointer(skb) - len - 1; return 0; } int ieee80211_put_he_6ghz_cap(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata, enum ieee80211_smps_mode smps_mode) { struct ieee80211_supported_band *sband; const struct ieee80211_sband_iftype_data *iftd; enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif); __le16 cap; if (!cfg80211_any_usable_channels(sdata->local->hw.wiphy, BIT(NL80211_BAND_6GHZ), IEEE80211_CHAN_NO_HE)) return 0; sband = sdata->local->hw.wiphy->bands[NL80211_BAND_6GHZ]; iftd = ieee80211_get_sband_iftype_data(sband, iftype); if (!iftd) return 0; /* Check for device HE 6 GHz capability before adding element */ if (!iftd->he_6ghz_capa.capa) return 0; cap = iftd->he_6ghz_capa.capa; cap &= cpu_to_le16(~IEEE80211_HE_6GHZ_CAP_SM_PS); switch (smps_mode) { case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_NUM_MODES: WARN_ON(1); fallthrough; case IEEE80211_SMPS_OFF: cap |= le16_encode_bits(WLAN_HT_CAP_SM_PS_DISABLED, IEEE80211_HE_6GHZ_CAP_SM_PS); break; case IEEE80211_SMPS_STATIC: cap |= le16_encode_bits(WLAN_HT_CAP_SM_PS_STATIC, IEEE80211_HE_6GHZ_CAP_SM_PS); break; case IEEE80211_SMPS_DYNAMIC: cap |= le16_encode_bits(WLAN_HT_CAP_SM_PS_DYNAMIC, IEEE80211_HE_6GHZ_CAP_SM_PS); break; } if (skb_tailroom(skb) < 2 + 1 + sizeof(cap)) return -ENOBUFS; skb_put_u8(skb, WLAN_EID_EXTENSION); skb_put_u8(skb, 1 + sizeof(cap)); skb_put_u8(skb, WLAN_EID_EXT_HE_6GHZ_CAPA); skb_put_data(skb, &cap, sizeof(cap)); return 0; } u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, const struct cfg80211_chan_def *chandef, u16 prot_mode, bool rifs_mode) { struct ieee80211_ht_operation *ht_oper; /* Build HT Information */ *pos++ = WLAN_EID_HT_OPERATION; *pos++ = sizeof(struct ieee80211_ht_operation); ht_oper = (struct ieee80211_ht_operation *)pos; ht_oper->primary_chan = ieee80211_frequency_to_channel( chandef->chan->center_freq); switch (chandef->width) { case NL80211_CHAN_WIDTH_160: case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_80: case NL80211_CHAN_WIDTH_40: if (chandef->center_freq1 > chandef->chan->center_freq) ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; else ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW; break; case NL80211_CHAN_WIDTH_320: /* HT information element should not be included on 6GHz */ WARN_ON(1); return pos; default: ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE; break; } if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 && chandef->width != NL80211_CHAN_WIDTH_20_NOHT && chandef->width != NL80211_CHAN_WIDTH_20) ht_oper->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; if (rifs_mode) ht_oper->ht_param |= IEEE80211_HT_PARAM_RIFS_MODE; ht_oper->operation_mode = cpu_to_le16(prot_mode); ht_oper->stbc_param = 0x0000; /* It seems that Basic MCS set and Supported MCS set are identical for the first 10 bytes */ memset(&ht_oper->basic_set, 0, 16); memcpy(&ht_oper->basic_set, &ht_cap->mcs, 10); return pos + sizeof(struct ieee80211_ht_operation); } void ieee80211_ie_build_wide_bw_cs(u8 *pos, const struct cfg80211_chan_def *chandef) { *pos++ = WLAN_EID_WIDE_BW_CHANNEL_SWITCH; /* EID */ *pos++ = 3; /* IE length */ /* New channel width */ switch (chandef->width) { case NL80211_CHAN_WIDTH_80: *pos++ = IEEE80211_VHT_CHANWIDTH_80MHZ; break; case NL80211_CHAN_WIDTH_160: *pos++ = IEEE80211_VHT_CHANWIDTH_160MHZ; break; case NL80211_CHAN_WIDTH_80P80: *pos++ = 
IEEE80211_VHT_CHANWIDTH_80P80MHZ; break; case NL80211_CHAN_WIDTH_320: /* The behavior is not defined for 320 MHz channels */ WARN_ON(1); fallthrough; default: *pos++ = IEEE80211_VHT_CHANWIDTH_USE_HT; } /* new center frequency segment 0 */ *pos++ = ieee80211_frequency_to_channel(chandef->center_freq1); /* new center frequency segment 1 */ if (chandef->center_freq2) *pos++ = ieee80211_frequency_to_channel(chandef->center_freq2); else *pos++ = 0; } u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap, const struct cfg80211_chan_def *chandef) { struct ieee80211_vht_operation *vht_oper; *pos++ = WLAN_EID_VHT_OPERATION; *pos++ = sizeof(struct ieee80211_vht_operation); vht_oper = (struct ieee80211_vht_operation *)pos; vht_oper->center_freq_seg0_idx = ieee80211_frequency_to_channel( chandef->center_freq1); if (chandef->center_freq2) vht_oper->center_freq_seg1_idx = ieee80211_frequency_to_channel(chandef->center_freq2); else vht_oper->center_freq_seg1_idx = 0x00; switch (chandef->width) { case NL80211_CHAN_WIDTH_160: /* * Convert 160 MHz channel width to new style as interop * workaround. */ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ; vht_oper->center_freq_seg1_idx = vht_oper->center_freq_seg0_idx; if (chandef->chan->center_freq < chandef->center_freq1) vht_oper->center_freq_seg0_idx -= 8; else vht_oper->center_freq_seg0_idx += 8; break; case NL80211_CHAN_WIDTH_80P80: /* * Convert 80+80 MHz channel width to new style as interop * workaround. */ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ; break; case NL80211_CHAN_WIDTH_80: vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ; break; case NL80211_CHAN_WIDTH_320: /* VHT information element should not be included on 6GHz */ WARN_ON(1); return pos; default: vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_USE_HT; break; } /* don't require special VHT peer rates */ vht_oper->basic_mcs_set = cpu_to_le16(0xffff); return pos + sizeof(struct ieee80211_vht_operation); } u8 *ieee80211_ie_build_he_oper(u8 *pos, struct cfg80211_chan_def *chandef) { struct ieee80211_he_operation *he_oper; struct ieee80211_he_6ghz_oper *he_6ghz_op; u32 he_oper_params; u8 ie_len = 1 + sizeof(struct ieee80211_he_operation); if (chandef->chan->band == NL80211_BAND_6GHZ) ie_len += sizeof(struct ieee80211_he_6ghz_oper); *pos++ = WLAN_EID_EXTENSION; *pos++ = ie_len; *pos++ = WLAN_EID_EXT_HE_OPERATION; he_oper_params = 0; he_oper_params |= u32_encode_bits(1023, /* disabled */ IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK); he_oper_params |= u32_encode_bits(1, IEEE80211_HE_OPERATION_ER_SU_DISABLE); he_oper_params |= u32_encode_bits(1, IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED); if (chandef->chan->band == NL80211_BAND_6GHZ) he_oper_params |= u32_encode_bits(1, IEEE80211_HE_OPERATION_6GHZ_OP_INFO); he_oper = (struct ieee80211_he_operation *)pos; he_oper->he_oper_params = cpu_to_le32(he_oper_params); /* don't require special HE peer rates */ he_oper->he_mcs_nss_set = cpu_to_le16(0xffff); pos += sizeof(struct ieee80211_he_operation); if (chandef->chan->band != NL80211_BAND_6GHZ) goto out; /* TODO add VHT operational */ he_6ghz_op = (struct ieee80211_he_6ghz_oper *)pos; he_6ghz_op->minrate = 6; /* 6 Mbps */ he_6ghz_op->primary = ieee80211_frequency_to_channel(chandef->chan->center_freq); he_6ghz_op->ccfs0 = ieee80211_frequency_to_channel(chandef->center_freq1); if (chandef->center_freq2) he_6ghz_op->ccfs1 = ieee80211_frequency_to_channel(chandef->center_freq2); else he_6ghz_op->ccfs1 = 0; switch (chandef->width) { case NL80211_CHAN_WIDTH_320: /* * TODO: 
mesh operation is not defined over 6GHz 320 MHz * channels. */ WARN_ON(1); break; case NL80211_CHAN_WIDTH_160: /* Convert 160 MHz channel width to new style as interop * workaround. */ he_6ghz_op->control = IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ; he_6ghz_op->ccfs1 = he_6ghz_op->ccfs0; if (chandef->chan->center_freq < chandef->center_freq1) he_6ghz_op->ccfs0 -= 8; else he_6ghz_op->ccfs0 += 8; fallthrough; case NL80211_CHAN_WIDTH_80P80: he_6ghz_op->control = IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ; break; case NL80211_CHAN_WIDTH_80: he_6ghz_op->control = IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ; break; case NL80211_CHAN_WIDTH_40: he_6ghz_op->control = IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_40MHZ; break; default: he_6ghz_op->control = IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_20MHZ; break; } pos += sizeof(struct ieee80211_he_6ghz_oper); out: return pos; } u8 *ieee80211_ie_build_eht_oper(u8 *pos, struct cfg80211_chan_def *chandef, const struct ieee80211_sta_eht_cap *eht_cap) { const struct ieee80211_eht_mcs_nss_supp_20mhz_only *eht_mcs_nss = &eht_cap->eht_mcs_nss_supp.only_20mhz; struct ieee80211_eht_operation *eht_oper; struct ieee80211_eht_operation_info *eht_oper_info; u8 eht_oper_len = offsetof(struct ieee80211_eht_operation, optional); u8 eht_oper_info_len = offsetof(struct ieee80211_eht_operation_info, optional); u8 chan_width = 0; *pos++ = WLAN_EID_EXTENSION; *pos++ = 1 + eht_oper_len + eht_oper_info_len; *pos++ = WLAN_EID_EXT_EHT_OPERATION; eht_oper = (struct ieee80211_eht_operation *)pos; memcpy(&eht_oper->basic_mcs_nss, eht_mcs_nss, sizeof(*eht_mcs_nss)); eht_oper->params |= IEEE80211_EHT_OPER_INFO_PRESENT; pos += eht_oper_len; eht_oper_info = (struct ieee80211_eht_operation_info *)eht_oper->optional; eht_oper_info->ccfs0 = ieee80211_frequency_to_channel(chandef->center_freq1); if (chandef->center_freq2) eht_oper_info->ccfs1 = ieee80211_frequency_to_channel(chandef->center_freq2); else eht_oper_info->ccfs1 = 0; switch (chandef->width) { case NL80211_CHAN_WIDTH_320: chan_width = IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ; eht_oper_info->ccfs1 = eht_oper_info->ccfs0; if (chandef->chan->center_freq < chandef->center_freq1) eht_oper_info->ccfs0 -= 16; else eht_oper_info->ccfs0 += 16; break; case NL80211_CHAN_WIDTH_160: eht_oper_info->ccfs1 = eht_oper_info->ccfs0; if (chandef->chan->center_freq < chandef->center_freq1) eht_oper_info->ccfs0 -= 8; else eht_oper_info->ccfs0 += 8; fallthrough; case NL80211_CHAN_WIDTH_80P80: chan_width = IEEE80211_EHT_OPER_CHAN_WIDTH_160MHZ; break; case NL80211_CHAN_WIDTH_80: chan_width = IEEE80211_EHT_OPER_CHAN_WIDTH_80MHZ; break; case NL80211_CHAN_WIDTH_40: chan_width = IEEE80211_EHT_OPER_CHAN_WIDTH_40MHZ; break; default: chan_width = IEEE80211_EHT_OPER_CHAN_WIDTH_20MHZ; break; } eht_oper_info->control = chan_width; pos += eht_oper_info_len; /* TODO: eht_oper_info->optional */ return pos; } bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper, struct cfg80211_chan_def *chandef) { enum nl80211_channel_type channel_type; if (!ht_oper) return false; switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { case IEEE80211_HT_PARAM_CHA_SEC_NONE: channel_type = NL80211_CHAN_HT20; break; case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: channel_type = NL80211_CHAN_HT40PLUS; break; case IEEE80211_HT_PARAM_CHA_SEC_BELOW: channel_type = NL80211_CHAN_HT40MINUS; break; default: return false; } cfg80211_chandef_create(chandef, chandef->chan, channel_type); return true; } bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, u32 vht_cap_info, 
const struct ieee80211_vht_operation *oper, const struct ieee80211_ht_operation *htop, struct cfg80211_chan_def *chandef) { struct cfg80211_chan_def new = *chandef; int cf0, cf1; int ccfs0, ccfs1, ccfs2; int ccf0, ccf1; u32 vht_cap; bool support_80_80 = false; bool support_160 = false; u8 ext_nss_bw_supp = u32_get_bits(vht_cap_info, IEEE80211_VHT_CAP_EXT_NSS_BW_MASK); u8 supp_chwidth = u32_get_bits(vht_cap_info, IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK); if (!oper || !htop) return false; vht_cap = hw->wiphy->bands[chandef->chan->band]->vht_cap.cap; support_160 = (vht_cap & (IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK | IEEE80211_VHT_CAP_EXT_NSS_BW_MASK)); support_80_80 = ((vht_cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) || (vht_cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ && vht_cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) || ((vht_cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) >> IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT > 1)); ccfs0 = oper->center_freq_seg0_idx; ccfs1 = oper->center_freq_seg1_idx; ccfs2 = (le16_to_cpu(htop->operation_mode) & IEEE80211_HT_OP_MODE_CCFS2_MASK) >> IEEE80211_HT_OP_MODE_CCFS2_SHIFT; ccf0 = ccfs0; /* if not supported, parse as though we didn't understand it */ if (!ieee80211_hw_check(hw, SUPPORTS_VHT_EXT_NSS_BW)) ext_nss_bw_supp = 0; /* * Cf. IEEE 802.11 Table 9-250 * * We really just consider that because it's inefficient to connect * at a higher bandwidth than we'll actually be able to use. */ switch ((supp_chwidth << 4) | ext_nss_bw_supp) { default: case 0x00: ccf1 = 0; support_160 = false; support_80_80 = false; break; case 0x01: support_80_80 = false; fallthrough; case 0x02: case 0x03: ccf1 = ccfs2; break; case 0x10: ccf1 = ccfs1; break; case 0x11: case 0x12: if (!ccfs1) ccf1 = ccfs2; else ccf1 = ccfs1; break; case 0x13: case 0x20: case 0x23: ccf1 = ccfs1; break; } cf0 = ieee80211_channel_to_frequency(ccf0, chandef->chan->band); cf1 = ieee80211_channel_to_frequency(ccf1, chandef->chan->band); switch (oper->chan_width) { case IEEE80211_VHT_CHANWIDTH_USE_HT: /* just use HT information directly */ break; case IEEE80211_VHT_CHANWIDTH_80MHZ: new.width = NL80211_CHAN_WIDTH_80; new.center_freq1 = cf0; /* If needed, adjust based on the newer interop workaround. 
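* A CCFS0/CCFS1 channel-number difference of 8 (40 MHz between segment centers) indicates a 160 MHz channel; a larger difference indicates two non-contiguous 80 MHz segments (80+80).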
*/ if (ccf1) { unsigned int diff; diff = abs(ccf1 - ccf0); if ((diff == 8) && support_160) { new.width = NL80211_CHAN_WIDTH_160; new.center_freq1 = cf1; } else if ((diff > 8) && support_80_80) { new.width = NL80211_CHAN_WIDTH_80P80; new.center_freq2 = cf1; } } break; case IEEE80211_VHT_CHANWIDTH_160MHZ: /* deprecated encoding */ new.width = NL80211_CHAN_WIDTH_160; new.center_freq1 = cf0; break; case IEEE80211_VHT_CHANWIDTH_80P80MHZ: /* deprecated encoding */ new.width = NL80211_CHAN_WIDTH_80P80; new.center_freq1 = cf0; new.center_freq2 = cf1; break; default: return false; } if (!cfg80211_chandef_valid(&new)) return false; *chandef = new; return true; } void ieee80211_chandef_eht_oper(const struct ieee80211_eht_operation_info *info, struct cfg80211_chan_def *chandef) { chandef->center_freq1 = ieee80211_channel_to_frequency(info->ccfs0, chandef->chan->band); switch (u8_get_bits(info->control, IEEE80211_EHT_OPER_CHAN_WIDTH)) { case IEEE80211_EHT_OPER_CHAN_WIDTH_20MHZ: chandef->width = NL80211_CHAN_WIDTH_20; break; case IEEE80211_EHT_OPER_CHAN_WIDTH_40MHZ: chandef->width = NL80211_CHAN_WIDTH_40; break; case IEEE80211_EHT_OPER_CHAN_WIDTH_80MHZ: chandef->width = NL80211_CHAN_WIDTH_80; break; case IEEE80211_EHT_OPER_CHAN_WIDTH_160MHZ: chandef->width = NL80211_CHAN_WIDTH_160; chandef->center_freq1 = ieee80211_channel_to_frequency(info->ccfs1, chandef->chan->band); break; case IEEE80211_EHT_OPER_CHAN_WIDTH_320MHZ: chandef->width = NL80211_CHAN_WIDTH_320; chandef->center_freq1 = ieee80211_channel_to_frequency(info->ccfs1, chandef->chan->band); break; } } bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_local *local, const struct ieee80211_he_operation *he_oper, const struct ieee80211_eht_operation *eht_oper, struct cfg80211_chan_def *chandef) { struct cfg80211_chan_def he_chandef = *chandef; const struct ieee80211_he_6ghz_oper *he_6ghz_oper; u32 freq; if (chandef->chan->band != NL80211_BAND_6GHZ) return true; if (!he_oper) return false; he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper); if (!he_6ghz_oper) return false; /* * The EHT operation IE does not contain the primary channel so the * primary channel frequency should be taken from the 6 GHz operation * information. 
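* (When EHT operation information is present, the width and center frequencies are instead derived from it further below.)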
*/ freq = ieee80211_channel_to_frequency(he_6ghz_oper->primary, NL80211_BAND_6GHZ); he_chandef.chan = ieee80211_get_channel(local->hw.wiphy, freq); if (!he_chandef.chan) return false; if (!eht_oper || !(eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT)) { switch (u8_get_bits(he_6ghz_oper->control, IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH)) { case IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_20MHZ: he_chandef.width = NL80211_CHAN_WIDTH_20; break; case IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_40MHZ: he_chandef.width = NL80211_CHAN_WIDTH_40; break; case IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ: he_chandef.width = NL80211_CHAN_WIDTH_80; break; case IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ: he_chandef.width = NL80211_CHAN_WIDTH_80; if (!he_6ghz_oper->ccfs1) break; if (abs(he_6ghz_oper->ccfs1 - he_6ghz_oper->ccfs0) == 8) he_chandef.width = NL80211_CHAN_WIDTH_160; else he_chandef.width = NL80211_CHAN_WIDTH_80P80; break; } if (he_chandef.width == NL80211_CHAN_WIDTH_160) { he_chandef.center_freq1 = ieee80211_channel_to_frequency(he_6ghz_oper->ccfs1, NL80211_BAND_6GHZ); } else { he_chandef.center_freq1 = ieee80211_channel_to_frequency(he_6ghz_oper->ccfs0, NL80211_BAND_6GHZ); he_chandef.center_freq2 = ieee80211_channel_to_frequency(he_6ghz_oper->ccfs1, NL80211_BAND_6GHZ); } } else { ieee80211_chandef_eht_oper((const void *)eht_oper->optional, &he_chandef); he_chandef.punctured = ieee80211_eht_oper_dis_subchan_bitmap(eht_oper); } if (!cfg80211_chandef_valid(&he_chandef)) return false; *chandef = he_chandef; return true; } bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper, struct cfg80211_chan_def *chandef) { u32 oper_freq; if (!oper) return false; switch (FIELD_GET(S1G_OPER_CH_WIDTH_OPER, oper->ch_width)) { case IEEE80211_S1G_CHANWIDTH_1MHZ: chandef->width = NL80211_CHAN_WIDTH_1; break; case IEEE80211_S1G_CHANWIDTH_2MHZ: chandef->width = NL80211_CHAN_WIDTH_2; break; case IEEE80211_S1G_CHANWIDTH_4MHZ: chandef->width = NL80211_CHAN_WIDTH_4; break; case IEEE80211_S1G_CHANWIDTH_8MHZ: chandef->width = NL80211_CHAN_WIDTH_8; break; case IEEE80211_S1G_CHANWIDTH_16MHZ: chandef->width = NL80211_CHAN_WIDTH_16; break; default: return false; } oper_freq = ieee80211_channel_to_freq_khz(oper->oper_ch, NL80211_BAND_S1GHZ); chandef->center_freq1 = KHZ_TO_MHZ(oper_freq); chandef->freq1_offset = oper_freq % 1000; return true; } int ieee80211_put_srates_elem(struct sk_buff *skb, const struct ieee80211_supported_band *sband, u32 basic_rates, u32 rate_flags, u32 masked_rates, u8 element_id) { u8 i, rates, skip; rates = 0; for (i = 0; i < sband->n_bitrates; i++) { if ((rate_flags & sband->bitrates[i].flags) != rate_flags) continue; if (masked_rates & BIT(i)) continue; rates++; } if (element_id == WLAN_EID_SUPP_RATES) { rates = min_t(u8, rates, 8); skip = 0; } else { skip = 8; if (rates <= skip) return 0; rates -= skip; } if (skb_tailroom(skb) < rates + 2) return -ENOBUFS; skb_put_u8(skb, element_id); skb_put_u8(skb, rates); for (i = 0; i < sband->n_bitrates && rates; i++) { int rate; u8 basic; if ((rate_flags & sband->bitrates[i].flags) != rate_flags) continue; if (masked_rates & BIT(i)) continue; if (skip > 0) { skip--; continue; } basic = basic_rates & BIT(i) ? 
0x80 : 0; rate = DIV_ROUND_UP(sband->bitrates[i].bitrate, 5); skb_put_u8(skb, basic | (u8)rate); rates--; } WARN(rates > 0, "rates confused: rates:%d, element:%d\n", rates, element_id); return 0; } int ieee80211_ave_rssi(struct ieee80211_vif *vif) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION)) return 0; return -ewma_beacon_signal_read(&sdata->deflink.u.mgd.ave_beacon_signal); } EXPORT_SYMBOL_GPL(ieee80211_ave_rssi); u8 ieee80211_mcs_to_chains(const struct ieee80211_mcs_info *mcs) { if (!mcs) return 1; /* TODO: consider rx_highest */ if (mcs->rx_mask[3]) return 4; if (mcs->rx_mask[2]) return 3; if (mcs->rx_mask[1]) return 2; return 1; } /** * ieee80211_calculate_rx_timestamp - calculate timestamp in frame * @local: mac80211 hw info struct * @status: RX status * @mpdu_len: total MPDU length (including FCS) * @mpdu_offset: offset into MPDU to calculate timestamp at * * This function calculates the RX timestamp at the given MPDU offset, taking * into account what the RX timestamp was. An offset of 0 will just normalize * the timestamp to TSF at beginning of MPDU reception. * * Returns: the calculated timestamp */ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local, struct ieee80211_rx_status *status, unsigned int mpdu_len, unsigned int mpdu_offset) { u64 ts = status->mactime; bool mactime_plcp_start; struct rate_info ri; u16 rate; u8 n_ltf; if (WARN_ON(!ieee80211_have_rx_timestamp(status))) return 0; mactime_plcp_start = (status->flag & RX_FLAG_MACTIME) == RX_FLAG_MACTIME_PLCP_START; memset(&ri, 0, sizeof(ri)); ri.bw = status->bw; /* Fill cfg80211 rate info */ switch (status->encoding) { case RX_ENC_EHT: ri.flags |= RATE_INFO_FLAGS_EHT_MCS; ri.mcs = status->rate_idx; ri.nss = status->nss; ri.eht_ru_alloc = status->eht.ru; if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) ri.flags |= RATE_INFO_FLAGS_SHORT_GI; /* TODO/FIXME: is this right? handle other PPDUs */ if (mactime_plcp_start) { mpdu_offset += 2; ts += 36; } break; case RX_ENC_HE: ri.flags |= RATE_INFO_FLAGS_HE_MCS; ri.mcs = status->rate_idx; ri.nss = status->nss; ri.he_ru_alloc = status->he_ru; if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) ri.flags |= RATE_INFO_FLAGS_SHORT_GI; /* * See P802.11ax_D6.0, section 27.3.4 for * HE PPDU format. */ if (mactime_plcp_start) { mpdu_offset += 2; ts += 36; /* * TODO: * For HE MU PPDU, add the HE-SIG-B. * For HE ER PPDU, add 8us for the HE-SIG-A. * For HE TB PPDU, add 4us for the HE-STF. * Add the HE-LTF durations - variable. */ } break; case RX_ENC_HT: ri.mcs = status->rate_idx; ri.flags |= RATE_INFO_FLAGS_MCS; if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) ri.flags |= RATE_INFO_FLAGS_SHORT_GI; /* * See P802.11REVmd_D3.0, section 19.3.2 for * HT PPDU format. */ if (mactime_plcp_start) { mpdu_offset += 2; if (status->enc_flags & RX_ENC_FLAG_HT_GF) ts += 24; else ts += 32; /* * Add Data HT-LTFs per stream * TODO: add Extension HT-LTFs, 4us per LTF */ n_ltf = ((ri.mcs >> 3) & 3) + 1; n_ltf = n_ltf == 3 ? 4 : n_ltf; ts += n_ltf * 4; } break; case RX_ENC_VHT: ri.flags |= RATE_INFO_FLAGS_VHT_MCS; ri.mcs = status->rate_idx; ri.nss = status->nss; if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) ri.flags |= RATE_INFO_FLAGS_SHORT_GI; /* * See P802.11REVmd_D3.0, section 21.3.2 for * VHT PPDU format. */ if (mactime_plcp_start) { mpdu_offset += 2; ts += 36; /* * Add VHT-LTFs per stream */ n_ltf = (ri.nss != 1) && (ri.nss % 2) ?
ri.nss + 1 : ri.nss; ts += 4 * n_ltf; } break; default: WARN_ON(1); fallthrough; case RX_ENC_LEGACY: { struct ieee80211_supported_band *sband; sband = local->hw.wiphy->bands[status->band]; ri.legacy = sband->bitrates[status->rate_idx].bitrate; if (mactime_plcp_start) { if (status->band == NL80211_BAND_5GHZ) { ts += 20; mpdu_offset += 2; } else if (status->enc_flags & RX_ENC_FLAG_SHORTPRE) { ts += 96; } else { ts += 192; } } break; } } rate = cfg80211_calculate_bitrate(&ri); if (WARN_ONCE(!rate, "Invalid bitrate: flags=0x%llx, idx=%d, vht_nss=%d\n", (unsigned long long)status->flag, status->rate_idx, status->nss)) return 0; /* rewind from end of MPDU */ if ((status->flag & RX_FLAG_MACTIME) == RX_FLAG_MACTIME_END) ts -= mpdu_len * 8 * 10 / rate; ts += mpdu_offset * 8 * 10 / rate; return ts; } void ieee80211_dfs_cac_cancel(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; struct cfg80211_chan_def chandef; lockdep_assert_wiphy(local->hw.wiphy); list_for_each_entry(sdata, &local->interfaces, list) { wiphy_delayed_work_cancel(local->hw.wiphy, &sdata->dfs_cac_timer_work); if (sdata->wdev.cac_started) { chandef = sdata->vif.bss_conf.chanreq.oper; ieee80211_link_release_channel(&sdata->deflink); cfg80211_cac_event(sdata->dev, &chandef, NL80211_RADAR_CAC_ABORTED, GFP_KERNEL); } } } void ieee80211_dfs_radar_detected_work(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_local *local = container_of(work, struct ieee80211_local, radar_detected_work); struct cfg80211_chan_def chandef = local->hw.conf.chandef; struct ieee80211_chanctx *ctx; int num_chanctx = 0; lockdep_assert_wiphy(local->hw.wiphy); list_for_each_entry(ctx, &local->chanctx_list, list) { if (ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER) continue; num_chanctx++; chandef = ctx->conf.def; } ieee80211_dfs_cac_cancel(local); if (num_chanctx > 1) /* XXX: multi-channel is not supported yet */ WARN_ON(1); else cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL); } void ieee80211_radar_detected(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); trace_api_radar_detected(local); wiphy_work_queue(hw->wiphy, &local->radar_detected_work); } EXPORT_SYMBOL(ieee80211_radar_detected); void ieee80211_chandef_downgrade(struct cfg80211_chan_def *c, struct ieee80211_conn_settings *conn) { enum nl80211_chan_width new_primary_width; struct ieee80211_conn_settings _ignored = {}; /* allow passing NULL if caller doesn't care */ if (!conn) conn = &_ignored; again: /* no-HT indicates nothing to do */ new_primary_width = NL80211_CHAN_WIDTH_20_NOHT; switch (c->width) { default: case NL80211_CHAN_WIDTH_20_NOHT: WARN_ON_ONCE(1); fallthrough; case NL80211_CHAN_WIDTH_20: c->width = NL80211_CHAN_WIDTH_20_NOHT; conn->mode = IEEE80211_CONN_MODE_LEGACY; conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20; c->punctured = 0; break; case NL80211_CHAN_WIDTH_40: c->width = NL80211_CHAN_WIDTH_20; c->center_freq1 = c->chan->center_freq; if (conn->mode == IEEE80211_CONN_MODE_VHT) conn->mode = IEEE80211_CONN_MODE_HT; conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20; c->punctured = 0; break; case NL80211_CHAN_WIDTH_80: new_primary_width = NL80211_CHAN_WIDTH_40; if (conn->mode == IEEE80211_CONN_MODE_VHT) conn->mode = IEEE80211_CONN_MODE_HT; conn->bw_limit = IEEE80211_CONN_BW_LIMIT_40; break; case NL80211_CHAN_WIDTH_80P80: c->center_freq2 = 0; c->width = NL80211_CHAN_WIDTH_80; conn->bw_limit = IEEE80211_CONN_BW_LIMIT_80; break; case NL80211_CHAN_WIDTH_160: new_primary_width = NL80211_CHAN_WIDTH_80; conn->bw_limit = 
IEEE80211_CONN_BW_LIMIT_80; break; case NL80211_CHAN_WIDTH_320: new_primary_width = NL80211_CHAN_WIDTH_160; conn->bw_limit = IEEE80211_CONN_BW_LIMIT_160; break; case NL80211_CHAN_WIDTH_1: case NL80211_CHAN_WIDTH_2: case NL80211_CHAN_WIDTH_4: case NL80211_CHAN_WIDTH_8: case NL80211_CHAN_WIDTH_16: WARN_ON_ONCE(1); /* keep c->width */ conn->mode = IEEE80211_CONN_MODE_S1G; conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20; break; case NL80211_CHAN_WIDTH_5: case NL80211_CHAN_WIDTH_10: WARN_ON_ONCE(1); /* keep c->width */ conn->mode = IEEE80211_CONN_MODE_LEGACY; conn->bw_limit = IEEE80211_CONN_BW_LIMIT_20; break; } if (new_primary_width != NL80211_CHAN_WIDTH_20_NOHT) { c->center_freq1 = cfg80211_chandef_primary(c, new_primary_width, &c->punctured); c->width = new_primary_width; } /* * With an 80 MHz channel, we might have the puncturing in the primary * 40 Mhz channel, but that's not valid when downgraded to 40 MHz width. * In that case, downgrade again. */ if (!cfg80211_chandef_valid(c) && c->punctured) goto again; WARN_ON_ONCE(!cfg80211_chandef_valid(c)); } /* * Returns true if smps_mode_new is strictly more restrictive than * smps_mode_old. */ bool ieee80211_smps_is_restrictive(enum ieee80211_smps_mode smps_mode_old, enum ieee80211_smps_mode smps_mode_new) { if (WARN_ON_ONCE(smps_mode_old == IEEE80211_SMPS_AUTOMATIC || smps_mode_new == IEEE80211_SMPS_AUTOMATIC)) return false; switch (smps_mode_old) { case IEEE80211_SMPS_STATIC: return false; case IEEE80211_SMPS_DYNAMIC: return smps_mode_new == IEEE80211_SMPS_STATIC; case IEEE80211_SMPS_OFF: return smps_mode_new != IEEE80211_SMPS_OFF; default: WARN_ON(1); } return false; } int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata, struct cfg80211_csa_settings *csa_settings) { struct sk_buff *skb; struct ieee80211_mgmt *mgmt; struct ieee80211_local *local = sdata->local; int freq; int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.chan_switch); u8 *pos; if (sdata->vif.type != NL80211_IFTYPE_ADHOC && sdata->vif.type != NL80211_IFTYPE_MESH_POINT) return -EOPNOTSUPP; skb = dev_alloc_skb(local->tx_headroom + hdr_len + 5 + /* channel switch announcement element */ 3 + /* secondary channel offset element */ 5 + /* wide bandwidth channel switch announcement */ 8); /* mesh channel switch parameters element */ if (!skb) return -ENOMEM; skb_reserve(skb, local->tx_headroom); mgmt = skb_put_zero(skb, hdr_len); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION); eth_broadcast_addr(mgmt->da); memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); if (ieee80211_vif_is_mesh(&sdata->vif)) { memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); } else { struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN); } mgmt->u.action.category = WLAN_CATEGORY_SPECTRUM_MGMT; mgmt->u.action.u.chan_switch.action_code = WLAN_ACTION_SPCT_CHL_SWITCH; pos = skb_put(skb, 5); *pos++ = WLAN_EID_CHANNEL_SWITCH; /* EID */ *pos++ = 3; /* IE length */ *pos++ = csa_settings->block_tx ? 
1 : 0; /* CSA mode */ freq = csa_settings->chandef.chan->center_freq; *pos++ = ieee80211_frequency_to_channel(freq); /* channel */ *pos++ = csa_settings->count; /* count */ if (csa_settings->chandef.width == NL80211_CHAN_WIDTH_40) { enum nl80211_channel_type ch_type; skb_put(skb, 3); *pos++ = WLAN_EID_SECONDARY_CHANNEL_OFFSET; /* EID */ *pos++ = 1; /* IE length */ ch_type = cfg80211_get_chandef_type(&csa_settings->chandef); if (ch_type == NL80211_CHAN_HT40PLUS) *pos++ = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; else *pos++ = IEEE80211_HT_PARAM_CHA_SEC_BELOW; } if (ieee80211_vif_is_mesh(&sdata->vif)) { struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; skb_put(skb, 8); *pos++ = WLAN_EID_CHAN_SWITCH_PARAM; /* EID */ *pos++ = 6; /* IE length */ *pos++ = sdata->u.mesh.mshcfg.dot11MeshTTL; /* Mesh TTL */ *pos = 0x00; /* Mesh Flag: Tx Restrict, Initiator, Reason */ *pos |= WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR; *pos++ |= csa_settings->block_tx ? WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT : 0x00; put_unaligned_le16(WLAN_REASON_MESH_CHAN, pos); /* Reason Code */ pos += 2; put_unaligned_le16(ifmsh->pre_value, pos); /* Precedence Value */ pos += 2; } if (csa_settings->chandef.width == NL80211_CHAN_WIDTH_80 || csa_settings->chandef.width == NL80211_CHAN_WIDTH_80P80 || csa_settings->chandef.width == NL80211_CHAN_WIDTH_160) { skb_put(skb, 5); ieee80211_ie_build_wide_bw_cs(pos, &csa_settings->chandef); } ieee80211_tx_skb(sdata, skb); return 0; } static bool ieee80211_extend_noa_desc(struct ieee80211_noa_data *data, u32 tsf, int i) { s32 end = data->desc[i].start + data->desc[i].duration - (tsf + 1); int skip; if (end > 0) return false; /* One shot NOA */ if (data->count[i] == 1) return false; if (data->desc[i].interval == 0) return false; /* End time is in the past, check for repetitions */ skip = DIV_ROUND_UP(-end, data->desc[i].interval); if (data->count[i] < 255) { if (data->count[i] <= skip) { data->count[i] = 0; return false; } data->count[i] -= skip; } data->desc[i].start += skip * data->desc[i].interval; return true; } static bool ieee80211_extend_absent_time(struct ieee80211_noa_data *data, u32 tsf, s32 *offset) { bool ret = false; int i; for (i = 0; i < IEEE80211_P2P_NOA_DESC_MAX; i++) { s32 cur; if (!data->count[i]) continue; if (ieee80211_extend_noa_desc(data, tsf + *offset, i)) ret = true; cur = data->desc[i].start - tsf; if (cur > *offset) continue; cur = data->desc[i].start + data->desc[i].duration - tsf; if (cur > *offset) *offset = cur; } return ret; } static u32 ieee80211_get_noa_absent_time(struct ieee80211_noa_data *data, u32 tsf) { s32 offset = 0; int tries = 0; /* * arbitrary limit, used to avoid infinite loops when combined NoA * descriptors cover the full time period.
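* A handful of iterations is ample in practice; the limit only guards against pathological descriptor combinations.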
*/ int max_tries = 5; ieee80211_extend_absent_time(data, tsf, &offset); do { if (!ieee80211_extend_absent_time(data, tsf, &offset)) break; tries++; } while (tries < max_tries); return offset; } void ieee80211_update_p2p_noa(struct ieee80211_noa_data *data, u32 tsf) { u32 next_offset = BIT(31) - 1; int i; data->absent = 0; data->has_next_tsf = false; for (i = 0; i < IEEE80211_P2P_NOA_DESC_MAX; i++) { s32 start; if (!data->count[i]) continue; ieee80211_extend_noa_desc(data, tsf, i); start = data->desc[i].start - tsf; if (start <= 0) data->absent |= BIT(i); if (next_offset > start) next_offset = start; data->has_next_tsf = true; } if (data->absent) next_offset = ieee80211_get_noa_absent_time(data, tsf); data->next_tsf = tsf + next_offset; } EXPORT_SYMBOL(ieee80211_update_p2p_noa); int ieee80211_parse_p2p_noa(const struct ieee80211_p2p_noa_attr *attr, struct ieee80211_noa_data *data, u32 tsf) { int ret = 0; int i; memset(data, 0, sizeof(*data)); for (i = 0; i < IEEE80211_P2P_NOA_DESC_MAX; i++) { const struct ieee80211_p2p_noa_desc *desc = &attr->desc[i]; if (!desc->count || !desc->duration) continue; data->count[i] = desc->count; data->desc[i].start = le32_to_cpu(desc->start_time); data->desc[i].duration = le32_to_cpu(desc->duration); data->desc[i].interval = le32_to_cpu(desc->interval); if (data->count[i] > 1 && data->desc[i].interval < data->desc[i].duration) continue; ieee80211_extend_noa_desc(data, tsf, i); ret++; } if (ret) ieee80211_update_p2p_noa(data, tsf); return ret; } EXPORT_SYMBOL(ieee80211_parse_p2p_noa); void ieee80211_recalc_dtim(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { u64 tsf = drv_get_tsf(local, sdata); u64 dtim_count = 0; u32 beacon_int = sdata->vif.bss_conf.beacon_int * 1024; u8 dtim_period = sdata->vif.bss_conf.dtim_period; struct ps_data *ps; u8 bcns_from_dtim; if (tsf == -1ULL || !beacon_int || !dtim_period) return; if (sdata->vif.type == NL80211_IFTYPE_AP || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { if (!sdata->bss) return; ps = &sdata->bss->ps; } else if (ieee80211_vif_is_mesh(&sdata->vif)) { ps = &sdata->u.mesh.ps; } else { return; } /* * actually finds last dtim_count, mac80211 will update in * __beacon_add_tim(). * dtim_count = dtim_period - (tsf / bcn_int) % dtim_period */ do_div(tsf, beacon_int); bcns_from_dtim = do_div(tsf, dtim_period); /* just had a DTIM */ if (!bcns_from_dtim) dtim_count = 0; else dtim_count = dtim_period - bcns_from_dtim; ps->dtim_count = dtim_count; } static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { struct ieee80211_link_data *link; u8 radar_detect = 0; lockdep_assert_wiphy(local->hw.wiphy); if (WARN_ON(ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED)) return 0; list_for_each_entry(link, &ctx->reserved_links, reserved_chanctx_list) if (link->reserved_radar_required) radar_detect |= BIT(link->reserved.oper.width); /* * An in-place reservation context should not have any assigned vifs * until it replaces the other context.
*/ WARN_ON(ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER && !list_empty(&ctx->assigned_links)); list_for_each_entry(link, &ctx->assigned_links, assigned_chanctx_list) { if (!link->radar_required) continue; radar_detect |= BIT(link->conf->chanreq.oper.width); } return radar_detect; } static u32 __ieee80211_get_radio_mask(struct ieee80211_sub_if_data *sdata) { struct ieee80211_bss_conf *link_conf; struct ieee80211_chanctx_conf *conf; unsigned int link_id; u32 mask = 0; for_each_vif_active_link(&sdata->vif, link_conf, link_id) { conf = sdata_dereference(link_conf->chanctx_conf, sdata); if (!conf || conf->radio_idx < 0) continue; mask |= BIT(conf->radio_idx); } return mask; } u32 ieee80211_get_radio_mask(struct wiphy *wiphy, struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); return __ieee80211_get_radio_mask(sdata); } static bool ieee80211_sdata_uses_radio(struct ieee80211_sub_if_data *sdata, int radio_idx) { if (radio_idx < 0) return true; return __ieee80211_get_radio_mask(sdata) & BIT(radio_idx); } static int ieee80211_fill_ifcomb_params(struct ieee80211_local *local, struct iface_combination_params *params, const struct cfg80211_chan_def *chandef, struct ieee80211_sub_if_data *sdata) { struct ieee80211_sub_if_data *sdata_iter; struct ieee80211_chanctx *ctx; int total = !!sdata; list_for_each_entry(ctx, &local->chanctx_list, list) { if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) continue; if (params->radio_idx >= 0 && ctx->conf.radio_idx != params->radio_idx) continue; params->radar_detect |= ieee80211_chanctx_radar_detect(local, ctx); if (chandef && ctx->mode != IEEE80211_CHANCTX_EXCLUSIVE && cfg80211_chandef_compatible(chandef, &ctx->conf.def)) continue; params->num_different_channels++; } list_for_each_entry(sdata_iter, &local->interfaces, list) { struct wireless_dev *wdev_iter; wdev_iter = &sdata_iter->wdev; if (sdata_iter == sdata || !ieee80211_sdata_running(sdata_iter) || cfg80211_iftype_allowed(local->hw.wiphy, wdev_iter->iftype, 0, 1)) continue; if (!ieee80211_sdata_uses_radio(sdata_iter, params->radio_idx)) continue; params->iftype_num[wdev_iter->iftype]++; total++; } return total; } int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, const struct cfg80211_chan_def *chandef, enum ieee80211_chanctx_mode chanmode, u8 radar_detect, int radio_idx) { bool shared = chanmode == IEEE80211_CHANCTX_SHARED; struct ieee80211_local *local = sdata->local; enum nl80211_iftype iftype = sdata->wdev.iftype; struct iface_combination_params params = { .radar_detect = radar_detect, .radio_idx = radio_idx, }; int total; lockdep_assert_wiphy(local->hw.wiphy); if (WARN_ON(hweight32(radar_detect) > 1)) return -EINVAL; if (WARN_ON(chandef && chanmode == IEEE80211_CHANCTX_SHARED && !chandef->chan)) return -EINVAL; if (WARN_ON(iftype >= NUM_NL80211_IFTYPES)) return -EINVAL; if (sdata->vif.type == NL80211_IFTYPE_AP || sdata->vif.type == NL80211_IFTYPE_MESH_POINT) { /* * always passing this is harmless, since it'll be the * same value that cfg80211 finds if it finds the same * interface ... and that's always allowed */ params.new_beacon_int = sdata->vif.bss_conf.beacon_int; } /* Always allow software iftypes */ if (cfg80211_iftype_allowed(local->hw.wiphy, iftype, 0, 1)) { if (radar_detect) return -EINVAL; return 0; } if (chandef) params.num_different_channels = 1; if (iftype != NL80211_IFTYPE_UNSPECIFIED) params.iftype_num[iftype] = 1; total = ieee80211_fill_ifcomb_params(local, &params, shared ?
chandef : NULL, sdata); if (total == 1 && !params.radar_detect) return 0; return cfg80211_check_combinations(local->hw.wiphy, &params); } static void ieee80211_iter_max_chans(const struct ieee80211_iface_combination *c, void *data) { u32 *max_num_different_channels = data; *max_num_different_channels = max(*max_num_different_channels, c->num_different_channels); } int ieee80211_max_num_channels(struct ieee80211_local *local, int radio_idx) { u32 max_num_different_channels = 1; int err; struct iface_combination_params params = { .radio_idx = radio_idx, }; lockdep_assert_wiphy(local->hw.wiphy); ieee80211_fill_ifcomb_params(local, &params, NULL, NULL); err = cfg80211_iter_combinations(local->hw.wiphy, &params, ieee80211_iter_max_chans, &max_num_different_channels); if (err < 0) return err; return max_num_different_channels; } void ieee80211_add_s1g_capab_ie(struct ieee80211_sub_if_data *sdata, struct ieee80211_sta_s1g_cap *caps, struct sk_buff *skb) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_s1g_cap s1g_capab; u8 *pos; int i; if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) return; if (!caps->s1g) return; memcpy(s1g_capab.capab_info, caps->cap, sizeof(caps->cap)); memcpy(s1g_capab.supp_mcs_nss, caps->nss_mcs, sizeof(caps->nss_mcs)); /* override the capability info */ for (i = 0; i < sizeof(ifmgd->s1g_capa.capab_info); i++) { u8 mask = ifmgd->s1g_capa_mask.capab_info[i]; s1g_capab.capab_info[i] &= ~mask; s1g_capab.capab_info[i] |= ifmgd->s1g_capa.capab_info[i] & mask; } /* then MCS and NSS set */ for (i = 0; i < sizeof(ifmgd->s1g_capa.supp_mcs_nss); i++) { u8 mask = ifmgd->s1g_capa_mask.supp_mcs_nss[i]; s1g_capab.supp_mcs_nss[i] &= ~mask; s1g_capab.supp_mcs_nss[i] |= ifmgd->s1g_capa.supp_mcs_nss[i] & mask; } pos = skb_put(skb, 2 + sizeof(s1g_capab)); *pos++ = WLAN_EID_S1G_CAPABILITIES; *pos++ = sizeof(s1g_capab); memcpy(pos, &s1g_capab, sizeof(s1g_capab)); } void ieee80211_add_aid_request_ie(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) { u8 *pos = skb_put(skb, 3); *pos++ = WLAN_EID_AID_REQUEST; *pos++ = 1; *pos++ = 0; } u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo) { *buf++ = WLAN_EID_VENDOR_SPECIFIC; *buf++ = 7; /* len */ *buf++ = 0x00; /* Microsoft OUI 00:50:F2 */ *buf++ = 0x50; *buf++ = 0xf2; *buf++ = 2; /* WME */ *buf++ = 0; /* WME info */ *buf++ = 1; /* WME ver */ *buf++ = qosinfo; /* U-APSD not in use */ return buf; } void ieee80211_txq_get_depth(struct ieee80211_txq *txq, unsigned long *frame_cnt, unsigned long *byte_cnt) { struct txq_info *txqi = to_txq_info(txq); u32 frag_cnt = 0, frag_bytes = 0; struct sk_buff *skb; skb_queue_walk(&txqi->frags, skb) { frag_cnt++; frag_bytes += skb->len; } if (frame_cnt) *frame_cnt = txqi->tin.backlog_packets + frag_cnt; if (byte_cnt) *byte_cnt = txqi->tin.backlog_bytes + frag_bytes; } EXPORT_SYMBOL(ieee80211_txq_get_depth); const u8 ieee80211_ac_to_qos_mask[IEEE80211_NUM_ACS] = { IEEE80211_WMM_IE_STA_QOSINFO_AC_VO, IEEE80211_WMM_IE_STA_QOSINFO_AC_VI, IEEE80211_WMM_IE_STA_QOSINFO_AC_BE, IEEE80211_WMM_IE_STA_QOSINFO_AC_BK }; u16 ieee80211_encode_usf(int listen_interval) { static const int listen_int_usf[] = { 1, 10, 1000, 10000 }; u16 ui, usf = 0; /* find greatest USF */ while (usf < IEEE80211_MAX_USF) { if (listen_interval % listen_int_usf[usf + 1]) break; usf += 1; } ui = listen_interval / listen_int_usf[usf]; /* error if there is a remainder.
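For example, listen_interval = 3000 encodes as usf = 2 (units of 1000) with ui = 3, leaving no remainder.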
A remainder should have been checked by the caller. */ WARN_ON_ONCE(ui > IEEE80211_MAX_UI); listen_interval = FIELD_PREP(LISTEN_INT_USF, usf) | FIELD_PREP(LISTEN_INT_UI, ui); return (u16) listen_interval; } /* this may return more than ieee80211_put_eht_cap() will need */ u8 ieee80211_ie_len_eht_cap(struct ieee80211_sub_if_data *sdata) { const struct ieee80211_sta_he_cap *he_cap; const struct ieee80211_sta_eht_cap *eht_cap; struct ieee80211_supported_band *sband; bool is_ap; u8 n; sband = ieee80211_get_sband(sdata); if (!sband) return 0; he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif); if (!he_cap || !eht_cap) return 0; is_ap = sdata->vif.type == NL80211_IFTYPE_AP; n = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem, &eht_cap->eht_cap_elem, is_ap); return 2 + 1 + sizeof(eht_cap->eht_cap_elem) + n + ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0], eht_cap->eht_cap_elem.phy_cap_info); } int ieee80211_put_eht_cap(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata, const struct ieee80211_supported_band *sband, const struct ieee80211_conn_settings *conn) { const struct ieee80211_sta_he_cap *he_cap = ieee80211_get_he_iftype_cap_vif(sband, &sdata->vif); const struct ieee80211_sta_eht_cap *eht_cap = ieee80211_get_eht_iftype_cap_vif(sband, &sdata->vif); bool for_ap = sdata->vif.type == NL80211_IFTYPE_AP; struct ieee80211_eht_cap_elem_fixed fixed; struct ieee80211_he_cap_elem he; u8 mcs_nss_len, ppet_len; u8 orig_mcs_nss_len; u8 ie_len; if (!conn) conn = &ieee80211_conn_settings_unlimited; /* Make sure we have room for the IE */ if (!he_cap || !eht_cap) return 0; orig_mcs_nss_len = ieee80211_eht_mcs_nss_size(&he_cap->he_cap_elem, &eht_cap->eht_cap_elem, for_ap); ieee80211_get_adjusted_he_cap(conn, he_cap, &he); fixed = eht_cap->eht_cap_elem; if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_80) fixed.phy_cap_info[6] &= ~IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_80MHZ; if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_160) { fixed.phy_cap_info[1] &= ~IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK; fixed.phy_cap_info[2] &= ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK; fixed.phy_cap_info[6] &= ~IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_160MHZ; } if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_320) { fixed.phy_cap_info[0] &= ~IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ; fixed.phy_cap_info[1] &= ~IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK; fixed.phy_cap_info[2] &= ~IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK; fixed.phy_cap_info[6] &= ~IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_320MHZ; } if (conn->bw_limit == IEEE80211_CONN_BW_LIMIT_20) fixed.phy_cap_info[0] &= ~IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ; mcs_nss_len = ieee80211_eht_mcs_nss_size(&he, &fixed, for_ap); ppet_len = ieee80211_eht_ppe_size(eht_cap->eht_ppe_thres[0], fixed.phy_cap_info); ie_len = 2 + 1 + sizeof(eht_cap->eht_cap_elem) + mcs_nss_len + ppet_len; if (skb_tailroom(skb) < ie_len) return -ENOBUFS; skb_put_u8(skb, WLAN_EID_EXTENSION); skb_put_u8(skb, ie_len - 2); skb_put_u8(skb, WLAN_EID_EXT_EHT_CAPABILITY); skb_put_data(skb, &fixed, sizeof(fixed)); if (mcs_nss_len == 4 && orig_mcs_nss_len != 4) { /* * If the (non-AP) STA became 20 MHz only, then convert from * <=80 to 20-MHz-only format, where MCSes are indicated in * the groups 0-7, 8-9, 10-11, 12-13 rather than just 0-9, * 10-11, 12-13. Thus, use 0-9 for 0-7 and 8-9.
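* (Hence the 0-9 octet is written twice below: once for the 0-7 group and once for the 8-9 group.)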
*/ skb_put_u8(skb, eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_mcs9_max_nss); skb_put_u8(skb, eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_mcs9_max_nss); skb_put_u8(skb, eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_mcs11_max_nss); skb_put_u8(skb, eht_cap->eht_mcs_nss_supp.bw._80.rx_tx_mcs13_max_nss); } else { skb_put_data(skb, &eht_cap->eht_mcs_nss_supp, mcs_nss_len); } if (ppet_len) skb_put_data(skb, &eht_cap->eht_ppe_thres, ppet_len); return 0; } const char *ieee80211_conn_mode_str(enum ieee80211_conn_mode mode) { static const char * const modes[] = { [IEEE80211_CONN_MODE_S1G] = "S1G", [IEEE80211_CONN_MODE_LEGACY] = "legacy", [IEEE80211_CONN_MODE_HT] = "HT", [IEEE80211_CONN_MODE_VHT] = "VHT", [IEEE80211_CONN_MODE_HE] = "HE", [IEEE80211_CONN_MODE_EHT] = "EHT", }; if (WARN_ON(mode >= ARRAY_SIZE(modes))) return "<out of range>"; return modes[mode] ?: "<missing string>"; } enum ieee80211_conn_bw_limit ieee80211_min_bw_limit_from_chandef(struct cfg80211_chan_def *chandef) { switch (chandef->width) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: return IEEE80211_CONN_BW_LIMIT_20; case NL80211_CHAN_WIDTH_40: return IEEE80211_CONN_BW_LIMIT_40; case NL80211_CHAN_WIDTH_80: return IEEE80211_CONN_BW_LIMIT_80; case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_160: return IEEE80211_CONN_BW_LIMIT_160; case NL80211_CHAN_WIDTH_320: return IEEE80211_CONN_BW_LIMIT_320; default: WARN(1, "unhandled chandef width %d\n", chandef->width); return IEEE80211_CONN_BW_LIMIT_20; } } void ieee80211_clear_tpe(struct ieee80211_parsed_tpe *tpe) { for (int i = 0; i < 2; i++) { tpe->max_local[i].valid = false; memset(tpe->max_local[i].power, IEEE80211_TPE_MAX_TX_PWR_NO_CONSTRAINT, sizeof(tpe->max_local[i].power)); tpe->max_reg_client[i].valid = false; memset(tpe->max_reg_client[i].power, IEEE80211_TPE_MAX_TX_PWR_NO_CONSTRAINT, sizeof(tpe->max_reg_client[i].power)); tpe->psd_local[i].valid = false; memset(tpe->psd_local[i].power, IEEE80211_TPE_PSD_NO_LIMIT, sizeof(tpe->psd_local[i].power)); tpe->psd_reg_client[i].valid = false; memset(tpe->psd_reg_client[i].power, IEEE80211_TPE_PSD_NO_LIMIT, sizeof(tpe->psd_reg_client[i].power)); } }
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Read-Copy Update mechanism for mutual exclusion * * Copyright IBM Corporation, 2001 * * Author: Dipankar Sarma <dipankar@in.ibm.com> * * Based on the original work by Paul McKenney <paulmck@vnet.ibm.com> * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * Papers: * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) * * For detailed explanation of Read-Copy Update mechanism see - * http://lse.sourceforge.net/locking/rcupdate.html * */ #ifndef __LINUX_RCUPDATE_H #define __LINUX_RCUPDATE_H #include <linux/types.h> #include <linux/compiler.h> #include <linux/atomic.h> #include <linux/irqflags.h> #include <linux/preempt.h> #include <linux/bottom_half.h> #include <linux/lockdep.h> #include <linux/cleanup.h> #include <asm/processor.h> #include <linux/context_tracking_irq.h> #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) /* Exported common interfaces */ void call_rcu(struct rcu_head *head, rcu_callback_t func); void rcu_barrier_tasks(void); void rcu_barrier_tasks_rude(void); void synchronize_rcu(void); struct rcu_gp_oldstate; unsigned long get_completed_synchronize_rcu(void); void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp); // Maximum number of unsigned long values corresponding to // not-yet-completed RCU grace periods. #define NUM_ACTIVE_RCU_POLL_OLDSTATE 2 /** * same_state_synchronize_rcu - Are two old-state values identical? * @oldstate1: First old-state value. * @oldstate2: Second old-state value. * * The two old-state values must have been obtained from either * get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or * get_completed_synchronize_rcu(). Returns @true if the two values are * identical and @false otherwise. This allows structures whose lifetimes * are tracked by old-state values to push these values to a list header, * allowing those structures to be slightly smaller.
*/ static inline bool same_state_synchronize_rcu(unsigned long oldstate1, unsigned long oldstate2) { return oldstate1 == oldstate2; } #ifdef CONFIG_PREEMPT_RCU void __rcu_read_lock(void); void __rcu_read_unlock(void); /* * Defined as a macro as it is a very low level header included from * areas that don't even know about current. This gives the rcu_read_lock() * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. */ #define rcu_preempt_depth() READ_ONCE(current->rcu_read_lock_nesting) #else /* #ifdef CONFIG_PREEMPT_RCU */ #ifdef CONFIG_TINY_RCU #define rcu_read_unlock_strict() do { } while (0) #else void rcu_read_unlock_strict(void); #endif static inline void __rcu_read_lock(void) { preempt_disable(); } static inline void __rcu_read_unlock(void) { preempt_enable(); if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) rcu_read_unlock_strict(); } static inline int rcu_preempt_depth(void) { return 0; } #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ #ifdef CONFIG_RCU_LAZY void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func); #else static inline void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func) { call_rcu(head, func); } #endif /* Internal to kernel */ void rcu_init(void); extern int rcu_scheduler_active; void rcu_sched_clock_irq(int user); #ifdef CONFIG_TASKS_RCU_GENERIC void rcu_init_tasks_generic(void); #else static inline void rcu_init_tasks_generic(void) { } #endif #ifdef CONFIG_RCU_STALL_COMMON void rcu_sysrq_start(void); void rcu_sysrq_end(void); #else /* #ifdef CONFIG_RCU_STALL_COMMON */ static inline void rcu_sysrq_start(void) { } static inline void rcu_sysrq_end(void) { } #endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */ #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) void rcu_irq_work_resched(void); #else static inline void rcu_irq_work_resched(void) { } #endif #ifdef CONFIG_RCU_NOCB_CPU void rcu_init_nohz(void); int rcu_nocb_cpu_offload(int cpu); int rcu_nocb_cpu_deoffload(int cpu); void rcu_nocb_flush_deferred_wakeup(void); #else /* #ifdef CONFIG_RCU_NOCB_CPU */ static inline void rcu_init_nohz(void) { } static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; } static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; } static inline void rcu_nocb_flush_deferred_wakeup(void) { } #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ /* * Note a quasi-voluntary context switch for RCU-tasks's benefit. * This is a macro rather than an inline function to avoid #include hell. */ #ifdef CONFIG_TASKS_RCU_GENERIC # ifdef CONFIG_TASKS_RCU # define rcu_tasks_classic_qs(t, preempt) \ do { \ if (!(preempt) && READ_ONCE((t)->rcu_tasks_holdout)) \ WRITE_ONCE((t)->rcu_tasks_holdout, false); \ } while (0) void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); void synchronize_rcu_tasks(void); # else # define rcu_tasks_classic_qs(t, preempt) do { } while (0) # define call_rcu_tasks call_rcu # define synchronize_rcu_tasks synchronize_rcu # endif # ifdef CONFIG_TASKS_TRACE_RCU // Bits for ->trc_reader_special.b.need_qs field. #define TRC_NEED_QS 0x1 // Task needs a quiescent state. #define TRC_NEED_QS_CHECKED 0x2 // Task has been checked for needing quiescent state. 
u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new); void rcu_tasks_trace_qs_blkd(struct task_struct *t); # define rcu_tasks_trace_qs(t) \ do { \ int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); \ \ if (unlikely(READ_ONCE((t)->trc_reader_special.b.need_qs) == TRC_NEED_QS) && \ likely(!___rttq_nesting)) { \ rcu_trc_cmpxchg_need_qs((t), TRC_NEED_QS, TRC_NEED_QS_CHECKED); \ } else if (___rttq_nesting && ___rttq_nesting != INT_MIN && \ !READ_ONCE((t)->trc_reader_special.b.blocked)) { \ rcu_tasks_trace_qs_blkd(t); \ } \ } while (0) # else # define rcu_tasks_trace_qs(t) do { } while (0) # endif #define rcu_tasks_qs(t, preempt) \ do { \ rcu_tasks_classic_qs((t), (preempt)); \ rcu_tasks_trace_qs(t); \ } while (0) # ifdef CONFIG_TASKS_RUDE_RCU void call_rcu_tasks_rude(struct rcu_head *head, rcu_callback_t func); void synchronize_rcu_tasks_rude(void); # endif #define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false) void exit_tasks_rcu_start(void); void exit_tasks_rcu_finish(void); #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ #define rcu_tasks_classic_qs(t, preempt) do { } while (0) #define rcu_tasks_qs(t, preempt) do { } while (0) #define rcu_note_voluntary_context_switch(t) do { } while (0) #define call_rcu_tasks call_rcu #define synchronize_rcu_tasks synchronize_rcu static inline void exit_tasks_rcu_start(void) { } static inline void exit_tasks_rcu_finish(void) { } #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ /** * rcu_trace_implies_rcu_gp - does an RCU Tasks Trace grace period imply an RCU grace period? * * As an accident of implementation, an RCU Tasks Trace grace period also * acts as an RCU grace period. However, this could change at any time. * Code relying on this accident must call this function to verify that * this accident is still happening. * * You have been warned! */ static inline bool rcu_trace_implies_rcu_gp(void) { return true; } /** * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU * * This macro resembles cond_resched(), except that it is defined to * report potential quiescent states to RCU-tasks even if the cond_resched() * machinery were to be shut off, as some advocate for PREEMPTION kernels. */ #define cond_resched_tasks_rcu_qs() \ do { \ rcu_tasks_qs(current, false); \ cond_resched(); \ } while (0) /** * rcu_softirq_qs_periodic - Report RCU and RCU-Tasks quiescent states * @old_ts: jiffies at start of processing. * * This helper is for long-running softirq handlers, such as NAPI threads in * networking. The caller should initialize the variable passed in as @old_ts * at the beginning of the softirq handler. When invoked frequently, this macro * will invoke rcu_softirq_qs() every 100 milliseconds thereafter, which will * provide both RCU and RCU-Tasks quiescent states. Note that this macro * modifies its old_ts argument. * * Because regions of code that have disabled softirq act as RCU read-side * critical sections, this macro should be invoked with softirq (and * preemption) enabled. * * The macro is not needed when CONFIG_PREEMPT_RT is defined. RT kernels would * have more chance to invoke schedule() calls and provide necessary quiescent * states. As a contrast, calling cond_resched() only won't achieve the same * effect because cond_resched() does not provide RCU-Tasks quiescent states. 
*/ #define rcu_softirq_qs_periodic(old_ts) \ do { \ if (!IS_ENABLED(CONFIG_PREEMPT_RT) && \ time_after(jiffies, (old_ts) + HZ / 10)) { \ preempt_disable(); \ rcu_softirq_qs(); \ preempt_enable(); \ (old_ts) = jiffies; \ } \ } while (0) /* * Infrastructure to implement the synchronize_() primitives in * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. */ #if defined(CONFIG_TREE_RCU) #include <linux/rcutree.h> #elif defined(CONFIG_TINY_RCU) #include <linux/rcutiny.h> #else #error "Unknown RCU implementation specified to kernel configuration" #endif /* * The init_rcu_head_on_stack() and destroy_rcu_head_on_stack() calls * are needed for dynamic initialization and destruction of rcu_head * on the stack, and init_rcu_head()/destroy_rcu_head() are needed for * dynamic initialization and destruction of statically allocated rcu_head * structures. However, rcu_head structures allocated dynamically in the * heap don't need any initialization. */ #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD void init_rcu_head(struct rcu_head *head); void destroy_rcu_head(struct rcu_head *head); void init_rcu_head_on_stack(struct rcu_head *head); void destroy_rcu_head_on_stack(struct rcu_head *head); #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ static inline void init_rcu_head(struct rcu_head *head) { } static inline void destroy_rcu_head(struct rcu_head *head) { } static inline void init_rcu_head_on_stack(struct rcu_head *head) { } static inline void destroy_rcu_head_on_stack(struct rcu_head *head) { } #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) bool rcu_lockdep_current_cpu_online(void); #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ static inline bool rcu_lockdep_current_cpu_online(void) { return true; } #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ extern struct lockdep_map rcu_lock_map; extern struct lockdep_map rcu_bh_lock_map; extern struct lockdep_map rcu_sched_lock_map; extern struct lockdep_map rcu_callback_map; #ifdef CONFIG_DEBUG_LOCK_ALLOC static inline void rcu_lock_acquire(struct lockdep_map *map) { lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); } static inline void rcu_try_lock_acquire(struct lockdep_map *map) { lock_acquire(map, 0, 1, 2, 0, NULL, _THIS_IP_); } static inline void rcu_lock_release(struct lockdep_map *map) { lock_release(map, _THIS_IP_); } int debug_lockdep_rcu_enabled(void); int rcu_read_lock_held(void); int rcu_read_lock_bh_held(void); int rcu_read_lock_sched_held(void); int rcu_read_lock_any_held(void); #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ # define rcu_lock_acquire(a) do { } while (0) # define rcu_try_lock_acquire(a) do { } while (0) # define rcu_lock_release(a) do { } while (0) static inline int rcu_read_lock_held(void) { return 1; } static inline int rcu_read_lock_bh_held(void) { return 1; } static inline int rcu_read_lock_sched_held(void) { return !preemptible(); } static inline int rcu_read_lock_any_held(void) { return !preemptible(); } static inline int debug_lockdep_rcu_enabled(void) { return 0; } #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ #ifdef CONFIG_PROVE_RCU /** * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met * @c: condition to check * @s: informative message * * This checks debug_lockdep_rcu_enabled() before checking (c) to * prevent early boot splats due to lockdep not yet being initialized, * and rechecks it after checking (c) to prevent false-positive splats * due to races with lockdep being disabled. 
See commit 3066820034b5dd * ("rcu: Reject RCU_LOCKDEP_WARN() false positives") for more detail. */ #define RCU_LOCKDEP_WARN(c, s) \ do { \ static bool __section(".data.unlikely") __warned; \ if (debug_lockdep_rcu_enabled() && (c) && \ debug_lockdep_rcu_enabled() && !__warned) { \ __warned = true; \ lockdep_rcu_suspicious(__FILE__, __LINE__, s); \ } \ } while (0) #ifndef CONFIG_PREEMPT_RCU static inline void rcu_preempt_sleep_check(void) { RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map), "Illegal context switch in RCU read-side critical section"); } #else // #ifndef CONFIG_PREEMPT_RCU static inline void rcu_preempt_sleep_check(void) { } #endif // #else // #ifndef CONFIG_PREEMPT_RCU #define rcu_sleep_check() \ do { \ rcu_preempt_sleep_check(); \ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \ "Illegal context switch in RCU-bh read-side critical section"); \ RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \ "Illegal context switch in RCU-sched read-side critical section"); \ } while (0) // See RCU_LOCKDEP_WARN() for an explanation of the double call to // debug_lockdep_rcu_enabled(). static inline bool lockdep_assert_rcu_helper(bool c) { return debug_lockdep_rcu_enabled() && (c || !rcu_is_watching() || !rcu_lockdep_current_cpu_online()) && debug_lockdep_rcu_enabled(); } /** * lockdep_assert_in_rcu_read_lock - WARN if not protected by rcu_read_lock() * * Splats if lockdep is enabled and there is no rcu_read_lock() in effect. */ #define lockdep_assert_in_rcu_read_lock() \ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map))) /** * lockdep_assert_in_rcu_read_lock_bh - WARN if not protected by rcu_read_lock_bh() * * Splats if lockdep is enabled and there is no rcu_read_lock_bh() in effect. * Note that local_bh_disable() and friends do not suffice here, instead an * actual rcu_read_lock_bh() is required. */ #define lockdep_assert_in_rcu_read_lock_bh() \ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_bh_lock_map))) /** * lockdep_assert_in_rcu_read_lock_sched - WARN if not protected by rcu_read_lock_sched() * * Splats if lockdep is enabled and there is no rcu_read_lock_sched() * in effect. Note that preempt_disable() and friends do not suffice here, * instead an actual rcu_read_lock_sched() is required. */ #define lockdep_assert_in_rcu_read_lock_sched() \ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_sched_lock_map))) /** * lockdep_assert_in_rcu_reader - WARN if not within some type of RCU reader * * Splats if lockdep is enabled and there is no RCU reader of any * type in effect. Note that regions of code protected by things like * preempt_disable, local_bh_disable(), and local_irq_disable() all qualify * as RCU readers. * * Note that this will never trigger in PREEMPT_NONE or PREEMPT_VOLUNTARY * kernels that are not also built with PREEMPT_COUNT. But if you have * lockdep enabled, you might as well also enable PREEMPT_COUNT. 
*/ #define lockdep_assert_in_rcu_reader() \ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map) && \ !lock_is_held(&rcu_bh_lock_map) && \ !lock_is_held(&rcu_sched_lock_map) && \ preemptible())) #else /* #ifdef CONFIG_PROVE_RCU */ #define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c)) #define rcu_sleep_check() do { } while (0) #define lockdep_assert_in_rcu_read_lock() do { } while (0) #define lockdep_assert_in_rcu_read_lock_bh() do { } while (0) #define lockdep_assert_in_rcu_read_lock_sched() do { } while (0) #define lockdep_assert_in_rcu_reader() do { } while (0) #endif /* #else #ifdef CONFIG_PROVE_RCU */ /* * Helper functions for rcu_dereference_check(), rcu_dereference_protected() * and rcu_assign_pointer(). Some of these could be folded into their * callers, but they are left separate in order to ease introduction of * multiple pointers markings to match different RCU implementations * (e.g., __srcu), should this make sense in the future. */ #ifdef __CHECKER__ #define rcu_check_sparse(p, space) \ ((void)(((typeof(*p) space *)p) == p)) #else /* #ifdef __CHECKER__ */ #define rcu_check_sparse(p, space) #endif /* #else #ifdef __CHECKER__ */ #define __unrcu_pointer(p, local) \ ({ \ typeof(*p) *local = (typeof(*p) *__force)(p); \ rcu_check_sparse(p, __rcu); \ ((typeof(*p) __force __kernel *)(local)); \ }) /** * unrcu_pointer - mark a pointer as not being RCU protected * @p: pointer needing to lose its __rcu property * * Converts @p from an __rcu pointer to a __kernel pointer. * This allows an __rcu pointer to be used with xchg() and friends. */ #define unrcu_pointer(p) __unrcu_pointer(p, __UNIQUE_ID(rcu)) #define __rcu_access_pointer(p, local, space) \ ({ \ typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \ rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(local)); \ }) #define __rcu_dereference_check(p, local, c, space) \ ({ \ /* Dependency order vs. p above. */ \ typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \ rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(local)); \ }) #define __rcu_dereference_protected(p, local, c, space) \ ({ \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \ rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(p)); \ }) #define __rcu_dereference_raw(p, local) \ ({ \ /* Dependency order vs. p above. */ \ typeof(p) local = READ_ONCE(p); \ ((typeof(*p) __force __kernel *)(local)); \ }) #define rcu_dereference_raw(p) __rcu_dereference_raw(p, __UNIQUE_ID(rcu)) /** * RCU_INITIALIZER() - statically initialize an RCU-protected global variable * @v: The value to statically initialize with. */ #define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) /** * rcu_assign_pointer() - assign to RCU-protected pointer * @p: pointer to assign to * @v: value to assign (publish) * * Assigns the specified value to the specified RCU-protected * pointer, ensuring that any concurrent RCU readers will see * any prior initialization. * * Inserts memory barriers on architectures that require them * (which is most of them), and also prevents the compiler from * reordering the code that initializes the structure after the pointer * assignment. More importantly, this call documents which pointers * will be dereferenced by RCU read-side code. * * In some special cases, you may use RCU_INIT_POINTER() instead * of rcu_assign_pointer(). 
RCU_INIT_POINTER() is a bit faster due * to the fact that it does not constrain either the CPU or the compiler. * That said, using RCU_INIT_POINTER() when you should have used * rcu_assign_pointer() is a very bad thing that results in * impossible-to-diagnose memory corruption. So please be careful. * See the RCU_INIT_POINTER() comment header for details. * * Note that rcu_assign_pointer() evaluates each of its arguments only * once, appearances notwithstanding. One of the "extra" evaluations * is in typeof() and the other visible only to sparse (__CHECKER__), * neither of which actually execute the argument. As with most cpp * macros, this execute-arguments-only-once property is important, so * please be careful when making changes to rcu_assign_pointer() and the * other macros that it invokes. */ #define rcu_assign_pointer(p, v) \ do { \ uintptr_t _r_a_p__v = (uintptr_t)(v); \ rcu_check_sparse(p, __rcu); \ \ if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ else \ smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ } while (0) /** * rcu_replace_pointer() - replace an RCU pointer, returning its old value * @rcu_ptr: RCU pointer, whose old value is returned * @ptr: regular pointer * @c: the lockdep conditions under which the dereference will take place * * Perform a replacement, where @rcu_ptr is an RCU-annotated * pointer and @c is the lockdep argument that is passed to the * rcu_dereference_protected() call used to read that pointer. The old * value of @rcu_ptr is returned, and @rcu_ptr is set to @ptr. */ #define rcu_replace_pointer(rcu_ptr, ptr, c) \ ({ \ typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \ rcu_assign_pointer((rcu_ptr), (ptr)); \ __tmp; \ }) /** * rcu_access_pointer() - fetch RCU pointer with no dereferencing * @p: The pointer to read * * Return the value of the specified RCU-protected pointer, but omit the * lockdep checks for being in an RCU read-side critical section. This is * useful when the value of this pointer is accessed, but the pointer is * not dereferenced, for example, when testing an RCU-protected pointer * against NULL. Although rcu_access_pointer() may also be used in cases * where update-side locks prevent the value of the pointer from changing, * you should instead use rcu_dereference_protected() for this use case. * Within an RCU read-side critical section, there is little reason to * use rcu_access_pointer(). * * It is usually best to test the rcu_access_pointer() return value * directly in order to avoid accidental dereferences being introduced * by later inattentive changes. In other words, assigning the * rcu_access_pointer() return value to a local variable results in an * accident waiting to happen. * * It is also permissible to use rcu_access_pointer() when read-side * access to the pointer was removed at least one grace period ago, as is * the case in the context of the RCU callback that is freeing up the data, * or after a synchronize_rcu() returns. This can be useful when tearing * down multi-linked structures after a grace period has elapsed. However, * rcu_dereference_protected() is normally preferred for this use case. 
*/ #define rcu_access_pointer(p) __rcu_access_pointer((p), __UNIQUE_ID(rcu), __rcu) /** * rcu_dereference_check() - rcu_dereference with debug checking * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * * Do an rcu_dereference(), but check that the conditions under which the * dereference will take place are correct. Typically the conditions * indicate the various locking conditions that should be held at that * point. The check should return true if the conditions are satisfied. * An implicit check for being in an RCU read-side critical section * (rcu_read_lock()) is included. * * For example: * * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock)); * * could be used to indicate to lockdep that foo->bar may only be dereferenced * if either rcu_read_lock() is held, or that the lock required to replace * the bar struct at foo->bar is held. * * Note that the list of conditions may also include indications of when a lock * need not be held, for example during initialisation or destruction of the * target struct: * * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) || * atomic_read(&foo->usage) == 0); * * Inserts memory barriers on architectures that require them * (currently only the Alpha), prevents the compiler from refetching * (and from merging fetches), and, more importantly, documents exactly * which pointers are protected by RCU and checks that the pointer is * annotated as __rcu. */ #define rcu_dereference_check(p, c) \ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \ (c) || rcu_read_lock_held(), __rcu) /** * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * * This is the RCU-bh counterpart to rcu_dereference_check(). However, * please note that starting in v5.0 kernels, vanilla RCU grace periods * wait for local_bh_disable() regions of code in addition to regions of * code demarked by rcu_read_lock() and rcu_read_unlock(). This means * that synchronize_rcu(), call_rcu, and friends all take not only * rcu_read_lock() but also rcu_read_lock_bh() into account. */ #define rcu_dereference_bh_check(p, c) \ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \ (c) || rcu_read_lock_bh_held(), __rcu) /** * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * * This is the RCU-sched counterpart to rcu_dereference_check(). * However, please note that starting in v5.0 kernels, vanilla RCU grace * periods wait for preempt_disable() regions of code in addition to * regions of code demarked by rcu_read_lock() and rcu_read_unlock(). * This means that synchronize_rcu(), call_rcu, and friends all take not * only rcu_read_lock() but also rcu_read_lock_sched() into account. */ #define rcu_dereference_sched_check(p, c) \ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \ (c) || rcu_read_lock_sched_held(), \ __rcu) /* * The tracing infrastructure traces RCU (we want that), but unfortunately * some of the RCU checks causes tracing to lock up the system. * * The no-tracing version of rcu_dereference_raw() must not call * rcu_read_lock_held(). 
*/ #define rcu_dereference_raw_check(p) \ __rcu_dereference_check((p), __UNIQUE_ID(rcu), 1, __rcu) /** * rcu_dereference_protected() - fetch RCU pointer when updates prevented * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * * Return the value of the specified RCU-protected pointer, but omit * the READ_ONCE(). This is useful in cases where update-side locks * prevent the value of the pointer from changing. Please note that this * primitive does *not* prevent the compiler from repeating this reference * or combining it with other references, so it should not be used without * protection of appropriate locks. * * This function is only for update-side use. Using this function * when protected only by rcu_read_lock() will result in infrequent * but very ugly failures. */ #define rcu_dereference_protected(p, c) \ __rcu_dereference_protected((p), __UNIQUE_ID(rcu), (c), __rcu) /** * rcu_dereference() - fetch RCU-protected pointer for dereferencing * @p: The pointer to read, prior to dereferencing * * This is a simple wrapper around rcu_dereference_check(). */ #define rcu_dereference(p) rcu_dereference_check(p, 0) /** * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing * @p: The pointer to read, prior to dereferencing * * Makes rcu_dereference_check() do the dirty work. */ #define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0) /** * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing * @p: The pointer to read, prior to dereferencing * * Makes rcu_dereference_check() do the dirty work. */ #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0) /** * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism * @p: The pointer to hand off * * This is simply an identity function, but it documents where a pointer * is handed off from RCU to some other synchronization mechanism, for * example, reference counting or locking. In C11, it would map to * kill_dependency(). It could be used as follows:: * * rcu_read_lock(); * p = rcu_dereference(gp); * long_lived = is_long_lived(p); * if (long_lived) { * if (!atomic_inc_not_zero(p->refcnt)) * long_lived = false; * else * p = rcu_pointer_handoff(p); * } * rcu_read_unlock(); */ #define rcu_pointer_handoff(p) (p) /** * rcu_read_lock() - mark the beginning of an RCU read-side critical section * * When synchronize_rcu() is invoked on one CPU while other CPUs * are within RCU read-side critical sections, then the * synchronize_rcu() is guaranteed to block until after all the other * CPUs exit their critical sections. Similarly, if call_rcu() is invoked * on one CPU while other CPUs are within RCU read-side critical * sections, invocation of the corresponding RCU callback is deferred * until after all the other CPUs exit their critical sections. * * In v5.0 and later kernels, synchronize_rcu() and call_rcu() also * wait for regions of code with preemption disabled, including regions of * code with interrupts or softirqs disabled. In pre-v5.0 kernels, which * define synchronize_sched(), only code enclosed within rcu_read_lock() * and rcu_read_unlock() is guaranteed to be waited for. * * Note, however, that RCU callbacks are permitted to run concurrently * with new RCU read-side critical sections.
One way that this can happen * is via the following sequence of events: (1) CPU 0 enters an RCU * read-side critical section, (2) CPU 1 invokes call_rcu() to register * an RCU callback, (3) CPU 0 exits the RCU read-side critical section, * (4) CPU 2 enters a RCU read-side critical section, (5) the RCU * callback is invoked. This is legal, because the RCU read-side critical * section that was running concurrently with the call_rcu() (and which * therefore might be referencing something that the corresponding RCU * callback would free up) has completed before the corresponding * RCU callback is invoked. * * RCU read-side critical sections may be nested. Any deferred actions * will be deferred until the outermost RCU read-side critical section * completes. * * You can avoid reading and understanding the next paragraph by * following this rule: don't put anything in an rcu_read_lock() RCU * read-side critical section that would block in a !PREEMPTION kernel. * But if you want the full story, read on! * * In non-preemptible RCU implementations (pure TREE_RCU and TINY_RCU), * it is illegal to block while in an RCU read-side critical section. * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPTION * kernel builds, RCU read-side critical sections may be preempted, * but explicit blocking is illegal. Finally, in preemptible RCU * implementations in real-time (with -rt patchset) kernel builds, RCU * read-side critical sections may be preempted and they may also block, but * only when acquiring spinlocks that are subject to priority inheritance. */ static __always_inline void rcu_read_lock(void) { __rcu_read_lock(); __acquire(RCU); rcu_lock_acquire(&rcu_lock_map); RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_lock() used illegally while idle"); } /* * So where is rcu_write_lock()? It does not exist, as there is no * way for writers to lock out RCU readers. This is a feature, not * a bug -- this property is what provides RCU's performance benefits. * Of course, writers must coordinate with each other. The normal * spinlock primitives work well for this, but any other technique may be * used as well. RCU does not care how the writers keep out of each * others' way, as long as they do so. */ /** * rcu_read_unlock() - marks the end of an RCU read-side critical section. * * In almost all situations, rcu_read_unlock() is immune from deadlock. * In recent kernels that have consolidated synchronize_sched() and * synchronize_rcu_bh() into synchronize_rcu(), this deadlock immunity * also extends to the scheduler's runqueue and priority-inheritance * spinlocks, courtesy of the quiescent-state deferral that is carried * out when rcu_read_unlock() is invoked with interrupts disabled. * * See rcu_read_lock() for more information. */ static inline void rcu_read_unlock(void) { RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_unlock() used illegally while idle"); rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */ __release(RCU); __rcu_read_unlock(); } /** * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section * * This is equivalent to rcu_read_lock(), but also disables softirqs. * Note that anything else that disables softirqs can also serve as an RCU * read-side critical section. However, please note that this equivalence * applies only to v5.0 and later. Before v5.0, rcu_read_lock() and * rcu_read_lock_bh() were unrelated. 
* * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh() * must occur in the same context, for example, it is illegal to invoke * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh() * was invoked from some other task. */ static inline void rcu_read_lock_bh(void) { local_bh_disable(); __acquire(RCU_BH); rcu_lock_acquire(&rcu_bh_lock_map); RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_lock_bh() used illegally while idle"); } /** * rcu_read_unlock_bh() - marks the end of a softirq-only RCU critical section * * See rcu_read_lock_bh() for more information. */ static inline void rcu_read_unlock_bh(void) { RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_unlock_bh() used illegally while idle"); rcu_lock_release(&rcu_bh_lock_map); __release(RCU_BH); local_bh_enable(); } /** * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section * * This is equivalent to rcu_read_lock(), but also disables preemption. * Read-side critical sections can also be introduced by anything else that * disables preemption, including local_irq_disable() and friends. However, * please note that the equivalence to rcu_read_lock() applies only to * v5.0 and later. Before v5.0, rcu_read_lock() and rcu_read_lock_sched() * were unrelated. * * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched() * must occur in the same context, for example, it is illegal to invoke * rcu_read_unlock_sched() from process context if the matching * rcu_read_lock_sched() was invoked from an NMI handler. */ static inline void rcu_read_lock_sched(void) { preempt_disable(); __acquire(RCU_SCHED); rcu_lock_acquire(&rcu_sched_lock_map); RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_lock_sched() used illegally while idle"); } /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ static inline notrace void rcu_read_lock_sched_notrace(void) { preempt_disable_notrace(); __acquire(RCU_SCHED); } /** * rcu_read_unlock_sched() - marks the end of a RCU-classic critical section * * See rcu_read_lock_sched() for more information. */ static inline void rcu_read_unlock_sched(void) { RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_unlock_sched() used illegally while idle"); rcu_lock_release(&rcu_sched_lock_map); __release(RCU_SCHED); preempt_enable(); } /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ static inline notrace void rcu_read_unlock_sched_notrace(void) { __release(RCU_SCHED); preempt_enable_notrace(); } /** * RCU_INIT_POINTER() - initialize an RCU protected pointer * @p: The pointer to be initialized. * @v: The value to initialize the pointer to. * * Initialize an RCU-protected pointer in special cases where readers * do not need ordering constraints on the CPU or the compiler. These * special cases are: * * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer *or* * 2. The caller has taken whatever steps are required to prevent * RCU readers from concurrently accessing this pointer *or* * 3. The referenced data structure has already been exposed to * readers either at compile time or via rcu_assign_pointer() *and* * * a. You have not made *any* reader-visible changes to * this structure since then *or* * b. It is OK for readers accessing this structure from its * new location to see the old state of the structure. (For * example, the changes were to statistical counters or to * other state where exact synchronization is not required.)
* * Failure to follow these rules governing use of RCU_INIT_POINTER() will * result in impossible-to-diagnose memory corruption. That is, the structures * will look OK in crash dumps, but any concurrent RCU readers might * see pre-initialized values of the referenced data structure. So * please be very careful how you use RCU_INIT_POINTER()!!! * * If you are creating an RCU-protected linked structure that is accessed * by a single external-to-structure RCU-protected pointer, then you may * use RCU_INIT_POINTER() to initialize the internal RCU-protected * pointers, but you must use rcu_assign_pointer() to initialize the * external-to-structure pointer *after* you have completely initialized * the reader-accessible portions of the linked structure. * * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no * ordering guarantees for either the CPU or the compiler. */ #define RCU_INIT_POINTER(p, v) \ do { \ rcu_check_sparse(p, __rcu); \ WRITE_ONCE(p, RCU_INITIALIZER(v)); \ } while (0) /** * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer * @p: The pointer to be initialized. * @v: The value to initialize the pointer to. * * GCC-style initialization for an RCU-protected pointer in a structure field. */ #define RCU_POINTER_INITIALIZER(p, v) \ .p = RCU_INITIALIZER(v) /* * Does the specified offset indicate that the corresponding rcu_head * structure can be handled by kvfree_rcu()? */ #define __is_kvfree_rcu_offset(offset) ((offset) < 4096) /** * kfree_rcu() - kfree an object after a grace period. * @ptr: pointer to kfree for double-argument invocations. * @rhf: the name of the struct rcu_head within the type of @ptr. * * Many rcu callback functions just call kfree() on the base structure. * These functions are trivial, but their size adds up, and furthermore * when they are used in a kernel module, that module must invoke the * high-latency rcu_barrier() function at module-unload time. * * The kfree_rcu() function handles this issue. Rather than encoding a * function address in the embedded rcu_head structure, kfree_rcu() instead * encodes the offset of the rcu_head structure within the base structure. * Because the functions are not allowed in the low-order 4096 bytes of * kernel virtual memory, offsets up to 4095 bytes can be accommodated. * If the offset is larger than 4095 bytes, a compile-time error will * be generated in kvfree_rcu_arg_2(). If this error is triggered, you can * either fall back to use of call_rcu() or rearrange the structure to * position the rcu_head structure into the first 4096 bytes. * * The object to be freed can be allocated either by kmalloc() or * kmem_cache_alloc(). * * Note that the allowable offset might decrease in the future. * * The BUILD_BUG_ON check must not involve any function calls, hence the * checks are done in macros here. */ #define kfree_rcu(ptr, rhf) kvfree_rcu_arg_2(ptr, rhf) #define kvfree_rcu(ptr, rhf) kvfree_rcu_arg_2(ptr, rhf) /** * kfree_rcu_mightsleep() - kfree an object after a grace period. * @ptr: pointer to kfree for single-argument invocations. * * When it comes to the head-less variant, only one argument * is passed and that is just a pointer which has to be * freed after a grace period. Therefore the semantics are * * kfree_rcu_mightsleep(ptr); * * where @ptr is the pointer to be freed by kvfree(). * * Please note, the head-less way of freeing may only be * used from a context that is allowed to sleep, as indicated * by the might_sleep() annotation.
Otherwise, please switch and embed the * rcu_head structure within the type of @ptr. */ #define kfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr) #define kvfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr) #define kvfree_rcu_arg_2(ptr, rhf) \ do { \ typeof (ptr) ___p = (ptr); \ \ if (___p) { \ BUILD_BUG_ON(!__is_kvfree_rcu_offset(offsetof(typeof(*(ptr)), rhf))); \ kvfree_call_rcu(&((___p)->rhf), (void *) (___p)); \ } \ } while (0) #define kvfree_rcu_arg_1(ptr) \ do { \ typeof(ptr) ___p = (ptr); \ \ if (___p) \ kvfree_call_rcu(NULL, (void *) (___p)); \ } while (0) /* * Place this after a lock-acquisition primitive to guarantee that * an UNLOCK+LOCK pair acts as a full barrier. This guarantee applies * if the UNLOCK and LOCK are executed by the same CPU or if the * UNLOCK and LOCK operate on the same lock variable. */ #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE #define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */ #else /* #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */ #define smp_mb__after_unlock_lock() do { } while (0) #endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */ /* Has the specified rcu_head structure been handed to call_rcu()? */ /** * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu() * @rhp: The rcu_head structure to initialize. * * If you intend to invoke rcu_head_after_call_rcu() to test whether a * given rcu_head structure has already been passed to call_rcu(), then * you must also invoke this rcu_head_init() function on it just after * allocating that structure. Calls to this function must not race with * calls to call_rcu(), rcu_head_after_call_rcu(), or callback invocation. */ static inline void rcu_head_init(struct rcu_head *rhp) { rhp->func = (rcu_callback_t)~0L; } /** * rcu_head_after_call_rcu() - Has this rcu_head been passed to call_rcu()? * @rhp: The rcu_head structure to test. * @f: The function passed to call_rcu() along with @rhp. * * Returns @true if the @rhp has been passed to call_rcu() with @f, * and @false otherwise. Emits a warning in any other case, including * the case where @rhp has already been invoked after a grace period. * Calls to this function must not race with callback invocation. One way * to avoid such races is to enclose the call to rcu_head_after_call_rcu() * in an RCU read-side critical section that includes a read-side fetch * of the pointer to the structure containing @rhp. */ static inline bool rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f) { rcu_callback_t func = READ_ONCE(rhp->func); if (func == f) return true; WARN_ON_ONCE(func != (rcu_callback_t)~0L); return false; } /* kernel/ksysfs.c definitions */ extern int rcu_expedited; extern int rcu_normal; DEFINE_LOCK_GUARD_0(rcu, do { rcu_read_lock(); /* * sparse doesn't call the cleanup function, * so just release immediately and don't track * the context. We don't need to anyway, since * the whole point of the guard is to not need * the explicit unlock. */ __release(RCU); } while (0), rcu_read_unlock()) #endif /* __LINUX_RCUPDATE_H */
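The primitives declared above compose into the canonical RCU pattern: readers traverse under rcu_read_lock(), a locked updater publishes with rcu_assign_pointer(), and the old version is freed only after a grace period. A minimal sketch, assuming hypothetical names (struct my_data, my_global, my_lock) that are not part of this header:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_data {
	int val;
};

static struct my_data __rcu *my_global;
static DEFINE_SPINLOCK(my_lock);

static int my_read(void)
{
	struct my_data *p;
	int ret = -1;

	rcu_read_lock();			/* begin read-side critical section */
	p = rcu_dereference(my_global);		/* ordered fetch of the pointer */
	if (p)
		ret = p->val;
	rcu_read_unlock();			/* p must not be used past here */
	return ret;
}

static void my_update(int val)
{
	struct my_data *newp = kmalloc(sizeof(*newp), GFP_KERNEL);
	struct my_data *oldp;

	if (!newp)
		return;
	newp->val = val;

	spin_lock(&my_lock);			/* updaters exclude each other */
	oldp = rcu_dereference_protected(my_global, lockdep_is_held(&my_lock));
	rcu_assign_pointer(my_global, newp);	/* publish after initialization */
	spin_unlock(&my_lock);

	synchronize_rcu();			/* wait for pre-existing readers */
	kfree(oldp);
}

If my_update() had to run in a context that cannot block, call_rcu() or kfree_rcu() would replace the synchronize_rcu()/kfree() pair.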
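Continuing the same hypothetical sketch, RCU_INIT_POINTER() suffices where the rules in its kernel-doc hold, for example when NULLing out the pointer (rule 1), since no newly initialized structure is being published:

static void my_clear(void)
{
	struct my_data *oldp;

	spin_lock(&my_lock);
	oldp = rcu_dereference_protected(my_global, lockdep_is_held(&my_lock));
	RCU_INIT_POINTER(my_global, NULL);	/* rule 1: NULLing needs no ordering */
	spin_unlock(&my_lock);

	synchronize_rcu();
	kfree(oldp);
}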
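Where the freed object embeds its own rcu_head, the two-argument kfree_rcu() documented above avoids both the callback function and the blocking wait. A sketch with a hypothetical struct my_node:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_node {
	int key;
	struct rcu_head rh;	/* must sit within the first 4096 bytes */
};

static void my_node_retire(struct my_node *node)
{
	/*
	 * Queues the free by rcu_head offset; node remains valid to
	 * pre-existing readers until a grace period elapses.
	 */
	kfree_rcu(node, rh);
}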
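The DEFINE_LOCK_GUARD_0(rcu, ...) definition at the end of the header enables scope-based read-side sections via cleanup.h's guard() machinery; the critical section ends automatically when the scope is left. A sketch reusing the hypothetical my_global from the earlier example:

static int my_read_guarded(void)
{
	struct my_data *p;

	guard(rcu)();	/* rcu_read_lock() here, rcu_read_unlock() on any exit path */
	p = rcu_dereference(my_global);
	return p ? p->val : -1;
}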
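Finally, a sketch of the rcu_head_init()/rcu_head_after_call_rcu() pairing for code that must ask whether an object was already handed to call_rcu(); the names are hypothetical and the caller is assumed to satisfy the race rules spelled out in the kernel-doc above:

#include <linux/container_of.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	struct rcu_head rh;
};

static void my_obj_free(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct my_obj, rh));
}

static struct my_obj *my_obj_alloc(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		rcu_head_init(&obj->rh);	/* required before the test below */
	return obj;
}

static bool my_obj_queued(struct my_obj *obj)
{
	/*
	 * Caller must prevent races with callback invocation, e.g. by
	 * holding rcu_read_lock() across the fetch of obj and this test.
	 */
	return rcu_head_after_call_rcu(&obj->rh, my_obj_free);
}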
| 2256 3 3 619 619 619 484 485 561 1334 2397 2603 2602 2603 1761 1760 206 207 1982 619 619 619 617 57 57 3241 5515 11 3241 13 13 13 6 7 318 318 316 2 318 317 318 318 47 318 318 1 316 318 318 20 318 42 318 297 260 287 1 318 277 278 6 283 283 999 998 1007 1007 1007 1001 999 998 50 50 50 13 13 13 13 13 13 13 13 619 618 619 619 4 79 500 500 500 500 500 182 181 251 251 250 205 201 6 172 35 207 924 689 251 58 58 164 41 2 2 2 2 2 2 814 815 815 4 4 277 19 266 6 133 6 123 9 5 5 318 6 277 6 609 292 318 609 1 306 305 608 68 233 308 609 292 311 8 74 74 608 292 318 480 476 3 475 5 5 608 49 4 83 41 41 1 40 40 480 480 640 1 2 2 637 329 308 1 296 328 6 618 327 293 395 225 549 68 619 619 619 617 10 610 2 1 609 609 603 6 1 6 4 547 40 2 528 27 480 2 196 292 488 292 196 8 288 7 185 480 480 183 11 192 192 288 479 474 6 8 50 77 10 83 4 41 45 46 46 47 63 238 334 333 17 10 309 58 192 185 6 187 5 192 192 187 5 5 277 80 1 75 1 1 1 1 65 8 72 53 2 2 9 3 69 66 82 9 72 3 2 1 3 1 233 2 233 233 93 18 46 121 165 237 237 234 233 16 138 138 52 80 64 68 4 126 47 85 36 130 147 50 102 145 3 237 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 
647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 
1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 
// SPDX-License-Identifier: GPL-2.0-only /* * linux/kernel/fork.c * * Copyright (C) 1991, 1992 Linus Torvalds */ /* * 'fork.c' contains the help-routines for the 'fork' system call * (see also entry.S and others). * Fork is rather simple, once you get the hang of it, but the memory * management can be a bitch.
See 'mm/memory.c': 'copy_page_range()' */ #include <linux/anon_inodes.h> #include <linux/slab.h> #include <linux/sched/autogroup.h> #include <linux/sched/mm.h> #include <linux/sched/coredump.h> #include <linux/sched/user.h> #include <linux/sched/numa_balancing.h> #include <linux/sched/stat.h> #include <linux/sched/task.h> #include <linux/sched/task_stack.h> #include <linux/sched/cputime.h> #include <linux/seq_file.h> #include <linux/rtmutex.h> #include <linux/init.h> #include <linux/unistd.h> #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/completion.h> #include <linux/personality.h> #include <linux/mempolicy.h> #include <linux/sem.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/iocontext.h> #include <linux/key.h> #include <linux/kmsan.h> #include <linux/binfmts.h> #include <linux/mman.h> #include <linux/mmu_notifier.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/mm_inline.h> #include <linux/memblock.h> #include <linux/nsproxy.h> #include <linux/capability.h> #include <linux/cpu.h> #include <linux/cgroup.h> #include <linux/security.h> #include <linux/hugetlb.h> #include <linux/seccomp.h> #include <linux/swap.h> #include <linux/syscalls.h> #include <linux/syscall_user_dispatch.h> #include <linux/jiffies.h> #include <linux/futex.h> #include <linux/compat.h> #include <linux/kthread.h> #include <linux/task_io_accounting_ops.h> #include <linux/rcupdate.h> #include <linux/ptrace.h> #include <linux/mount.h> #include <linux/audit.h> #include <linux/memcontrol.h> #include <linux/ftrace.h> #include <linux/proc_fs.h> #include <linux/profile.h> #include <linux/rmap.h> #include <linux/ksm.h> #include <linux/acct.h> #include <linux/userfaultfd_k.h> #include <linux/tsacct_kern.h> #include <linux/cn_proc.h> #include <linux/freezer.h> #include <linux/delayacct.h> #include <linux/taskstats_kern.h> #include <linux/tty.h> #include <linux/fs_struct.h> #include <linux/magic.h> #include <linux/perf_event.h> #include <linux/posix-timers.h> #include <linux/user-return-notifier.h> #include <linux/oom.h> #include <linux/khugepaged.h> #include <linux/signalfd.h> #include <linux/uprobes.h> #include <linux/aio.h> #include <linux/compiler.h> #include <linux/sysctl.h> #include <linux/kcov.h> #include <linux/livepatch.h> #include <linux/thread_info.h> #include <linux/stackleak.h> #include <linux/kasan.h> #include <linux/scs.h> #include <linux/io_uring.h> #include <linux/bpf.h> #include <linux/stackprotector.h> #include <linux/user_events.h> #include <linux/iommu.h> #include <linux/rseq.h> #include <uapi/linux/pidfd.h> #include <linux/pidfs.h> #include <asm/pgalloc.h> #include <linux/uaccess.h> #include <asm/mmu_context.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <trace/events/sched.h> #define CREATE_TRACE_POINTS #include <trace/events/task.h> #include <kunit/visibility.h> /* * Minimum number of threads to boot the kernel */ #define MIN_THREADS 20 /* * Maximum number of threads */ #define MAX_THREADS FUTEX_TID_MASK /* * Protected counters by write_lock_irq(&tasklist_lock) */ unsigned long total_forks; /* Handle normal Linux uptimes. */ int nr_threads; /* The idle threads do not count.. 
*/ static int max_threads; /* tunable limit on nr_threads */ #define NAMED_ARRAY_INDEX(x) [x] = __stringify(x) static const char * const resident_page_types[] = { NAMED_ARRAY_INDEX(MM_FILEPAGES), NAMED_ARRAY_INDEX(MM_ANONPAGES), NAMED_ARRAY_INDEX(MM_SWAPENTS), NAMED_ARRAY_INDEX(MM_SHMEMPAGES), }; DEFINE_PER_CPU(unsigned long, process_counts) = 0; __cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ #ifdef CONFIG_PROVE_RCU int lockdep_tasklist_lock_is_held(void) { return lockdep_is_held(&tasklist_lock); } EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held); #endif /* #ifdef CONFIG_PROVE_RCU */ int nr_processes(void) { int cpu; int total = 0; for_each_possible_cpu(cpu) total += per_cpu(process_counts, cpu); return total; } void __weak arch_release_task_struct(struct task_struct *tsk) { } static struct kmem_cache *task_struct_cachep; static inline struct task_struct *alloc_task_struct_node(int node) { return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node); } static inline void free_task_struct(struct task_struct *tsk) { kmem_cache_free(task_struct_cachep, tsk); } /* * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a * kmemcache based allocator. */ # if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) # ifdef CONFIG_VMAP_STACK /* * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB * flush. Try to minimize the number of calls by caching stacks. */ #define NR_CACHED_STACKS 2 static DEFINE_PER_CPU(struct vm_struct *, cached_stacks[NR_CACHED_STACKS]); struct vm_stack { struct rcu_head rcu; struct vm_struct *stack_vm_area; }; static bool try_release_thread_stack_to_cache(struct vm_struct *vm) { unsigned int i; for (i = 0; i < NR_CACHED_STACKS; i++) { struct vm_struct *tmp = NULL; if (this_cpu_try_cmpxchg(cached_stacks[i], &tmp, vm)) return true; } return false; } static void thread_stack_free_rcu(struct rcu_head *rh) { struct vm_stack *vm_stack = container_of(rh, struct vm_stack, rcu); if (try_release_thread_stack_to_cache(vm_stack->stack_vm_area)) return; vfree(vm_stack); } static void thread_stack_delayed_free(struct task_struct *tsk) { struct vm_stack *vm_stack = tsk->stack; vm_stack->stack_vm_area = tsk->stack_vm_area; call_rcu(&vm_stack->rcu, thread_stack_free_rcu); } static int free_vm_stack_cache(unsigned int cpu) { struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu); int i; for (i = 0; i < NR_CACHED_STACKS; i++) { struct vm_struct *vm_stack = cached_vm_stacks[i]; if (!vm_stack) continue; vfree(vm_stack->addr); cached_vm_stacks[i] = NULL; } return 0; } static int memcg_charge_kernel_stack(struct vm_struct *vm) { int i; int ret; int nr_charged = 0; BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE); for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) { ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0); if (ret) goto err; nr_charged++; } return 0; err: for (i = 0; i < nr_charged; i++) memcg_kmem_uncharge_page(vm->pages[i], 0); return ret; } static int alloc_thread_stack_node(struct task_struct *tsk, int node) { struct vm_struct *vm; void *stack; int i; for (i = 0; i < NR_CACHED_STACKS; i++) { struct vm_struct *s; s = this_cpu_xchg(cached_stacks[i], NULL); if (!s) continue; /* Reset stack metadata. */ kasan_unpoison_range(s->addr, THREAD_SIZE); stack = kasan_reset_tag(s->addr); /* Clear stale pointers from reused stack. 
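 */

/*
 * Illustrative sketch, not part of fork.c: the NR_CACHED_STACKS logic
 * above is a small lock-free "park in any free slot" cache. The same
 * pattern in freestanding C11, all names hypothetical:
 */
#include <stdatomic.h>
#include <stddef.h>

#define NCACHED 2
static _Atomic(void *) cached_bufs[NCACHED];

/* Mirrors try_release_thread_stack_to_cache(): claim any empty slot. */
static int cache_put(void *buf)
{
	for (size_t i = 0; i < NCACHED; i++) {
		void *expected = NULL;

		if (atomic_compare_exchange_strong(&cached_bufs[i],
						   &expected, buf))
			return 1;	/* parked; do not free */
	}
	return 0;			/* cache full; caller frees */
}

/* Mirrors the this_cpu_xchg() probe above: take any parked buffer. */
static void *cache_get(void)
{
	for (size_t i = 0; i < NCACHED; i++) {
		void *buf = atomic_exchange(&cached_bufs[i], NULL);

		if (buf)
			return buf;
	}
	return NULL;
}

/*
 * Zero the reused stack before handing it out: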
*/ memset(stack, 0, THREAD_SIZE); if (memcg_charge_kernel_stack(s)) { vfree(s->addr); return -ENOMEM; } tsk->stack_vm_area = s; tsk->stack = stack; return 0; } /* * Allocated stacks are cached and later reused by new threads, * so memcg accounting is performed manually on assigning/releasing * stacks to tasks. Drop __GFP_ACCOUNT. */ stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN, VMALLOC_START, VMALLOC_END, THREADINFO_GFP & ~__GFP_ACCOUNT, PAGE_KERNEL, 0, node, __builtin_return_address(0)); if (!stack) return -ENOMEM; vm = find_vm_area(stack); if (memcg_charge_kernel_stack(vm)) { vfree(stack); return -ENOMEM; } /* * We can't call find_vm_area() in interrupt context, and * free_thread_stack() can be called in interrupt context, * so cache the vm_struct. */ tsk->stack_vm_area = vm; stack = kasan_reset_tag(stack); tsk->stack = stack; return 0; } static void free_thread_stack(struct task_struct *tsk) { if (!try_release_thread_stack_to_cache(tsk->stack_vm_area)) thread_stack_delayed_free(tsk); tsk->stack = NULL; tsk->stack_vm_area = NULL; } # else /* !CONFIG_VMAP_STACK */ static void thread_stack_free_rcu(struct rcu_head *rh) { __free_pages(virt_to_page(rh), THREAD_SIZE_ORDER); } static void thread_stack_delayed_free(struct task_struct *tsk) { struct rcu_head *rh = tsk->stack; call_rcu(rh, thread_stack_free_rcu); } static int alloc_thread_stack_node(struct task_struct *tsk, int node) { struct page *page = alloc_pages_node(node, THREADINFO_GFP, THREAD_SIZE_ORDER); if (likely(page)) { tsk->stack = kasan_reset_tag(page_address(page)); return 0; } return -ENOMEM; } static void free_thread_stack(struct task_struct *tsk) { thread_stack_delayed_free(tsk); tsk->stack = NULL; } # endif /* CONFIG_VMAP_STACK */ # else /* !(THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)) */ static struct kmem_cache *thread_stack_cache; static void thread_stack_free_rcu(struct rcu_head *rh) { kmem_cache_free(thread_stack_cache, rh); } static void thread_stack_delayed_free(struct task_struct *tsk) { struct rcu_head *rh = tsk->stack; call_rcu(rh, thread_stack_free_rcu); } static int alloc_thread_stack_node(struct task_struct *tsk, int node) { unsigned long *stack; stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node); stack = kasan_reset_tag(stack); tsk->stack = stack; return stack ? 
0 : -ENOMEM; } static void free_thread_stack(struct task_struct *tsk) { thread_stack_delayed_free(tsk); tsk->stack = NULL; } void thread_stack_cache_init(void) { thread_stack_cache = kmem_cache_create_usercopy("thread_stack", THREAD_SIZE, THREAD_SIZE, 0, 0, THREAD_SIZE, NULL); BUG_ON(thread_stack_cache == NULL); } # endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */ /* SLAB cache for signal_struct structures (tsk->signal) */ static struct kmem_cache *signal_cachep; /* SLAB cache for sighand_struct structures (tsk->sighand) */ struct kmem_cache *sighand_cachep; /* SLAB cache for files_struct structures (tsk->files) */ struct kmem_cache *files_cachep; /* SLAB cache for fs_struct structures (tsk->fs) */ struct kmem_cache *fs_cachep; /* SLAB cache for vm_area_struct structures */ static struct kmem_cache *vm_area_cachep; /* SLAB cache for mm_struct structures (tsk->mm) */ static struct kmem_cache *mm_cachep; #ifdef CONFIG_PER_VMA_LOCK /* SLAB cache for vm_area_struct.lock */ static struct kmem_cache *vma_lock_cachep; static bool vma_lock_alloc(struct vm_area_struct *vma) { vma->vm_lock = kmem_cache_alloc(vma_lock_cachep, GFP_KERNEL); if (!vma->vm_lock) return false; init_rwsem(&vma->vm_lock->lock); vma->vm_lock_seq = -1; return true; } static inline void vma_lock_free(struct vm_area_struct *vma) { kmem_cache_free(vma_lock_cachep, vma->vm_lock); } #else /* CONFIG_PER_VMA_LOCK */ static inline bool vma_lock_alloc(struct vm_area_struct *vma) { return true; } static inline void vma_lock_free(struct vm_area_struct *vma) {} #endif /* CONFIG_PER_VMA_LOCK */ struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) { struct vm_area_struct *vma; vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); if (!vma) return NULL; vma_init(vma, mm); if (!vma_lock_alloc(vma)) { kmem_cache_free(vm_area_cachep, vma); return NULL; } return vma; } struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) { struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); if (!new) return NULL; ASSERT_EXCLUSIVE_WRITER(orig->vm_flags); ASSERT_EXCLUSIVE_WRITER(orig->vm_file); /* * orig->shared.rb may be modified concurrently, but the clone * will be reinitialized. */ data_race(memcpy(new, orig, sizeof(*new))); if (!vma_lock_alloc(new)) { kmem_cache_free(vm_area_cachep, new); return NULL; } INIT_LIST_HEAD(&new->anon_vma_chain); vma_numab_state_init(new); dup_anon_vma_name(orig, new); return new; } void __vm_area_free(struct vm_area_struct *vma) { vma_numab_state_free(vma); free_anon_vma_name(vma); vma_lock_free(vma); kmem_cache_free(vm_area_cachep, vma); } #ifdef CONFIG_PER_VMA_LOCK static void vm_area_free_rcu_cb(struct rcu_head *head) { struct vm_area_struct *vma = container_of(head, struct vm_area_struct, vm_rcu); /* The vma should not be locked while being destroyed. */ VM_BUG_ON_VMA(rwsem_is_locked(&vma->vm_lock->lock), vma); __vm_area_free(vma); } #endif void vm_area_free(struct vm_area_struct *vma) { #ifdef CONFIG_PER_VMA_LOCK call_rcu(&vma->vm_rcu, vm_area_free_rcu_cb); #else __vm_area_free(vma); #endif } static void account_kernel_stack(struct task_struct *tsk, int account) { if (IS_ENABLED(CONFIG_VMAP_STACK)) { struct vm_struct *vm = task_stack_vm_area(tsk); int i; for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) mod_lruvec_page_state(vm->pages[i], NR_KERNEL_STACK_KB, account * (PAGE_SIZE / 1024)); } else { void *stack = task_stack_page(tsk); /* All stack pages are in the same node. 
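 */

/*
 * Illustrative sketch, not part of fork.c: vm_area_free_rcu_cb() above
 * and the thread-stack RCU callbacks earlier recover the enclosing
 * object from an embedded callback head via container_of(). The same
 * idiom in freestanding C, names hypothetical:
 */
#include <stddef.h>

struct cb_head { void (*fn)(struct cb_head *); };
struct object  { int payload; struct cb_head head; };

#define my_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void object_free_cb(struct cb_head *head)
{
	/* Map the embedded head back to the whole object. */
	struct object *obj = my_container_of(head, struct object, head);

	(void)obj;	/* a real callback would free(obj) here */
}

/*
 * All pages of a non-vmapped stack sit on one node, so a single
 * accounting call covers the whole stack: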
*/ mod_lruvec_kmem_state(stack, NR_KERNEL_STACK_KB, account * (THREAD_SIZE / 1024)); } } void exit_task_stack_account(struct task_struct *tsk) { account_kernel_stack(tsk, -1); if (IS_ENABLED(CONFIG_VMAP_STACK)) { struct vm_struct *vm; int i; vm = task_stack_vm_area(tsk); for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) memcg_kmem_uncharge_page(vm->pages[i], 0); } } static void release_task_stack(struct task_struct *tsk) { if (WARN_ON(READ_ONCE(tsk->__state) != TASK_DEAD)) return; /* Better to leak the stack than to free prematurely */ free_thread_stack(tsk); } #ifdef CONFIG_THREAD_INFO_IN_TASK void put_task_stack(struct task_struct *tsk) { if (refcount_dec_and_test(&tsk->stack_refcount)) release_task_stack(tsk); } #endif void free_task(struct task_struct *tsk) { #ifdef CONFIG_SECCOMP WARN_ON_ONCE(tsk->seccomp.filter); #endif release_user_cpus_ptr(tsk); scs_release(tsk); #ifndef CONFIG_THREAD_INFO_IN_TASK /* * The task is finally done with both the stack and thread_info, * so free both. */ release_task_stack(tsk); #else /* * If the task had a separate stack allocation, it should be gone * by now. */ WARN_ON_ONCE(refcount_read(&tsk->stack_refcount) != 0); #endif rt_mutex_debug_task_free(tsk); ftrace_graph_exit_task(tsk); arch_release_task_struct(tsk); if (tsk->flags & PF_KTHREAD) free_kthread_struct(tsk); bpf_task_storage_free(tsk); free_task_struct(tsk); } EXPORT_SYMBOL(free_task); static void dup_mm_exe_file(struct mm_struct *mm, struct mm_struct *oldmm) { struct file *exe_file; exe_file = get_mm_exe_file(oldmm); RCU_INIT_POINTER(mm->exe_file, exe_file); } #ifdef CONFIG_MMU static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) { struct vm_area_struct *mpnt, *tmp; int retval; unsigned long charge = 0; LIST_HEAD(uf); VMA_ITERATOR(vmi, mm, 0); uprobe_start_dup_mmap(); if (mmap_write_lock_killable(oldmm)) { retval = -EINTR; goto fail_uprobe_end; } flush_cache_dup_mm(oldmm); uprobe_dup_mmap(oldmm, mm); /* * Not linked in yet - no deadlock potential: */ mmap_write_lock_nested(mm, SINGLE_DEPTH_NESTING); /* No ordering required: file already has been exposed. */ dup_mm_exe_file(mm, oldmm); mm->total_vm = oldmm->total_vm; mm->data_vm = oldmm->data_vm; mm->exec_vm = oldmm->exec_vm; mm->stack_vm = oldmm->stack_vm; retval = ksm_fork(mm, oldmm); if (retval) goto out; khugepaged_fork(mm, oldmm); /* Use __mt_dup() to efficiently build an identical maple tree. */ retval = __mt_dup(&oldmm->mm_mt, &mm->mm_mt, GFP_KERNEL); if (unlikely(retval)) goto out; mt_clear_in_rcu(vmi.mas.tree); for_each_vma(vmi, mpnt) { struct file *file; vma_start_write(mpnt); if (mpnt->vm_flags & VM_DONTCOPY) { retval = vma_iter_clear_gfp(&vmi, mpnt->vm_start, mpnt->vm_end, GFP_KERNEL); if (retval) goto loop_out; vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt)); continue; } charge = 0; /* * Don't duplicate many vmas if we've been oom-killed (for * example) */ if (fatal_signal_pending(current)) { retval = -EINTR; goto loop_out; } if (mpnt->vm_flags & VM_ACCOUNT) { unsigned long len = vma_pages(mpnt); if (security_vm_enough_memory_mm(oldmm, len)) /* sic */ goto fail_nomem; charge = len; } tmp = vm_area_dup(mpnt); if (!tmp) goto fail_nomem; retval = vma_dup_policy(mpnt, tmp); if (retval) goto fail_nomem_policy; tmp->vm_mm = mm; retval = dup_userfaultfd(tmp, &uf); if (retval) goto fail_nomem_anon_vma_fork; if (tmp->vm_flags & VM_WIPEONFORK) { /* * VM_WIPEONFORK gets a clean slate in the child. * Don't prepare anon_vma until fault since we don't * copy page for current vma. 
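 */

/*
 * Illustrative sketch, not part of fork.c: dup_mmap() unwinds partial
 * failures through the reverse-ordered fail_* labels below, the error
 * pattern used throughout this file. A self-contained analogue, names
 * hypothetical:
 */
#include <errno.h>
#include <stdlib.h>

struct pair { void *a; void *b; };

static int pair_create(struct pair **out)
{
	struct pair *p = malloc(sizeof(*p));

	if (!p)
		goto fail;
	p->a = malloc(64);
	if (!p->a)
		goto fail_free_pair;
	p->b = malloc(64);
	if (!p->b)
		goto fail_free_a;
	*out = p;
	return 0;

fail_free_a:	/* labels run in reverse order of construction */
	free(p->a);
fail_free_pair:
	free(p);
fail:
	return -ENOMEM;
}

/*
 * Hence the child starts with no anon_vma at all: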
*/ tmp->anon_vma = NULL; } else if (anon_vma_fork(tmp, mpnt)) goto fail_nomem_anon_vma_fork; vm_flags_clear(tmp, VM_LOCKED_MASK); /* * Copy/update hugetlb private vma information. */ if (is_vm_hugetlb_page(tmp)) hugetlb_dup_vma_private(tmp); /* * Link the vma into the MT. After using __mt_dup(), memory * allocation is not necessary here, so it cannot fail. */ vma_iter_bulk_store(&vmi, tmp); mm->map_count++; if (tmp->vm_ops && tmp->vm_ops->open) tmp->vm_ops->open(tmp); file = tmp->vm_file; if (file) { struct address_space *mapping = file->f_mapping; get_file(file); i_mmap_lock_write(mapping); if (vma_is_shared_maywrite(tmp)) mapping_allow_writable(mapping); flush_dcache_mmap_lock(mapping); /* insert tmp into the share list, just after mpnt */ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap); flush_dcache_mmap_unlock(mapping); i_mmap_unlock_write(mapping); } if (!(tmp->vm_flags & VM_WIPEONFORK)) retval = copy_page_range(tmp, mpnt); if (retval) { mpnt = vma_next(&vmi); goto loop_out; } } /* a new mm has just been created */ retval = arch_dup_mmap(oldmm, mm); loop_out: vma_iter_free(&vmi); if (!retval) { mt_set_in_rcu(vmi.mas.tree); } else if (mpnt) { /* * The entire maple tree has already been duplicated. If the * mmap duplication fails, mark the failure point with * XA_ZERO_ENTRY. In exit_mmap(), if this marker is encountered, * stop releasing VMAs that have not been duplicated after this * point. */ mas_set_range(&vmi.mas, mpnt->vm_start, mpnt->vm_end - 1); mas_store(&vmi.mas, XA_ZERO_ENTRY); } out: mmap_write_unlock(mm); flush_tlb_mm(oldmm); mmap_write_unlock(oldmm); dup_userfaultfd_complete(&uf); fail_uprobe_end: uprobe_end_dup_mmap(); return retval; fail_nomem_anon_vma_fork: mpol_put(vma_policy(tmp)); fail_nomem_policy: vm_area_free(tmp); fail_nomem: retval = -ENOMEM; vm_unacct_memory(charge); goto loop_out; } static inline int mm_alloc_pgd(struct mm_struct *mm) { mm->pgd = pgd_alloc(mm); if (unlikely(!mm->pgd)) return -ENOMEM; return 0; } static inline void mm_free_pgd(struct mm_struct *mm) { pgd_free(mm, mm->pgd); } #else static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) { mmap_write_lock(oldmm); dup_mm_exe_file(mm, oldmm); mmap_write_unlock(oldmm); return 0; } #define mm_alloc_pgd(mm) (0) #define mm_free_pgd(mm) #endif /* CONFIG_MMU */ static void check_mm(struct mm_struct *mm) { int i; BUILD_BUG_ON_MSG(ARRAY_SIZE(resident_page_types) != NR_MM_COUNTERS, "Please make sure 'struct resident_page_types[]' is updated as well"); for (i = 0; i < NR_MM_COUNTERS; i++) { long x = percpu_counter_sum(&mm->rss_stat[i]); if (unlikely(x)) pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n", mm, resident_page_types[i], x); } if (mm_pgtables_bytes(mm)) pr_alert("BUG: non-zero pgtables_bytes on freeing mm: %ld\n", mm_pgtables_bytes(mm)); #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS VM_BUG_ON_MM(mm->pmd_huge_pte, mm); #endif } #define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL)) #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm))) static void do_check_lazy_tlb(void *arg) { struct mm_struct *mm = arg; WARN_ON_ONCE(current->active_mm == mm); } static void do_shoot_lazy_tlb(void *arg) { struct mm_struct *mm = arg; if (current->active_mm == mm) { WARN_ON_ONCE(current->mm); current->active_mm = &init_mm; switch_mm(mm, &init_mm, current); } } static void cleanup_lazy_tlbs(struct mm_struct *mm) { if (!IS_ENABLED(CONFIG_MMU_LAZY_TLB_SHOOTDOWN)) { /* * In this case, lazy tlb mms are refcounted and would not reach * __mmdrop until all CPUs
have switched away and mmdrop()ed. */ return; } /* * Lazy mm shootdown does not refcount "lazy tlb mm" usage, rather it * requires lazy mm users to switch to another mm when the refcount * drops to zero, before the mm is freed. This requires IPIs here to * switch kernel threads to init_mm. * * archs that use IPIs to flush TLBs can piggy-back that lazy tlb mm * switch with the final userspace teardown TLB flush which leaves the * mm lazy on this CPU but no others, reducing the need for additional * IPIs here. There are cases where a final IPI is still required here, * such as the final mmdrop being performed on a different CPU than the * one exiting, or kernel threads using the mm when userspace exits. * * IPI overheads have not been found to be expensive, but they could be * reduced in a number of possible ways, for example (roughly * increasing order of complexity): * - The last lazy reference created by exit_mm() could instead switch * to init_mm, however it's probable this will run on the same CPU * immediately afterwards, so this may not reduce IPIs much. * - A batch of mms requiring IPIs could be gathered and freed at once. * - CPUs store active_mm where it can be remotely checked without a * lock, to filter out false-positives in the cpumask. * - After mm_users or mm_count reaches zero, switching away from the * mm could clear mm_cpumask to reduce some IPIs, perhaps together * with some batching or delaying of the final IPIs. * - A delayed freeing and RCU-like quiescing sequence based on mm * switching to avoid IPIs completely. */ on_each_cpu_mask(mm_cpumask(mm), do_shoot_lazy_tlb, (void *)mm, 1); if (IS_ENABLED(CONFIG_DEBUG_VM_SHOOT_LAZIES)) on_each_cpu(do_check_lazy_tlb, (void *)mm, 1); } /* * Called when the last reference to the mm * is dropped: either by a lazy thread or by * mmput. Free the page directory and the mm.
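 */

/*
 * Illustrative sketch, not part of fork.c: the mm lifetime is split in
 * two, mm_users (address-space users, mmget()/mmput()) pinning one
 * reference on mm_count (structure references, mmgrab()/mmdrop()). A
 * reduced C11 model of that split, names hypothetical:
 */
#include <stdatomic.h>
#include <stdlib.h>

struct lifetime {
	atomic_int users;	/* cf. mm_users */
	atomic_int grabs;	/* cf. mm_count */
};

static void teardown_contents(struct lifetime *lt) { (void)lt; /* cf. __mmput() */ }

static void drop_struct(struct lifetime *lt)	/* cf. mmdrop() */
{
	if (atomic_fetch_sub(&lt->grabs, 1) == 1)
		free(lt);			/* cf. __mmdrop() */
}

static void put_user_ref(struct lifetime *lt)	/* cf. mmput() */
{
	if (atomic_fetch_sub(&lt->users, 1) == 1) {
		teardown_contents(lt);
		drop_struct(lt);	/* users collectively held one grab */
	}
}

/*
 * __mmdrop() frees the page directory and the mm itself: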
*/ void __mmdrop(struct mm_struct *mm) { BUG_ON(mm == &init_mm); WARN_ON_ONCE(mm == current->mm); /* Ensure no CPUs are using this as their lazy tlb mm */ cleanup_lazy_tlbs(mm); WARN_ON_ONCE(mm == current->active_mm); mm_free_pgd(mm); destroy_context(mm); mmu_notifier_subscriptions_destroy(mm); check_mm(mm); put_user_ns(mm->user_ns); mm_pasid_drop(mm); mm_destroy_cid(mm); percpu_counter_destroy_many(mm->rss_stat, NR_MM_COUNTERS); free_mm(mm); } EXPORT_SYMBOL_GPL(__mmdrop); static void mmdrop_async_fn(struct work_struct *work) { struct mm_struct *mm; mm = container_of(work, struct mm_struct, async_put_work); __mmdrop(mm); } static void mmdrop_async(struct mm_struct *mm) { if (unlikely(atomic_dec_and_test(&mm->mm_count))) { INIT_WORK(&mm->async_put_work, mmdrop_async_fn); schedule_work(&mm->async_put_work); } } static inline void free_signal_struct(struct signal_struct *sig) { taskstats_tgid_free(sig); sched_autogroup_exit(sig); /* * __mmdrop is not safe to call from softirq context on x86 due to * pgd_dtor so postpone it to the async context */ if (sig->oom_mm) mmdrop_async(sig->oom_mm); kmem_cache_free(signal_cachep, sig); } static inline void put_signal_struct(struct signal_struct *sig) { if (refcount_dec_and_test(&sig->sigcnt)) free_signal_struct(sig); } void __put_task_struct(struct task_struct *tsk) { WARN_ON(!tsk->exit_state); WARN_ON(refcount_read(&tsk->usage)); WARN_ON(tsk == current); io_uring_free(tsk); cgroup_free(tsk); task_numa_free(tsk, true); security_task_free(tsk); exit_creds(tsk); delayacct_tsk_free(tsk); put_signal_struct(tsk->signal); sched_core_free(tsk); free_task(tsk); } EXPORT_SYMBOL_GPL(__put_task_struct); void __put_task_struct_rcu_cb(struct rcu_head *rhp) { struct task_struct *task = container_of(rhp, struct task_struct, rcu); __put_task_struct(task); } EXPORT_SYMBOL_GPL(__put_task_struct_rcu_cb); void __init __weak arch_task_cache_init(void) { } /* * set_max_threads */ static void __init set_max_threads(unsigned int max_threads_suggested) { u64 threads; unsigned long nr_pages = PHYS_PFN(memblock_phys_mem_size() - memblock_reserved_size()); /* * The number of threads shall be limited such that the thread * structures may only consume a small part of the available memory. */ if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64) threads = MAX_THREADS; else threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE, (u64) THREAD_SIZE * 8UL); if (threads > max_threads_suggested) threads = max_threads_suggested; max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS); } #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT /* Initialized by the architecture: */ int arch_task_struct_size __read_mostly; #endif static void __init task_struct_whitelist(unsigned long *offset, unsigned long *size) { /* Fetch thread_struct whitelist for the architecture. */ arch_thread_struct_whitelist(offset, size); /* * Handle zero-sized whitelist or empty thread_struct, otherwise * adjust offset to position of thread_struct in task_struct. 
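 */

/*
 * Worked example for set_max_threads() above (illustrative numbers,
 * assuming PAGE_SIZE = 4096 and THREAD_SIZE = 16384): with 16 GiB of
 * usable pages, nr_pages = 4194304, so
 *
 *	threads = nr_pages * PAGE_SIZE / (THREAD_SIZE * 8)
 *	        = 4194304 * 4096 / 131072 = 131072
 *
 * i.e. thread structures may consume at most roughly 1/8th of memory;
 * the result is then clamped to [MIN_THREADS, MAX_THREADS].
 */

/*
 * Per the comment above, adjust the whitelist offset: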
*/ if (unlikely(*size == 0)) *offset = 0; else *offset += offsetof(struct task_struct, thread); } void __init fork_init(void) { int i; #ifndef ARCH_MIN_TASKALIGN #define ARCH_MIN_TASKALIGN 0 #endif int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN); unsigned long useroffset, usersize; /* create a slab on which task_structs can be allocated */ task_struct_whitelist(&useroffset, &usersize); task_struct_cachep = kmem_cache_create_usercopy("task_struct", arch_task_struct_size, align, SLAB_PANIC|SLAB_ACCOUNT, useroffset, usersize, NULL); /* do the arch specific task caches init */ arch_task_cache_init(); set_max_threads(MAX_THREADS); init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2; init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2; init_task.signal->rlim[RLIMIT_SIGPENDING] = init_task.signal->rlim[RLIMIT_NPROC]; for (i = 0; i < UCOUNT_COUNTS; i++) init_user_ns.ucount_max[i] = max_threads/2; set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_NPROC, RLIM_INFINITY); set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_MSGQUEUE, RLIM_INFINITY); set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_SIGPENDING, RLIM_INFINITY); set_userns_rlimit_max(&init_user_ns, UCOUNT_RLIMIT_MEMLOCK, RLIM_INFINITY); #ifdef CONFIG_VMAP_STACK cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache", NULL, free_vm_stack_cache); #endif scs_init(); lockdep_init_task(&init_task); uprobes_init(); } int __weak arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { *dst = *src; return 0; } void set_task_stack_end_magic(struct task_struct *tsk) { unsigned long *stackend; stackend = end_of_stack(tsk); *stackend = STACK_END_MAGIC; /* for overflow detection */ } static struct task_struct *dup_task_struct(struct task_struct *orig, int node) { struct task_struct *tsk; int err; if (node == NUMA_NO_NODE) node = tsk_fork_get_node(orig); tsk = alloc_task_struct_node(node); if (!tsk) return NULL; err = arch_dup_task_struct(tsk, orig); if (err) goto free_tsk; err = alloc_thread_stack_node(tsk, node); if (err) goto free_tsk; #ifdef CONFIG_THREAD_INFO_IN_TASK refcount_set(&tsk->stack_refcount, 1); #endif account_kernel_stack(tsk, 1); err = scs_prepare(tsk, node); if (err) goto free_stack; #ifdef CONFIG_SECCOMP /* * We must handle setting up seccomp filters once we're under * the sighand lock in case orig has changed between now and * then. Until then, filter must be NULL to avoid messing up * the usage counts on the error path calling free_task. */ tsk->seccomp.filter = NULL; #endif setup_thread_stack(tsk, orig); clear_user_return_notifier(tsk); clear_tsk_need_resched(tsk); set_task_stack_end_magic(tsk); clear_syscall_work_syscall_user_dispatch(tsk); #ifdef CONFIG_STACKPROTECTOR tsk->stack_canary = get_random_canary(); #endif if (orig->cpus_ptr == &orig->cpus_mask) tsk->cpus_ptr = &tsk->cpus_mask; dup_user_cpus_ptr(tsk, orig, node); /* * One for the user space visible state that goes away when reaped. * One for the scheduler. 
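 */

/*
 * Illustrative sketch, not part of fork.c: set_task_stack_end_magic()
 * above plants a sentinel word at the far end of the stack; overflow
 * detection later only has to check that it survived. An analogue
 * (MY_STACK_END_MAGIC mirrors the kernel's STACK_END_MAGIC value):
 */
#define MY_STACK_END_MAGIC 0x57AC6E9DUL

static int my_stack_overflowed(const unsigned long *stack_end)
{
	/* A clobbered sentinel means the stack ran past its end. */
	return *stack_end != MY_STACK_END_MAGIC;
}

/*
 * Two rcu_users references, as itemised above: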
*/ refcount_set(&tsk->rcu_users, 2); /* One for the rcu users */ refcount_set(&tsk->usage, 1); #ifdef CONFIG_BLK_DEV_IO_TRACE tsk->btrace_seq = 0; #endif tsk->splice_pipe = NULL; tsk->task_frag.page = NULL; tsk->wake_q.next = NULL; tsk->worker_private = NULL; kcov_task_init(tsk); kmsan_task_create(tsk); kmap_local_fork(tsk); #ifdef CONFIG_FAULT_INJECTION tsk->fail_nth = 0; #endif #ifdef CONFIG_BLK_CGROUP tsk->throttle_disk = NULL; tsk->use_memdelay = 0; #endif #ifdef CONFIG_ARCH_HAS_CPU_PASID tsk->pasid_activated = 0; #endif #ifdef CONFIG_MEMCG tsk->active_memcg = NULL; #endif #ifdef CONFIG_CPU_SUP_INTEL tsk->reported_split_lock = 0; #endif #ifdef CONFIG_SCHED_MM_CID tsk->mm_cid = -1; tsk->last_mm_cid = -1; tsk->mm_cid_active = 0; tsk->migrate_from_cpu = -1; #endif return tsk; free_stack: exit_task_stack_account(tsk); free_thread_stack(tsk); free_tsk: free_task_struct(tsk); return NULL; } __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock); static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT; static int __init coredump_filter_setup(char *s) { default_dump_filter = (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) & MMF_DUMP_FILTER_MASK; return 1; } __setup("coredump_filter=", coredump_filter_setup); #include <linux/init_task.h> static void mm_init_aio(struct mm_struct *mm) { #ifdef CONFIG_AIO spin_lock_init(&mm->ioctx_lock); mm->ioctx_table = NULL; #endif } static __always_inline void mm_clear_owner(struct mm_struct *mm, struct task_struct *p) { #ifdef CONFIG_MEMCG if (mm->owner == p) WRITE_ONCE(mm->owner, NULL); #endif } static void mm_init_owner(struct mm_struct *mm, struct task_struct *p) { #ifdef CONFIG_MEMCG mm->owner = p; #endif } static void mm_init_uprobes_state(struct mm_struct *mm) { #ifdef CONFIG_UPROBES mm->uprobes_state.xol_area = NULL; #endif } static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, struct user_namespace *user_ns) { mt_init_flags(&mm->mm_mt, MM_MT_FLAGS); mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock); atomic_set(&mm->mm_users, 1); atomic_set(&mm->mm_count, 1); seqcount_init(&mm->write_protect_seq); mmap_init_lock(mm); INIT_LIST_HEAD(&mm->mmlist); #ifdef CONFIG_PER_VMA_LOCK mm->mm_lock_seq = 0; #endif mm_pgtables_bytes_init(mm); mm->map_count = 0; mm->locked_vm = 0; atomic64_set(&mm->pinned_vm, 0); memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); spin_lock_init(&mm->page_table_lock); spin_lock_init(&mm->arg_lock); mm_init_cpumask(mm); mm_init_aio(mm); mm_init_owner(mm, p); mm_pasid_init(mm); RCU_INIT_POINTER(mm->exe_file, NULL); mmu_notifier_subscriptions_init(mm); init_tlb_flush_pending(mm); #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS mm->pmd_huge_pte = NULL; #endif mm_init_uprobes_state(mm); hugetlb_count_init(mm); if (current->mm) { mm->flags = mmf_init_flags(current->mm->flags); mm->def_flags = current->mm->def_flags & VM_INIT_DEF_MASK; } else { mm->flags = default_dump_filter; mm->def_flags = 0; } if (mm_alloc_pgd(mm)) goto fail_nopgd; if (init_new_context(p, mm)) goto fail_nocontext; if (mm_alloc_cid(mm)) goto fail_cid; if (percpu_counter_init_many(mm->rss_stat, 0, GFP_KERNEL_ACCOUNT, NR_MM_COUNTERS)) goto fail_pcpu; mm->user_ns = get_user_ns(user_ns); lru_gen_init_mm(mm); return mm; fail_pcpu: mm_destroy_cid(mm); fail_cid: destroy_context(mm); fail_nocontext: mm_free_pgd(mm); fail_nopgd: free_mm(mm); return NULL; } /* * Allocate and initialize an mm_struct. 
*/ struct mm_struct *mm_alloc(void) { struct mm_struct *mm; mm = allocate_mm(); if (!mm) return NULL; memset(mm, 0, sizeof(*mm)); return mm_init(mm, current, current_user_ns()); } EXPORT_SYMBOL_IF_KUNIT(mm_alloc); static inline void __mmput(struct mm_struct *mm) { VM_BUG_ON(atomic_read(&mm->mm_users)); uprobe_clear_state(mm); exit_aio(mm); ksm_exit(mm); khugepaged_exit(mm); /* must run before exit_mmap */ exit_mmap(mm); mm_put_huge_zero_folio(mm); set_mm_exe_file(mm, NULL); if (!list_empty(&mm->mmlist)) { spin_lock(&mmlist_lock); list_del(&mm->mmlist); spin_unlock(&mmlist_lock); } if (mm->binfmt) module_put(mm->binfmt->module); lru_gen_del_mm(mm); mmdrop(mm); } /* * Decrement the use count and release all resources for an mm. */ void mmput(struct mm_struct *mm) { might_sleep(); if (atomic_dec_and_test(&mm->mm_users)) __mmput(mm); } EXPORT_SYMBOL_GPL(mmput); #ifdef CONFIG_MMU static void mmput_async_fn(struct work_struct *work) { struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work); __mmput(mm); } void mmput_async(struct mm_struct *mm) { if (atomic_dec_and_test(&mm->mm_users)) { INIT_WORK(&mm->async_put_work, mmput_async_fn); schedule_work(&mm->async_put_work); } } EXPORT_SYMBOL_GPL(mmput_async); #endif /** * set_mm_exe_file - change a reference to the mm's executable file * @mm: The mm to change. * @new_exe_file: The new file to use. * * This changes mm's executable file (shown as symlink /proc/[pid]/exe). * * Main users are mmput() and sys_execve(). Callers prevent concurrent * invocations: in mmput() nobody alive left, in execve it happens before * the new mm is made visible to anyone. * * Can only fail if new_exe_file != NULL. */ int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) { struct file *old_exe_file; /* * It is safe to dereference the exe_file without RCU as * this function is only called if nobody else can access * this mm -- see comment above for justification. */ old_exe_file = rcu_dereference_raw(mm->exe_file); if (new_exe_file) get_file(new_exe_file); rcu_assign_pointer(mm->exe_file, new_exe_file); if (old_exe_file) fput(old_exe_file); return 0; } /** * replace_mm_exe_file - replace a reference to the mm's executable file * @mm: The mm to change. * @new_exe_file: The new file to use. * * This changes mm's executable file (shown as symlink /proc/[pid]/exe). * * Main user is sys_prctl(PR_SET_MM_MAP/EXE_FILE). */ int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) { struct vm_area_struct *vma; struct file *old_exe_file; int ret = 0; /* Forbid mm->exe_file change if old file still mapped. */ old_exe_file = get_mm_exe_file(mm); if (old_exe_file) { VMA_ITERATOR(vmi, mm, 0); mmap_read_lock(mm); for_each_vma(vmi, vma) { if (!vma->vm_file) continue; if (path_equal(&vma->vm_file->f_path, &old_exe_file->f_path)) { ret = -EBUSY; break; } } mmap_read_unlock(mm); fput(old_exe_file); if (ret) return ret; } get_file(new_exe_file); /* set the new file */ mmap_write_lock(mm); old_exe_file = rcu_dereference_raw(mm->exe_file); rcu_assign_pointer(mm->exe_file, new_exe_file); mmap_write_unlock(mm); if (old_exe_file) fput(old_exe_file); return 0; } /** * get_mm_exe_file - acquire a reference to the mm's executable file * @mm: The mm of interest. * * Returns %NULL if mm has no associated executable file. * User must release file via fput(). 
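 */

/*
 * Illustrative sketch, not part of fork.c: set_mm_exe_file() and
 * replace_mm_exe_file() above follow a publish-then-release pattern:
 * pin the new object, atomically swap the pointer, then drop whatever
 * was there. A C11 analogue, names hypothetical:
 */
#include <stdatomic.h>

struct fref { atomic_int cnt; };

static void fref_get(struct fref *f) { atomic_fetch_add(&f->cnt, 1); }
static void fref_put(struct fref *f) { (void)f; /* free on last put, elided */ }

static void slot_set(_Atomic(struct fref *) *slot, struct fref *newref)
{
	struct fref *old;

	if (newref)
		fref_get(newref);		/* pin before publishing */
	old = atomic_exchange(slot, newref);	/* publish atomically */
	if (old)
		fref_put(old);			/* release the old holder */
}

/*
 * get_mm_exe_file(), whose kerneldoc appears above: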
*/ struct file *get_mm_exe_file(struct mm_struct *mm) { struct file *exe_file; rcu_read_lock(); exe_file = get_file_rcu(&mm->exe_file); rcu_read_unlock(); return exe_file; } /** * get_task_exe_file - acquire a reference to the task's executable file * @task: The task. * * Returns %NULL if task's mm (if any) has no associated executable file or * this is a kernel thread with borrowed mm (see the comment above get_task_mm). * User must release file via fput(). */ struct file *get_task_exe_file(struct task_struct *task) { struct file *exe_file = NULL; struct mm_struct *mm; task_lock(task); mm = task->mm; if (mm) { if (!(task->flags & PF_KTHREAD)) exe_file = get_mm_exe_file(mm); } task_unlock(task); return exe_file; } /** * get_task_mm - acquire a reference to the task's mm * @task: The task. * * Returns %NULL if the task has no mm, or if the task is a kernel * thread (PF_KTHREAD set) that has only transiently adopted a user mm * (e.g. with use_mm to do its AIO). Otherwise returns the mm after * bumping up the use count. User must release the mm via mmput() * after use. Typically used by /proc and ptrace. */ struct mm_struct *get_task_mm(struct task_struct *task) { struct mm_struct *mm; if (task->flags & PF_KTHREAD) return NULL; task_lock(task); mm = task->mm; if (mm) mmget(mm); task_unlock(task); return mm; } EXPORT_SYMBOL_GPL(get_task_mm); struct mm_struct *mm_access(struct task_struct *task, unsigned int mode) { struct mm_struct *mm; int err; err = down_read_killable(&task->signal->exec_update_lock); if (err) return ERR_PTR(err); mm = get_task_mm(task); if (mm && mm != current->mm && !ptrace_may_access(task, mode)) { mmput(mm); mm = ERR_PTR(-EACCES); } up_read(&task->signal->exec_update_lock); return mm; } static void complete_vfork_done(struct task_struct *tsk) { struct completion *vfork; task_lock(tsk); vfork = tsk->vfork_done; if (likely(vfork)) { tsk->vfork_done = NULL; complete(vfork); } task_unlock(tsk); } static int wait_for_vfork_done(struct task_struct *child, struct completion *vfork) { unsigned int state = TASK_KILLABLE|TASK_FREEZABLE; int killed; cgroup_enter_frozen(); killed = wait_for_completion_state(vfork, state); cgroup_leave_frozen(false); if (killed) { task_lock(child); child->vfork_done = NULL; task_unlock(child); } put_task_struct(child); return killed; } /* Please note the differences between mmput and mm_release. * mmput is called whenever we stop holding onto a mm_struct, * error success whatever. * * mm_release is called after a mm_struct has been removed * from the current process. * * This difference is important for error handling, when we * only half set up a mm_struct for a new process and need to restore * the old one. Because we mmput the new mm_struct before * restoring the old one. . . * Eric Biederman 10 January 1998 */ static void mm_release(struct task_struct *tsk, struct mm_struct *mm) { uprobe_free_utask(tsk); /* Get rid of any cached register state */ deactivate_mm(tsk, mm); /* * Signal userspace if we're not exiting with a core dump * because we want to leave the value intact for debugging * purposes. */ if (tsk->clear_child_tid) { if (atomic_read(&mm->mm_users) > 1) { /* * We don't check the error code - if userspace has * not set up a proper pointer then tough luck. */ put_user(0, tsk->clear_child_tid); do_futex(tsk->clear_child_tid, FUTEX_WAKE, 1, NULL, NULL, 0, 0); } tsk->clear_child_tid = NULL; } /* * All done, finally we can wake up parent and return this mm to him. * Also kthread_stop() uses this completion for synchronization.
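 */

/*
 * Illustrative sketch, not part of fork.c: the clear_child_tid protocol
 * above is what pthread_join()-style waiting builds on. Userspace
 * registers a word via CLONE_CHILD_CLEARTID (or set_tid_address()); at
 * exit the kernel stores 0 there and issues a FUTEX_WAKE. The waiting
 * side, names hypothetical:
 */
#define _GNU_SOURCE
#include <linux/futex.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static void join_via_ctid(pid_t *ctid)
{
	pid_t tid;

	/* Loop until the kernel has zeroed the registered tid word. */
	while ((tid = __atomic_load_n(ctid, __ATOMIC_ACQUIRE)) != 0)
		syscall(SYS_futex, ctid, FUTEX_WAIT, tid, NULL, NULL, 0);
}

/*
 * Wake a waiting vfork() parent last: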
*/ if (tsk->vfork_done) complete_vfork_done(tsk); } void exit_mm_release(struct task_struct *tsk, struct mm_struct *mm) { futex_exit_release(tsk); mm_release(tsk, mm); } void exec_mm_release(struct task_struct *tsk, struct mm_struct *mm) { futex_exec_release(tsk); mm_release(tsk, mm); } /** * dup_mm() - duplicates an existing mm structure * @tsk: the task_struct with which the new mm will be associated. * @oldmm: the mm to duplicate. * * Allocates a new mm structure and duplicates the provided @oldmm structure * content into it. * * Return: the duplicated mm or NULL on failure. */ static struct mm_struct *dup_mm(struct task_struct *tsk, struct mm_struct *oldmm) { struct mm_struct *mm; int err; mm = allocate_mm(); if (!mm) goto fail_nomem; memcpy(mm, oldmm, sizeof(*mm)); if (!mm_init(mm, tsk, mm->user_ns)) goto fail_nomem; err = dup_mmap(mm, oldmm); if (err) goto free_pt; mm->hiwater_rss = get_mm_rss(mm); mm->hiwater_vm = mm->total_vm; if (mm->binfmt && !try_module_get(mm->binfmt->module)) goto free_pt; return mm; free_pt: /* don't put binfmt in mmput, we haven't got module yet */ mm->binfmt = NULL; mm_init_owner(mm, NULL); mmput(mm); fail_nomem: return NULL; } static int copy_mm(unsigned long clone_flags, struct task_struct *tsk) { struct mm_struct *mm, *oldmm; tsk->min_flt = tsk->maj_flt = 0; tsk->nvcsw = tsk->nivcsw = 0; #ifdef CONFIG_DETECT_HUNG_TASK tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw; tsk->last_switch_time = 0; #endif tsk->mm = NULL; tsk->active_mm = NULL; /* * Are we cloning a kernel thread? * * We need to steal an active VM for that.. */ oldmm = current->mm; if (!oldmm) return 0; if (clone_flags & CLONE_VM) { mmget(oldmm); mm = oldmm; } else { mm = dup_mm(tsk, current->mm); if (!mm) return -ENOMEM; } tsk->mm = mm; tsk->active_mm = mm; sched_mm_cid_fork(tsk); return 0; } static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) { struct fs_struct *fs = current->fs; if (clone_flags & CLONE_FS) { /* tsk->fs is already what we want */ spin_lock(&fs->lock); /* "users" and "in_exec" locked for check_unsafe_exec() */ if (fs->in_exec) { spin_unlock(&fs->lock); return -EAGAIN; } fs->users++; spin_unlock(&fs->lock); return 0; } tsk->fs = copy_fs_struct(fs); if (!tsk->fs) return -ENOMEM; return 0; } static int copy_files(unsigned long clone_flags, struct task_struct *tsk, int no_files) { struct files_struct *oldf, *newf; int error = 0; /* * A background process may not have any files ... */ oldf = current->files; if (!oldf) goto out; if (no_files) { tsk->files = NULL; goto out; } if (clone_flags & CLONE_FILES) { atomic_inc(&oldf->count); goto out; } newf = dup_fd(oldf, NR_OPEN_MAX, &error); if (!newf) goto out; tsk->files = newf; error = 0; out: return error; } static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) { struct sighand_struct *sig; if (clone_flags & CLONE_SIGHAND) { refcount_inc(&current->sighand->count); return 0; } sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL); RCU_INIT_POINTER(tsk->sighand, sig); if (!sig) return -ENOMEM; refcount_set(&sig->count, 1); spin_lock_irq(&current->sighand->siglock); memcpy(sig->action, current->sighand->action, sizeof(sig->action)); spin_unlock_irq(&current->sighand->siglock); /* Reset all signal handlers not set to SIG_IGN to SIG_DFL.
*/ if (clone_flags & CLONE_CLEAR_SIGHAND) flush_signal_handlers(tsk, 0); return 0; } void __cleanup_sighand(struct sighand_struct *sighand) { if (refcount_dec_and_test(&sighand->count)) { signalfd_cleanup(sighand); /* * sighand_cachep is SLAB_TYPESAFE_BY_RCU so we can free it * without an RCU grace period, see __lock_task_sighand(). */ kmem_cache_free(sighand_cachep, sighand); } } /* * Initialize POSIX timer handling for a thread group. */ static void posix_cpu_timers_init_group(struct signal_struct *sig) { struct posix_cputimers *pct = &sig->posix_cputimers; unsigned long cpu_limit; cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); posix_cputimers_group_init(pct, cpu_limit); } static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) { struct signal_struct *sig; if (clone_flags & CLONE_THREAD) return 0; sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL); tsk->signal = sig; if (!sig) return -ENOMEM; sig->nr_threads = 1; sig->quick_threads = 1; atomic_set(&sig->live, 1); refcount_set(&sig->sigcnt, 1); /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */ sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node); tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head); init_waitqueue_head(&sig->wait_chldexit); sig->curr_target = tsk; init_sigpending(&sig->shared_pending); INIT_HLIST_HEAD(&sig->multiprocess); seqlock_init(&sig->stats_lock); prev_cputime_init(&sig->prev_cputime); #ifdef CONFIG_POSIX_TIMERS INIT_LIST_HEAD(&sig->posix_timers); hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); sig->real_timer.function = it_real_fn; #endif task_lock(current->group_leader); memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); task_unlock(current->group_leader); posix_cpu_timers_init_group(sig); tty_audit_fork(sig); sched_autogroup_fork(sig); sig->oom_score_adj = current->signal->oom_score_adj; sig->oom_score_adj_min = current->signal->oom_score_adj_min; mutex_init(&sig->cred_guard_mutex); init_rwsem(&sig->exec_update_lock); return 0; } static void copy_seccomp(struct task_struct *p) { #ifdef CONFIG_SECCOMP /* * Must be called with sighand->lock held, which is common to * all threads in the group. Holding cred_guard_mutex is not * needed because this new task is not yet running and cannot * be racing exec. */ assert_spin_locked(&current->sighand->siglock); /* Ref-count the new filter user, and assign it. */ get_seccomp_filter(current); p->seccomp = current->seccomp; /* * Explicitly enable no_new_privs here in case it got set * between the task_struct being duplicated and holding the * sighand lock. The seccomp state and nnp must be in sync. */ if (task_no_new_privs(current)) task_set_no_new_privs(p); /* * If the parent gained a seccomp mode after copying thread * flags and before we took the sighand lock, we have * to manually enable the seccomp thread flag here.
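 */

/*
 * Illustrative sketch, not part of fork.c: CLONE_CLEAR_SIGHAND, handled
 * in copy_sighand() above, is reachable from userspace through
 * clone3(); the child then starts with default signal dispositions.
 * The flag cannot be combined with CLONE_SIGHAND. Hypothetical helper:
 */
#define _GNU_SOURCE
#include <linux/sched.h>	/* struct clone_args, CLONE_CLEAR_SIGHAND */
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <signal.h>

static pid_t fork_with_default_handlers(void)
{
	struct clone_args args;

	memset(&args, 0, sizeof(args));
	args.flags = CLONE_CLEAR_SIGHAND;
	args.exit_signal = SIGCHLD;
	return syscall(SYS_clone3, &args, sizeof(args));
}

/*
 * Mirror the inherited seccomp mode into the thread work flag: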
*/ if (p->seccomp.mode != SECCOMP_MODE_DISABLED) set_task_syscall_work(p, SECCOMP); #endif } SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr) { current->clear_child_tid = tidptr; return task_pid_vnr(current); } static void rt_mutex_init_task(struct task_struct *p) { raw_spin_lock_init(&p->pi_lock); #ifdef CONFIG_RT_MUTEXES p->pi_waiters = RB_ROOT_CACHED; p->pi_top_task = NULL; p->pi_blocked_on = NULL; #endif } static inline void init_task_pid_links(struct task_struct *task) { enum pid_type type; for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) INIT_HLIST_NODE(&task->pid_links[type]); } static inline void init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid) { if (type == PIDTYPE_PID) task->thread_pid = pid; else task->signal->pids[type] = pid; } static inline void rcu_copy_process(struct task_struct *p) { #ifdef CONFIG_PREEMPT_RCU p->rcu_read_lock_nesting = 0; p->rcu_read_unlock_special.s = 0; p->rcu_blocked_node = NULL; INIT_LIST_HEAD(&p->rcu_node_entry); #endif /* #ifdef CONFIG_PREEMPT_RCU */ #ifdef CONFIG_TASKS_RCU p->rcu_tasks_holdout = false; INIT_LIST_HEAD(&p->rcu_tasks_holdout_list); p->rcu_tasks_idle_cpu = -1; INIT_LIST_HEAD(&p->rcu_tasks_exit_list); #endif /* #ifdef CONFIG_TASKS_RCU */ #ifdef CONFIG_TASKS_TRACE_RCU p->trc_reader_nesting = 0; p->trc_reader_special.s = 0; INIT_LIST_HEAD(&p->trc_holdout_list); INIT_LIST_HEAD(&p->trc_blkd_node); #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ } /** * __pidfd_prepare - allocate a new pidfd_file and reserve a pidfd * @pid: the struct pid for which to create a pidfd * @flags: flags of the new @pidfd * @ret: Where to return the file for the pidfd. * * Allocate a new file that stashes @pid and reserve a new pidfd number in the * caller's file descriptor table. The pidfd is reserved but not installed yet. * * The helper doesn't perform checks on @pid which makes it useful for pidfds * created via CLONE_PIDFD where @pid has no task attached when the pidfd and * pidfd file are prepared. * * If this function returns successfully the caller is responsible to either * call fd_install() passing the returned pidfd and pidfd file as arguments in * order to install the pidfd into its file descriptor table or they must use * put_unused_fd() and fput() on the returned pidfd and pidfd file * respectively. * * This function is useful when a pidfd must already be reserved but there * might still be points of failure afterwards and the caller wants to ensure * that no pidfd is leaked into its file descriptor table. * * Return: On success, a reserved pidfd is returned from the function and a new * pidfd file is returned in the last argument to the function. On * error, a negative error code is returned from the function and the * last argument remains unchanged. */ static int __pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret) { int pidfd; struct file *pidfd_file; pidfd = get_unused_fd_flags(O_CLOEXEC); if (pidfd < 0) return pidfd; pidfd_file = pidfs_alloc_file(pid, flags | O_RDWR); if (IS_ERR(pidfd_file)) { put_unused_fd(pidfd); return PTR_ERR(pidfd_file); } /* * anon_inode_getfile() ignores everything outside of the * O_ACCMODE | O_NONBLOCK mask, set PIDFD_THREAD manually. */ pidfd_file->f_flags |= (flags & PIDFD_THREAD); *ret = pidfd_file; return pidfd; } /** * pidfd_prepare - allocate a new pidfd_file and reserve a pidfd * @pid: the struct pid for which to create a pidfd * @flags: flags of the new @pidfd * @ret: Where to return the pidfd. 
* * Allocate a new file that stashes @pid and reserve a new pidfd number in the * caller's file descriptor table. The pidfd is reserved but not installed yet. * * The helper verifies that @pid is still in use, without PIDFD_THREAD the * task identified by @pid must be a thread-group leader. * * If this function returns successfully the caller is responsible to either * call fd_install() passing the returned pidfd and pidfd file as arguments in * order to install the pidfd into its file descriptor table or they must use * put_unused_fd() and fput() on the returned pidfd and pidfd file * respectively. * * This function is useful when a pidfd must already be reserved but there * might still be points of failure afterwards and the caller wants to ensure * that no pidfd is leaked into its file descriptor table. * * Return: On success, a reserved pidfd is returned from the function and a new * pidfd file is returned in the last argument to the function. On * error, a negative error code is returned from the function and the * last argument remains unchanged. */ int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret) { if (!pid) return -EINVAL; scoped_guard(rcu) { struct task_struct *tsk; if (flags & PIDFD_THREAD) tsk = pid_task(pid, PIDTYPE_PID); else tsk = pid_task(pid, PIDTYPE_TGID); if (!tsk) return -EINVAL; /* Don't create pidfds for kernel threads for now. */ if (tsk->flags & PF_KTHREAD) return -EINVAL; } return __pidfd_prepare(pid, flags, ret); } static void __delayed_free_task(struct rcu_head *rhp) { struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); free_task(tsk); } static __always_inline void delayed_free_task(struct task_struct *tsk) { if (IS_ENABLED(CONFIG_MEMCG)) call_rcu(&tsk->rcu, __delayed_free_task); else free_task(tsk); } static void copy_oom_score_adj(u64 clone_flags, struct task_struct *tsk) { /* Skip if kernel thread */ if (!tsk->mm) return; /* Skip if spawning a thread or using vfork */ if ((clone_flags & (CLONE_VM | CLONE_THREAD | CLONE_VFORK)) != CLONE_VM) return; /* We need to synchronize with __set_oom_adj */ mutex_lock(&oom_adj_mutex); set_bit(MMF_MULTIPROCESS, &tsk->mm->flags); /* Update the values in case they were changed after copy_signal */ tsk->signal->oom_score_adj = current->signal->oom_score_adj; tsk->signal->oom_score_adj_min = current->signal->oom_score_adj_min; mutex_unlock(&oom_adj_mutex); } #ifdef CONFIG_RV static void rv_task_fork(struct task_struct *p) { int i; for (i = 0; i < RV_PER_TASK_MONITORS; i++) p->rv[i].da_mon.monitoring = false; } #else #define rv_task_fork(p) do {} while (0) #endif /* * This creates a new process as a copy of the old one, * but does not actually start it yet. * * It copies the registers, and all the appropriate * parts of the process environment (as per the clone * flags). The actual kick-off is left to the caller. 
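 */

/*
 * Illustrative sketch, not part of fork.c: the pidfd plumbing above is
 * what CLONE_PIDFD exposes through clone3(). The kernel writes the new
 * descriptor to the address given in clone_args.pidfd, and the pidfd
 * polls readable once the child terminates. Names hypothetical:
 */
#define _GNU_SOURCE
#include <linux/sched.h>	/* struct clone_args, CLONE_PIDFD */
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <signal.h>
#include <poll.h>

static pid_t fork_with_pidfd(int *pidfd_out)
{
	struct clone_args args;

	memset(&args, 0, sizeof(args));
	args.flags = CLONE_PIDFD;
	args.pidfd = (uint64_t)(uintptr_t)pidfd_out;
	args.exit_signal = SIGCHLD;
	return syscall(SYS_clone3, &args, sizeof(args));
}

static void wait_for_exit(int pidfd)
{
	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };

	poll(&pfd, 1, -1);	/* POLLIN: the child has terminated */
}

/*
 * copy_process() follows: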
 */
__latent_entropy struct task_struct *copy_process(
					struct pid *pid,
					int trace,
					int node,
					struct kernel_clone_args *args)
{
	int pidfd = -1, retval;
	struct task_struct *p;
	struct multiprocess_signals delayed;
	struct file *pidfile = NULL;
	const u64 clone_flags = args->flags;
	struct nsproxy *nsp = current->nsproxy;

	/*
	 * Don't allow sharing the root directory with processes in a different
	 * namespace
	 */
	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
	    current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	/*
	 * If the new process will be in a different pid or user namespace
	 * do not allow it to share a thread group with the forking task.
	 */
	if (clone_flags & CLONE_THREAD) {
		if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
		    (task_active_pid_ns(current) != nsp->pid_ns_for_children))
			return ERR_PTR(-EINVAL);
	}

	if (clone_flags & CLONE_PIDFD) {
		/*
		 * - CLONE_DETACHED is blocked so that we can potentially
		 *   reuse it later for CLONE_PIDFD.
		 */
		if (clone_flags & CLONE_DETACHED)
			return ERR_PTR(-EINVAL);
	}

	/*
	 * Force any signals received before this point to be delivered
	 * before the fork happens.  Collect up signals sent to multiple
	 * processes that happen during the fork and delay them so that
	 * they appear to happen after the fork.
	 */
	sigemptyset(&delayed.signal);
	INIT_HLIST_NODE(&delayed.node);

	spin_lock_irq(&current->sighand->siglock);
	if (!(clone_flags & CLONE_THREAD))
		hlist_add_head(&delayed.node, &current->signal->multiprocess);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	retval = -ERESTARTNOINTR;
	if (task_sigpending(current))
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current, node);
	if (!p)
		goto fork_out;
	p->flags &= ~PF_KTHREAD;
	if (args->kthread)
		p->flags |= PF_KTHREAD;
	if (args->user_worker) {
		/*
		 * Mark us a user worker, and block any signal that isn't
		 * fatal or STOP
		 */
		p->flags |= PF_USER_WORKER;
		siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP));
	}
	if (args->io_thread)
		p->flags |= PF_IO_WORKER;

	if (args->name)
		strscpy_pad(p->comm, args->name, sizeof(p->comm));

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ?
args->child_tid : NULL; ftrace_graph_init_task(p); rt_mutex_init_task(p); lockdep_assert_irqs_enabled(); #ifdef CONFIG_PROVE_LOCKING DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); #endif retval = copy_creds(p, clone_flags); if (retval < 0) goto bad_fork_free; retval = -EAGAIN; if (is_rlimit_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) { if (p->real_cred->user != INIT_USER && !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) goto bad_fork_cleanup_count; } current->flags &= ~PF_NPROC_EXCEEDED; /* * If multiple threads are within copy_process(), then this check * triggers too late. This doesn't hurt, the check is only there * to stop root fork bombs. */ retval = -EAGAIN; if (data_race(nr_threads >= max_threads)) goto bad_fork_cleanup_count; delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ p->flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER | PF_IDLE | PF_NO_SETAFFINITY); p->flags |= PF_FORKNOEXEC; INIT_LIST_HEAD(&p->children); INIT_LIST_HEAD(&p->sibling); rcu_copy_process(p); p->vfork_done = NULL; spin_lock_init(&p->alloc_lock); init_sigpending(&p->pending); p->utime = p->stime = p->gtime = 0; #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME p->utimescaled = p->stimescaled = 0; #endif prev_cputime_init(&p->prev_cputime); #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN seqcount_init(&p->vtime.seqcount); p->vtime.starttime = 0; p->vtime.state = VTIME_INACTIVE; #endif #ifdef CONFIG_IO_URING p->io_uring = NULL; #endif p->default_timer_slack_ns = current->timer_slack_ns; #ifdef CONFIG_PSI p->psi_flags = 0; #endif task_io_accounting_init(&p->ioac); acct_clear_integrals(p); posix_cputimers_init(&p->posix_cputimers); p->io_context = NULL; audit_set_context(p, NULL); cgroup_fork(p); if (args->kthread) { if (!set_kthread_struct(p)) goto bad_fork_cleanup_delayacct; } #ifdef CONFIG_NUMA p->mempolicy = mpol_dup(p->mempolicy); if (IS_ERR(p->mempolicy)) { retval = PTR_ERR(p->mempolicy); p->mempolicy = NULL; goto bad_fork_cleanup_delayacct; } #endif #ifdef CONFIG_CPUSETS p->cpuset_mem_spread_rotor = NUMA_NO_NODE; p->cpuset_slab_spread_rotor = NUMA_NO_NODE; seqcount_spinlock_init(&p->mems_allowed_seq, &p->alloc_lock); #endif #ifdef CONFIG_TRACE_IRQFLAGS memset(&p->irqtrace, 0, sizeof(p->irqtrace)); p->irqtrace.hardirq_disable_ip = _THIS_IP_; p->irqtrace.softirq_enable_ip = _THIS_IP_; p->softirqs_enabled = 1; p->softirq_context = 0; #endif p->pagefault_disabled = 0; #ifdef CONFIG_LOCKDEP lockdep_init_task(p); #endif #ifdef CONFIG_DEBUG_MUTEXES p->blocked_on = NULL; /* not blocked yet */ #endif #ifdef CONFIG_BCACHE p->sequential_io = 0; p->sequential_io_avg = 0; #endif #ifdef CONFIG_BPF_SYSCALL RCU_INIT_POINTER(p->bpf_storage, NULL); p->bpf_ctx = NULL; #endif /* Perform scheduler related setup. Assign this task to a CPU. 
*/ retval = sched_fork(clone_flags, p); if (retval) goto bad_fork_cleanup_policy; retval = perf_event_init_task(p, clone_flags); if (retval) goto bad_fork_cleanup_policy; retval = audit_alloc(p); if (retval) goto bad_fork_cleanup_perf; /* copy all the process information */ shm_init_task(p); retval = security_task_alloc(p, clone_flags); if (retval) goto bad_fork_cleanup_audit; retval = copy_semundo(clone_flags, p); if (retval) goto bad_fork_cleanup_security; retval = copy_files(clone_flags, p, args->no_files); if (retval) goto bad_fork_cleanup_semundo; retval = copy_fs(clone_flags, p); if (retval) goto bad_fork_cleanup_files; retval = copy_sighand(clone_flags, p); if (retval) goto bad_fork_cleanup_fs; retval = copy_signal(clone_flags, p); if (retval) goto bad_fork_cleanup_sighand; retval = copy_mm(clone_flags, p); if (retval) goto bad_fork_cleanup_signal; retval = copy_namespaces(clone_flags, p); if (retval) goto bad_fork_cleanup_mm; retval = copy_io(clone_flags, p); if (retval) goto bad_fork_cleanup_namespaces; retval = copy_thread(p, args); if (retval) goto bad_fork_cleanup_io; stackleak_task_init(p); if (pid != &init_struct_pid) { pid = alloc_pid(p->nsproxy->pid_ns_for_children, args->set_tid, args->set_tid_size); if (IS_ERR(pid)) { retval = PTR_ERR(pid); goto bad_fork_cleanup_thread; } } /* * This has to happen after we've potentially unshared the file * descriptor table (so that the pidfd doesn't leak into the child * if the fd table isn't shared). */ if (clone_flags & CLONE_PIDFD) { int flags = (clone_flags & CLONE_THREAD) ? PIDFD_THREAD : 0; /* Don't create pidfds for kernel threads for now. */ if (args->kthread) { retval = -EINVAL; goto bad_fork_free_pid; } /* Note that no task has been attached to @pid yet. */ retval = __pidfd_prepare(pid, flags, &pidfile); if (retval < 0) goto bad_fork_free_pid; pidfd = retval; retval = put_user(pidfd, args->pidfd); if (retval) goto bad_fork_put_pidfd; } #ifdef CONFIG_BLOCK p->plug = NULL; #endif futex_init_task(p); /* * sigaltstack should be cleared when sharing the same VM */ if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM) sas_ss_reset(p); /* * Syscall tracing and stepping should be turned off in the * child regardless of CLONE_PTRACE. */ user_disable_single_step(p); clear_task_syscall_work(p, SYSCALL_TRACE); #if defined(CONFIG_GENERIC_ENTRY) || defined(TIF_SYSCALL_EMU) clear_task_syscall_work(p, SYSCALL_EMU); #endif clear_tsk_latency_tracing(p); /* ok, now we should be set up.. */ p->pid = pid_nr(pid); if (clone_flags & CLONE_THREAD) { p->group_leader = current->group_leader; p->tgid = current->tgid; } else { p->group_leader = p; p->tgid = p->pid; } p->nr_dirtied = 0; p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10); p->dirty_paused_when = 0; p->pdeath_signal = 0; p->task_works = NULL; clear_posix_cputimers_work(p); #ifdef CONFIG_KRETPROBES p->kretprobe_instances.first = NULL; #endif #ifdef CONFIG_RETHOOK p->rethooks.first = NULL; #endif /* * Ensure that the cgroup subsystem policies allow the new process to be * forked. It should be noted that the new process's css_set can be changed * between here and cgroup_post_fork() if an organisation operation is in * progress. */ retval = cgroup_can_fork(p, args); if (retval) goto bad_fork_put_pidfd; /* * Now that the cgroups are pinned, re-clone the parent cgroup and put * the new task on the correct runqueue. All this *before* the task * becomes visible. 
	 *
	 * This isn't part of ->can_fork() because while the re-cloning is
	 * cgroup specific, it unconditionally needs to place the task on a
	 * runqueue.
	 */
	sched_cgroup_fork(p, args);

	/*
	 * From this point on we must avoid any synchronous user-space
	 * communication until we take the tasklist-lock. In particular, we do
	 * not want user-space to be able to predict the process start-time by
	 * stalling fork(2) after we recorded the start_time but before it is
	 * visible to the system.
	 */
	p->start_time = ktime_get_ns();
	p->start_boottime = ktime_get_boottime_ns();

	/*
	 * Make it visible to the rest of the system, but don't wake it up yet.
	 * Need tasklist lock for parent etc handling!
	 */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
		if (clone_flags & CLONE_THREAD)
			p->exit_signal = -1;
		else
			p->exit_signal = current->group_leader->exit_signal;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
		p->exit_signal = args->exit_signal;
	}

	klp_copy_process(p);

	sched_core_fork(p);

	spin_lock(&current->sighand->siglock);

	rv_task_fork(p);

	rseq_fork(p, clone_flags);

	/* Don't start children in a dying pid namespace */
	if (unlikely(!(ns_of_pid(pid)->pid_allocated & PIDNS_ADDING))) {
		retval = -ENOMEM;
		goto bad_fork_cancel_cgroup;
	}

	/* Let kill terminate clone/fork in the middle */
	if (fatal_signal_pending(current)) {
		retval = -EINTR;
		goto bad_fork_cancel_cgroup;
	}

	/* No more failure paths after this point. */

	/*
	 * Copy seccomp details explicitly here, in case they were changed
	 * before holding sighand lock.
	 */
	copy_seccomp(p);

	init_task_pid_links(p);
	if (likely(p->pid)) {
		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

		init_task_pid(p, PIDTYPE_PID, pid);
		if (thread_group_leader(p)) {
			init_task_pid(p, PIDTYPE_TGID, pid);
			init_task_pid(p, PIDTYPE_PGID, task_pgrp(current));
			init_task_pid(p, PIDTYPE_SID, task_session(current));

			if (is_child_reaper(pid)) {
				ns_of_pid(pid)->child_reaper = p;
				p->signal->flags |= SIGNAL_UNKILLABLE;
			}
			p->signal->shared_pending.signal = delayed.signal;
			p->signal->tty = tty_kref_get(current->signal->tty);
			/*
			 * Inherit has_child_subreaper flag under the same
			 * tasklist_lock with adding child to the process tree
			 * for propagate_has_child_subreaper optimization.
			 */
			p->signal->has_child_subreaper = p->real_parent->signal->has_child_subreaper ||
							 p->real_parent->signal->is_child_subreaper;
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			attach_pid(p, PIDTYPE_TGID);
			attach_pid(p, PIDTYPE_PGID);
			attach_pid(p, PIDTYPE_SID);
			__this_cpu_inc(process_counts);
		} else {
			current->signal->nr_threads++;
			current->signal->quick_threads++;
			atomic_inc(&current->signal->live);
			refcount_inc(&current->signal->sigcnt);
			task_join_group_stop(p);
			list_add_tail_rcu(&p->thread_node,
					  &p->signal->thread_head);
		}
		attach_pid(p, PIDTYPE_PID);
		nr_threads++;
	}
	total_forks++;
	hlist_del_init(&delayed.node);
	spin_unlock(&current->sighand->siglock);
	syscall_tracepoint_update(p);
	write_unlock_irq(&tasklist_lock);

	if (pidfile)
		fd_install(pidfd, pidfile);

	proc_fork_connector(p);
	sched_post_fork(p);
	cgroup_post_fork(p, args);
	perf_event_fork(p);

	trace_task_newtask(p, clone_flags);
	uprobe_copy_process(p, clone_flags);
	user_events_fork(p, clone_flags);

	copy_oom_score_adj(clone_flags, p);

	return p;

bad_fork_cancel_cgroup:
	sched_core_free(p);
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	cgroup_cancel_fork(p, args);
bad_fork_put_pidfd:
	if (clone_flags & CLONE_PIDFD) {
		fput(pidfile);
		put_unused_fd(pidfd);
	}
bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_thread:
	exit_thread(p);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm) {
		mm_clear_owner(p->mm, p);
		mmput(p->mm);
	}
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_perf:
	perf_event_free_task(p);
bad_fork_cleanup_policy:
	lockdep_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
#endif
bad_fork_cleanup_delayacct:
	delayacct_tsk_free(p);
bad_fork_cleanup_count:
	dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
	exit_creds(p);
bad_fork_free:
	WRITE_ONCE(p->__state, TASK_DEAD);
	exit_task_stack_account(p);
	put_task_stack(p);
	delayed_free_task(p);
fork_out:
	spin_lock_irq(&current->sighand->siglock);
	hlist_del_init(&delayed.node);
	spin_unlock_irq(&current->sighand->siglock);
	return ERR_PTR(retval);
}

static inline void init_idle_pids(struct task_struct *idle)
{
	enum pid_type type;

	for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
		INIT_HLIST_NODE(&idle->pid_links[type]); /* not really needed */
		init_task_pid(idle, type, &init_struct_pid);
	}
}

static int idle_dummy(void *dummy)
{
	/* This function is never called */
	return 0;
}

struct task_struct * __init fork_idle(int cpu)
{
	struct task_struct *task;
	struct kernel_clone_args args = {
		.flags		= CLONE_VM,
		.fn		= &idle_dummy,
		.fn_arg		= NULL,
		.kthread	= 1,
		.idle		= 1,
	};

	task = copy_process(&init_struct_pid, 0, cpu_to_node(cpu), &args);
	if (!IS_ERR(task)) {
		init_idle_pids(task);
		init_idle(task, cpu);
	}

	return task;
}

/*
 * This is like kernel_clone(), but shaved down and tailored to just
 * creating io_uring workers. It returns a created task, or an error pointer.
 * The returned task is inactive, and the caller must fire it up through
 * wake_up_new_task(p). All signals are blocked in the created task.
 */
struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node)
{
	unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD|
				CLONE_IO;
	struct kernel_clone_args args = {
		.flags		= ((lower_32_bits(flags) | CLONE_VM |
				    CLONE_UNTRACED) & ~CSIGNAL),
		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
		.fn		= fn,
		.fn_arg		= arg,
		.io_thread	= 1,
		.user_worker	= 1,
	};

	return copy_process(NULL, 0, node, &args);
}

/*
 * Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 *
 * args->exit_signal is expected to be checked for sanity by the caller.
 */
pid_t kernel_clone(struct kernel_clone_args *args)
{
	u64 clone_flags = args->flags;
	struct completion vfork;
	struct pid *pid;
	struct task_struct *p;
	int trace = 0;
	pid_t nr;

	/*
	 * For legacy clone() calls, CLONE_PIDFD uses the parent_tid argument
	 * to return the pidfd. Hence, CLONE_PIDFD and CLONE_PARENT_SETTID are
	 * mutually exclusive. With clone3() CLONE_PIDFD has grown a separate
	 * field in struct clone_args and it still doesn't make sense to have
	 * them both point at the same memory location. Performing this check
	 * here has the advantage that we don't need to have a separate helper
	 * to check for legacy clone().
	 */
	if ((clone_flags & CLONE_PIDFD) &&
	    (clone_flags & CLONE_PARENT_SETTID) &&
	    (args->pidfd == args->parent_tid))
		return -EINVAL;

	/*
	 * Determine whether and which event to report to ptracer. When
	 * called from kernel_thread or CLONE_UNTRACED is explicitly
	 * requested, no event is reported; otherwise, report if the event
	 * for the type of forking is enabled.
	 */
	if (!(clone_flags & CLONE_UNTRACED)) {
		if (clone_flags & CLONE_VFORK)
			trace = PTRACE_EVENT_VFORK;
		else if (args->exit_signal != SIGCHLD)
			trace = PTRACE_EVENT_CLONE;
		else
			trace = PTRACE_EVENT_FORK;

		if (likely(!ptrace_event_enabled(current, trace)))
			trace = 0;
	}

	p = copy_process(NULL, trace, NUMA_NO_NODE, args);
	add_latent_entropy();

	if (IS_ERR(p))
		return PTR_ERR(p);

	/*
	 * Do this prior to waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	trace_sched_process_fork(current, p);

	pid = get_task_pid(p, PIDTYPE_PID);
	nr = pid_vnr(pid);

	if (clone_flags & CLONE_PARENT_SETTID)
		put_user(nr, args->parent_tid);

	if (clone_flags & CLONE_VFORK) {
		p->vfork_done = &vfork;
		init_completion(&vfork);
		get_task_struct(p);
	}

	if (IS_ENABLED(CONFIG_LRU_GEN_WALKS_MMU) && !(clone_flags & CLONE_VM)) {
		/* lock the task to synchronize with memcg migration */
		task_lock(p);
		lru_gen_add_mm(p->mm);
		task_unlock(p);
	}

	wake_up_new_task(p);

	/* forking complete and child started to run, tell ptracer */
	if (unlikely(trace))
		ptrace_event_pid(trace, pid);

	if (clone_flags & CLONE_VFORK) {
		if (!wait_for_vfork_done(p, &vfork))
			ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
	}

	put_pid(pid);
	return nr;
}

/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, const char *name,
		    unsigned long flags)
{
	struct kernel_clone_args args = {
		.flags		= ((lower_32_bits(flags) | CLONE_VM |
				    CLONE_UNTRACED) & ~CSIGNAL),
		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
		.fn		= fn,
		.fn_arg		= arg,
		.name		= name,
		.kthread	= 1,
	};

	return kernel_clone(&args);
}

/*
 * Create a user mode thread.
*/ pid_t user_mode_thread(int (*fn)(void *), void *arg, unsigned long flags) { struct kernel_clone_args args = { .flags = ((lower_32_bits(flags) | CLONE_VM | CLONE_UNTRACED) & ~CSIGNAL), .exit_signal = (lower_32_bits(flags) & CSIGNAL), .fn = fn, .fn_arg = arg, }; return kernel_clone(&args); } #ifdef __ARCH_WANT_SYS_FORK SYSCALL_DEFINE0(fork) { #ifdef CONFIG_MMU struct kernel_clone_args args = { .exit_signal = SIGCHLD, }; return kernel_clone(&args); #else /* can not support in nommu mode */ return -EINVAL; #endif } #endif #ifdef __ARCH_WANT_SYS_VFORK SYSCALL_DEFINE0(vfork) { struct kernel_clone_args args = { .flags = CLONE_VFORK | CLONE_VM, .exit_signal = SIGCHLD, }; return kernel_clone(&args); } #endif #ifdef __ARCH_WANT_SYS_CLONE #ifdef CONFIG_CLONE_BACKWARDS SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, int __user *, parent_tidptr, unsigned long, tls, int __user *, child_tidptr) #elif defined(CONFIG_CLONE_BACKWARDS2) SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags, int __user *, parent_tidptr, int __user *, child_tidptr, unsigned long, tls) #elif defined(CONFIG_CLONE_BACKWARDS3) SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp, int, stack_size, int __user *, parent_tidptr, int __user *, child_tidptr, unsigned long, tls) #else SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, int __user *, parent_tidptr, int __user *, child_tidptr, unsigned long, tls) #endif { struct kernel_clone_args args = { .flags = (lower_32_bits(clone_flags) & ~CSIGNAL), .pidfd = parent_tidptr, .child_tid = child_tidptr, .parent_tid = parent_tidptr, .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL), .stack = newsp, .tls = tls, }; return kernel_clone(&args); } #endif noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs, struct clone_args __user *uargs, size_t usize) { int err; struct clone_args args; pid_t *kset_tid = kargs->set_tid; BUILD_BUG_ON(offsetofend(struct clone_args, tls) != CLONE_ARGS_SIZE_VER0); BUILD_BUG_ON(offsetofend(struct clone_args, set_tid_size) != CLONE_ARGS_SIZE_VER1); BUILD_BUG_ON(offsetofend(struct clone_args, cgroup) != CLONE_ARGS_SIZE_VER2); BUILD_BUG_ON(sizeof(struct clone_args) != CLONE_ARGS_SIZE_VER2); if (unlikely(usize > PAGE_SIZE)) return -E2BIG; if (unlikely(usize < CLONE_ARGS_SIZE_VER0)) return -EINVAL; err = copy_struct_from_user(&args, sizeof(args), uargs, usize); if (err) return err; if (unlikely(args.set_tid_size > MAX_PID_NS_LEVEL)) return -EINVAL; if (unlikely(!args.set_tid && args.set_tid_size > 0)) return -EINVAL; if (unlikely(args.set_tid && args.set_tid_size == 0)) return -EINVAL; /* * Verify that higher 32bits of exit_signal are unset and that * it is a valid signal */ if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) || !valid_signal(args.exit_signal))) return -EINVAL; if ((args.flags & CLONE_INTO_CGROUP) && (args.cgroup > INT_MAX || usize < CLONE_ARGS_SIZE_VER2)) return -EINVAL; *kargs = (struct kernel_clone_args){ .flags = args.flags, .pidfd = u64_to_user_ptr(args.pidfd), .child_tid = u64_to_user_ptr(args.child_tid), .parent_tid = u64_to_user_ptr(args.parent_tid), .exit_signal = args.exit_signal, .stack = args.stack, .stack_size = args.stack_size, .tls = args.tls, .set_tid_size = args.set_tid_size, .cgroup = args.cgroup, }; if (args.set_tid && copy_from_user(kset_tid, u64_to_user_ptr(args.set_tid), (kargs->set_tid_size * sizeof(pid_t)))) return -EFAULT; kargs->set_tid = kset_tid; return 0; } /** * clone3_stack_valid - check and prepare stack * 
@kargs: kernel clone args * * Verify that the stack arguments userspace gave us are sane. * In addition, set the stack direction for userspace since it's easy for us to * determine. */ static inline bool clone3_stack_valid(struct kernel_clone_args *kargs) { if (kargs->stack == 0) { if (kargs->stack_size > 0) return false; } else { if (kargs->stack_size == 0) return false; if (!access_ok((void __user *)kargs->stack, kargs->stack_size)) return false; #if !defined(CONFIG_STACK_GROWSUP) kargs->stack += kargs->stack_size; #endif } return true; } static bool clone3_args_valid(struct kernel_clone_args *kargs) { /* Verify that no unknown flags are passed along. */ if (kargs->flags & ~(CLONE_LEGACY_FLAGS | CLONE_CLEAR_SIGHAND | CLONE_INTO_CGROUP)) return false; /* * - make the CLONE_DETACHED bit reusable for clone3 * - make the CSIGNAL bits reusable for clone3 */ if (kargs->flags & (CLONE_DETACHED | (CSIGNAL & (~CLONE_NEWTIME)))) return false; if ((kargs->flags & (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) == (CLONE_SIGHAND | CLONE_CLEAR_SIGHAND)) return false; if ((kargs->flags & (CLONE_THREAD | CLONE_PARENT)) && kargs->exit_signal) return false; if (!clone3_stack_valid(kargs)) return false; return true; } /** * sys_clone3 - create a new process with specific properties * @uargs: argument structure * @size: size of @uargs * * clone3() is the extensible successor to clone()/clone2(). * It takes a struct as argument that is versioned by its size. * * Return: On success, a positive PID for the child process. * On error, a negative errno number. */ SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size) { int err; struct kernel_clone_args kargs; pid_t set_tid[MAX_PID_NS_LEVEL]; #ifdef __ARCH_BROKEN_SYS_CLONE3 #warning clone3() entry point is missing, please fix return -ENOSYS; #endif kargs.set_tid = set_tid; err = copy_clone_args_from_user(&kargs, uargs, size); if (err) return err; if (!clone3_args_valid(&kargs)) return -EINVAL; return kernel_clone(&kargs); } void walk_process_tree(struct task_struct *top, proc_visitor visitor, void *data) { struct task_struct *leader, *parent, *child; int res; read_lock(&tasklist_lock); leader = top = top->group_leader; down: for_each_thread(leader, parent) { list_for_each_entry(child, &parent->children, sibling) { res = visitor(child, data); if (res) { if (res < 0) goto out; leader = child; goto down; } up: ; } } if (leader != top) { child = leader; parent = child->real_parent; leader = parent->group_leader; goto up; } out: read_unlock(&tasklist_lock); } #ifndef ARCH_MIN_MMSTRUCT_ALIGN #define ARCH_MIN_MMSTRUCT_ALIGN 0 #endif static void sighand_ctor(void *data) { struct sighand_struct *sighand = data; spin_lock_init(&sighand->siglock); init_waitqueue_head(&sighand->signalfd_wqh); } void __init mm_cache_init(void) { unsigned int mm_size; /* * The mm_cpumask is located at the end of mm_struct, and is * dynamically sized based on the maximum CPU number this system * can have, taking hotplug into account (nr_cpu_ids). 
	 */
	mm_size = sizeof(struct mm_struct) + cpumask_size() + mm_cid_size();

	mm_cachep = kmem_cache_create_usercopy("mm_struct",
			mm_size, ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			offsetof(struct mm_struct, saved_auxv),
			sizeof_field(struct mm_struct, saved_auxv),
			NULL);
}

void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
			SLAB_ACCOUNT, sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
			NULL);
	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
#ifdef CONFIG_PER_VMA_LOCK
	vma_lock_cachep = KMEM_CACHE(vma_lock, SLAB_PANIC|SLAB_ACCOUNT);
#endif
	mmap_init();
	nsproxy_cache_init();
}

/*
 * Check constraints on flags passed to the unshare system call.
 */
static int check_unshare_flags(unsigned long unshare_flags)
{
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
				CLONE_NEWUSER|CLONE_NEWPID|CLONE_NEWCGROUP|
				CLONE_NEWTIME))
		return -EINVAL;
	/*
	 * Not implemented, but pretend it works if there is nothing
	 * to unshare.  Note that unsharing the address space or the
	 * signal handlers also needs to unshare the signal queues (aka
	 * CLONE_THREAD).
	 */
	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
		if (!thread_group_empty(current))
			return -EINVAL;
	}
	if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
		if (refcount_read(&current->sighand->count) > 1)
			return -EINVAL;
	}
	if (unshare_flags & CLONE_VM) {
		if (!current_is_single_threaded())
			return -EINVAL;
	}

	return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if (!(unshare_flags & CLONE_FS) || !fs)
		return 0;

	/* don't need lock here; in the worst case we'll do useless copy */
	if (fs->users == 1)
		return 0;

	*new_fsp = copy_fs_struct(fs);
	if (!*new_fsp)
		return -ENOMEM;

	return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
int unshare_fd(unsigned long unshare_flags, unsigned int max_fds,
	       struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, max_fds, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by kernel_clone() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
int ksys_unshare(unsigned long unshare_flags)
{
	struct fs_struct *fs, *new_fs = NULL;
	struct files_struct *new_fd = NULL;
	struct cred *new_cred = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;
	int err;

	/*
	 * If unsharing a user namespace, we must also unshare the thread
	 * group and unshare the filesystem root and working directories.
	 */
	if (unshare_flags & CLONE_NEWUSER)
		unshare_flags |= CLONE_THREAD | CLONE_FS;
	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (unshare_flags & CLONE_VM)
		unshare_flags |= CLONE_SIGHAND;
	/*
	 * If unsharing signal handlers, must also unshare the signal queues.
	 */
	if (unshare_flags & CLONE_SIGHAND)
		unshare_flags |= CLONE_THREAD;
	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (unshare_flags & CLONE_NEWNS)
		unshare_flags |= CLONE_FS;

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	err = unshare_fs(unshare_flags, &new_fs);
	if (err)
		goto bad_unshare_out;
	err = unshare_fd(unshare_flags, NR_OPEN_MAX, &new_fd);
	if (err)
		goto bad_unshare_cleanup_fs;
	err = unshare_userns(unshare_flags, &new_cred);
	if (err)
		goto bad_unshare_cleanup_fd;
	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
					 new_cred, new_fs);
	if (err)
		goto bad_unshare_cleanup_cred;

	if (new_cred) {
		err = set_cred_ucounts(new_cred);
		if (err)
			goto bad_unshare_cleanup_cred;
	}

	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}
		if (unshare_flags & CLONE_NEWIPC) {
			/* Orphan segments in old ns (see sem above). */
			exit_shm(current);
			shm_init_task(current);
		}

		if (new_nsproxy)
			switch_task_namespaces(current, new_nsproxy);

		task_lock(current);

		if (new_fs) {
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd)
			swap(current->files, new_fd);

		task_unlock(current);

		if (new_cred) {
			/* Install the new user namespace */
			commit_creds(new_cred);
			new_cred = NULL;
		}
	}

	perf_event_namespaces(current);

bad_unshare_cleanup_cred:
	if (new_cred)
		put_cred(new_cred);
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_out:
	return err;
}

SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	return ksys_unshare(unshare_flags);
}

/*
 * Helper to unshare the files of the current task.
 * We don't want to expose copy_files internals to
 * the exec layer of the kernel.
 */
int unshare_files(void)
{
	struct task_struct *task = current;
	struct files_struct *old, *copy = NULL;
	int error;

	error = unshare_fd(CLONE_FILES, NR_OPEN_MAX, &copy);
	if (error || !copy)
		return error;

	old = task->files;
	task_lock(task);
	task->files = copy;
	task_unlock(task);
	put_files_struct(old);
	return 0;
}

int sysctl_max_threads(const struct ctl_table *table, int write,
		       void *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int ret;
	int threads = max_threads;
	int min = 1;
	int max = MAX_THREADS;

	t = *table;
	t.data = &threads;
	t.extra1 = &min;
	t.extra2 = &max;

	ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	max_threads = threads;

	return 0;
}
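/*
 * Editor's note: a minimal, hypothetical userspace sketch (not part of this
 * file) of the clone3() ABI that copy_clone_args_from_user() above decodes.
 * The struct is versioned by its size, and CLONE_PIDFD hands the new pidfd
 * back through clone_args.pidfd. Assumes kernel headers and a libc recent
 * enough to expose struct clone_args and SYS_clone3.
 */
#if 0 /* illustration only, not compiled */
#define _GNU_SOURCE
#include <linux/sched.h>	/* struct clone_args, CLONE_PIDFD */
#include <linux/types.h>
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static pid_t fork_with_pidfd(int *pidfd)
{
	struct clone_args args;

	memset(&args, 0, sizeof(args));
	args.flags = CLONE_PIDFD;
	args.pidfd = (__u64)(uintptr_t)pidfd;	/* kernel stores the fd here */
	args.exit_signal = SIGCHLD;

	/* sizeof(args) tells the kernel which clone_args revision we speak. */
	return syscall(SYS_clone3, &args, sizeof(args));
}
#endif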
// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2019 Google, Inc.
 *
 * Authors:
 * Sean Paul <seanpaul@chromium.org>
 */

#include <linux/average.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_modeset_lock.h>
#include <drm/drm_print.h>
#include <drm/drm_self_refresh_helper.h>

/**
 * DOC: overview
 *
 * This helper library provides an easy way for drivers to leverage the atomic
 * framework to implement panel self refresh (SR) support. Drivers are
 * responsible for initializing and cleaning up the SR helpers on load/unload
 * (see &drm_self_refresh_helper_init/&drm_self_refresh_helper_cleanup).
 * The connector is responsible for setting
 * &drm_connector_state.self_refresh_aware to true at runtime if it is SR-aware
 * (meaning it knows how to initiate self refresh on the panel).
 *
 * Once a crtc has enabled SR using &drm_self_refresh_helper_init, the
 * helpers will monitor activity and call back into the driver to enable/disable
 * SR as appropriate. The best way to think about this is that it's a DPMS
 * on/off request with &drm_crtc_state.self_refresh_active set in crtc state
 * that tells you to disable/enable SR on the panel instead of power-cycling it.
 *
 * During SR, drivers may choose to fully disable their crtc/encoder/bridge
 * hardware (in which case no driver changes are necessary), or they can inspect
 * &drm_crtc_state.self_refresh_active if they want to enter low power mode
 * without full disable (in case full disable/enable is too slow).
 *
 * SR will be deactivated if there are any atomic updates affecting the
 * pipe that is in SR mode. If a crtc is driving multiple connectors, all
 * connectors must be SR aware and all will enter/exit SR mode at the same time.
 *
 * If the crtc and connector are SR aware, but the panel connected does not
 * support it (or is otherwise unable to enter SR), the driver should fail
 * atomic_check when &drm_crtc_state.self_refresh_active is true.
*/ #define SELF_REFRESH_AVG_SEED_MS 200 DECLARE_EWMA(psr_time, 4, 4) struct drm_self_refresh_data { struct drm_crtc *crtc; struct delayed_work entry_work; struct mutex avg_mutex; struct ewma_psr_time entry_avg_ms; struct ewma_psr_time exit_avg_ms; }; static void drm_self_refresh_helper_entry_work(struct work_struct *work) { struct drm_self_refresh_data *sr_data = container_of( to_delayed_work(work), struct drm_self_refresh_data, entry_work); struct drm_crtc *crtc = sr_data->crtc; struct drm_device *dev = crtc->dev; struct drm_modeset_acquire_ctx ctx; struct drm_atomic_state *state; struct drm_connector *conn; struct drm_connector_state *conn_state; struct drm_crtc_state *crtc_state; int i, ret = 0; drm_modeset_acquire_init(&ctx, 0); state = drm_atomic_state_alloc(dev); if (!state) { ret = -ENOMEM; goto out_drop_locks; } retry: state->acquire_ctx = &ctx; crtc_state = drm_atomic_get_crtc_state(state, crtc); if (IS_ERR(crtc_state)) { ret = PTR_ERR(crtc_state); goto out; } if (!crtc_state->enable) goto out; ret = drm_atomic_add_affected_connectors(state, crtc); if (ret) goto out; for_each_new_connector_in_state(state, conn, conn_state, i) { if (!conn_state->self_refresh_aware) goto out; } crtc_state->active = false; crtc_state->self_refresh_active = true; ret = drm_atomic_commit(state); if (ret) goto out; out: if (ret == -EDEADLK) { drm_atomic_state_clear(state); ret = drm_modeset_backoff(&ctx); if (!ret) goto retry; } drm_atomic_state_put(state); out_drop_locks: drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); } /** * drm_self_refresh_helper_update_avg_times - Updates a crtc's SR time averages * @state: the state which has just been applied to hardware * @commit_time_ms: the amount of time in ms that this commit took to complete * @new_self_refresh_mask: bitmask of crtc's that have self_refresh_active in * new state * * Called after &drm_mode_config_funcs.atomic_commit_tail, this function will * update the average entry/exit self refresh times on self refresh transitions. * These averages will be used when calculating how long to delay before * entering self refresh mode after activity. */ void drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state, unsigned int commit_time_ms, unsigned int new_self_refresh_mask) { struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; int i; for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) { bool new_self_refresh_active = new_self_refresh_mask & BIT(i); struct drm_self_refresh_data *sr_data = crtc->self_refresh_data; struct ewma_psr_time *time; if (old_crtc_state->self_refresh_active == new_self_refresh_active) continue; if (new_self_refresh_active) time = &sr_data->entry_avg_ms; else time = &sr_data->exit_avg_ms; mutex_lock(&sr_data->avg_mutex); ewma_psr_time_add(time, commit_time_ms); mutex_unlock(&sr_data->avg_mutex); } } EXPORT_SYMBOL(drm_self_refresh_helper_update_avg_times); /** * drm_self_refresh_helper_alter_state - Alters the atomic state for SR exit * @state: the state currently being checked * * Called at the end of atomic check. This function checks the state for flags * incompatible with self refresh exit and changes them. This is a bit * disingenuous since userspace is expecting one thing and we're giving it * another. However in order to keep self refresh entirely hidden from * userspace, this is required. * * At the end, we queue up the self refresh entry work so we can enter PSR after * the desired delay. 
 */
void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	if (state->async_update || !state->allow_modeset) {
		for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
			if (crtc_state->self_refresh_active) {
				state->async_update = false;
				state->allow_modeset = true;
				break;
			}
		}
	}

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct drm_self_refresh_data *sr_data;
		unsigned int delay;

		/* Don't trigger the entry timer when we're already in SR */
		if (crtc_state->self_refresh_active)
			continue;

		sr_data = crtc->self_refresh_data;
		if (!sr_data)
			continue;

		mutex_lock(&sr_data->avg_mutex);
		delay = (ewma_psr_time_read(&sr_data->entry_avg_ms) +
			 ewma_psr_time_read(&sr_data->exit_avg_ms)) * 2;
		mutex_unlock(&sr_data->avg_mutex);

		mod_delayed_work(system_wq, &sr_data->entry_work,
				 msecs_to_jiffies(delay));
	}
}
EXPORT_SYMBOL(drm_self_refresh_helper_alter_state);

/**
 * drm_self_refresh_helper_init - Initializes self refresh helpers for a crtc
 * @crtc: the crtc which supports self refresh capable displays
 *
 * Returns zero if successful or -errno on failure
 */
int drm_self_refresh_helper_init(struct drm_crtc *crtc)
{
	struct drm_self_refresh_data *sr_data = crtc->self_refresh_data;

	/* Helper is already initialized */
	if (WARN_ON(sr_data))
		return -EINVAL;

	sr_data = kzalloc(sizeof(*sr_data), GFP_KERNEL);
	if (!sr_data)
		return -ENOMEM;

	INIT_DELAYED_WORK(&sr_data->entry_work,
			  drm_self_refresh_helper_entry_work);
	sr_data->crtc = crtc;
	mutex_init(&sr_data->avg_mutex);
	ewma_psr_time_init(&sr_data->entry_avg_ms);
	ewma_psr_time_init(&sr_data->exit_avg_ms);

	/*
	 * Seed the averages so they're non-zero (and sufficiently large
	 * for even poorly performing panels). As time goes on, this will be
	 * averaged out and the values will trend to their true value.
	 */
	ewma_psr_time_add(&sr_data->entry_avg_ms, SELF_REFRESH_AVG_SEED_MS);
	ewma_psr_time_add(&sr_data->exit_avg_ms, SELF_REFRESH_AVG_SEED_MS);

	crtc->self_refresh_data = sr_data;
	return 0;
}
EXPORT_SYMBOL(drm_self_refresh_helper_init);

/**
 * drm_self_refresh_helper_cleanup - Cleans up self refresh helpers for a crtc
 * @crtc: the crtc to cleanup
 */
void drm_self_refresh_helper_cleanup(struct drm_crtc *crtc)
{
	struct drm_self_refresh_data *sr_data = crtc->self_refresh_data;

	/* Helper is already uninitialized */
	if (!sr_data)
		return;

	crtc->self_refresh_data = NULL;

	cancel_delayed_work_sync(&sr_data->entry_work);
	kfree(sr_data);
}
EXPORT_SYMBOL(drm_self_refresh_helper_cleanup);
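/*
 * Editor's note: a hedged sketch (not from this file) of the driver-side
 * contract described in the DOC overview above: when self_refresh_active is
 * set in the new crtc state, a driver may enter a low power mode instead of
 * doing a full disable. All "foo_*" names are hypothetical.
 */
#if 0 /* illustration only, not compiled */
static void foo_crtc_atomic_disable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	struct drm_crtc_state *new_state =
		drm_atomic_get_new_crtc_state(state, crtc);

	if (new_state->self_refresh_active) {
		/* Panel retains the frame; skip the full pipeline teardown. */
		foo_enter_low_power(crtc);
		return;
	}

	foo_full_disable(crtc);
}
#endif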
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
* Authors: * Vivek Goyal <vgoyal@redhat.com> * */ #define pr_fmt(fmt) "kexec: " fmt #include <linux/types.h> #include <linux/kernel.h> #include <linux/smp.h> #include <linux/reboot.h> #include <linux/kexec.h> #include <linux/delay.h> #include <linux/elf.h> #include <linux/elfcore.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/memblock.h> #include <asm/bootparam.h> #include <asm/processor.h> #include <asm/hardirq.h> #include <asm/nmi.h> #include <asm/hw_irq.h> #include <asm/apic.h> #include <asm/e820/types.h> #include <asm/io_apic.h> #include <asm/hpet.h> #include <linux/kdebug.h> #include <asm/cpu.h> #include <asm/reboot.h> #include <asm/intel_pt.h> #include <asm/crash.h> #include <asm/cmdline.h> #include <asm/sev.h> /* Used while preparing memory map entries for second kernel */ struct crash_memmap_data { struct boot_params *params; /* Type of memory */ unsigned int type; }; #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) static void kdump_nmi_callback(int cpu, struct pt_regs *regs) { crash_save_cpu(regs, cpu); /* * Disable Intel PT to stop its logging */ cpu_emergency_stop_pt(); kdump_sev_callback(); disable_local_APIC(); } void kdump_nmi_shootdown_cpus(void) { nmi_shootdown_cpus(kdump_nmi_callback); disable_local_APIC(); } /* Override the weak function in kernel/panic.c */ void crash_smp_send_stop(void) { static int cpus_stopped; if (cpus_stopped) return; if (smp_ops.crash_stop_other_cpus) smp_ops.crash_stop_other_cpus(); else smp_send_stop(); cpus_stopped = 1; } #else void crash_smp_send_stop(void) { /* There are no cpus to shootdown */ } #endif void native_machine_crash_shutdown(struct pt_regs *regs) { /* This function is only called after the system * has panicked or is otherwise in a critical state. * The minimum amount of code to allow a kexec'd kernel * to run successfully needs to happen here. * * In practice this means shooting down the other cpus in * an SMP system. */ /* The kernel is broken so disable interrupts */ local_irq_disable(); crash_smp_send_stop(); cpu_emergency_disable_virtualization(); /* * Disable Intel PT to stop its logging */ cpu_emergency_stop_pt(); #ifdef CONFIG_X86_IO_APIC /* Prevent crash_kexec() from deadlocking on ioapic_lock. */ ioapic_zap_locks(); clear_IO_APIC(); #endif lapic_shutdown(); restore_boot_irq_mode(); #ifdef CONFIG_HPET_TIMER hpet_disable(); #endif /* * Non-crash kexec calls enc_kexec_begin() while scheduling is still * active. This allows the callback to wait until all in-flight * shared<->private conversions are complete. In a crash scenario, * enc_kexec_begin() gets called after all but one CPU have been shut * down and interrupts have been disabled. This allows the callback to * detect a race with the conversion and report it. */ x86_platform.guest.enc_kexec_begin(); x86_platform.guest.enc_kexec_finish(); crash_save_cpu(regs, safe_smp_processor_id()); } #if defined(CONFIG_KEXEC_FILE) || defined(CONFIG_CRASH_HOTPLUG) static int get_nr_ram_ranges_callback(struct resource *res, void *arg) { unsigned int *nr_ranges = arg; (*nr_ranges)++; return 0; } /* Gather all the required information to prepare elf headers for ram regions */ static struct crash_mem *fill_up_crash_elf_data(void) { unsigned int nr_ranges = 0; struct crash_mem *cmem; walk_system_ram_res(0, -1, &nr_ranges, get_nr_ram_ranges_callback); if (!nr_ranges) return NULL; /* * Exclusion of crash region and/or crashk_low_res may cause * another range split. So add extra two slots here. 
	 */
	nr_ranges += 2;
	cmem = vzalloc(struct_size(cmem, ranges, nr_ranges));
	if (!cmem)
		return NULL;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;

	return cmem;
}

/*
 * Look for any unwanted ranges between mstart, mend and remove them. This
 * might lead to splits; the split ranges are put in the cmem->ranges[] array.
 */
static int elf_header_exclude_ranges(struct crash_mem *cmem)
{
	int ret = 0;

	/* Exclude the low 1M because it is always reserved */
	ret = crash_exclude_mem_range(cmem, 0, SZ_1M - 1);
	if (ret)
		return ret;

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end)
		ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
					      crashk_low_res.end);

	return ret;
}

static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
{
	struct crash_mem *cmem = arg;

	cmem->ranges[cmem->nr_ranges].start = res->start;
	cmem->ranges[cmem->nr_ranges].end = res->end;
	cmem->nr_ranges++;

	return 0;
}

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(void **addr, unsigned long *sz,
			       unsigned long *nr_mem_ranges)
{
	struct crash_mem *cmem;
	int ret;

	cmem = fill_up_crash_elf_data();
	if (!cmem)
		return -ENOMEM;

	ret = walk_system_ram_res(0, -1, cmem, prepare_elf64_ram_headers_callback);
	if (ret)
		goto out;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(cmem);
	if (ret)
		goto out;

	/* Return the computed number of memory ranges, for hotplug usage */
	*nr_mem_ranges = cmem->nr_ranges;

	/* By default prepare 64bit headers */
	ret = crash_prepare_elf64_headers(cmem, IS_ENABLED(CONFIG_X86_64), addr, sz);

out:
	vfree(cmem);
	return ret;
}
#endif

#ifdef CONFIG_KEXEC_FILE
static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
		return 1;

	memcpy(&params->e820_table[nr_e820_entries], entry,
	       sizeof(struct e820_entry));
	params->e820_entries++;
	return 0;
}

static int memmap_entry_callback(struct resource *res, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820_entry ei;

	ei.addr = res->start;
	ei.size = resource_size(res);
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude elf header region */
	start = image->elf_load_addr;
	end = start + image->elf_headers_sz - 1;
	return crash_exclude_mem_range(cmem, start, end);
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820_entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(struct_size(cmem, ranges, 1));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add the low 1M */
	cmd.type = E820_TYPE_RAM;
	flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_NONE, flags, 0, (1<<20)-1, &cmd,
			    memmap_entry_callback);

	/* Add ACPI tables */
	cmd.type = E820_TYPE_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_TYPE_NVS;
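	/*
	 * Editor's note: the walk below reuses the IORESOURCE_MEM |
	 * IORESOURCE_BUSY flags set for the ACPI-tables walk above; only
	 * the e820 type changes for the NVS lookup.
	 */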
walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd, memmap_entry_callback); /* Add e820 reserved ranges */ cmd.type = E820_TYPE_RESERVED; flags = IORESOURCE_MEM; walk_iomem_res_desc(IORES_DESC_RESERVED, flags, 0, -1, &cmd, memmap_entry_callback); /* Add crashk_low_res region */ if (crashk_low_res.end) { ei.addr = crashk_low_res.start; ei.size = resource_size(&crashk_low_res); ei.type = E820_TYPE_RAM; add_e820_entry(params, &ei); } /* Exclude some ranges from crashk_res and add rest to memmap */ ret = memmap_exclude_ranges(image, cmem, crashk_res.start, crashk_res.end); if (ret) goto out; for (i = 0; i < cmem->nr_ranges; i++) { ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1; /* If entry is less than a page, skip it */ if (ei.size < PAGE_SIZE) continue; ei.addr = cmem->ranges[i].start; ei.type = E820_TYPE_RAM; add_e820_entry(params, &ei); } out: vfree(cmem); return ret; } int crash_load_segments(struct kimage *image) { int ret; unsigned long pnum = 0; struct kexec_buf kbuf = { .image = image, .buf_min = 0, .buf_max = ULONG_MAX, .top_down = false }; /* Prepare elf headers and add a segment */ ret = prepare_elf_headers(&kbuf.buffer, &kbuf.bufsz, &pnum); if (ret) return ret; image->elf_headers = kbuf.buffer; image->elf_headers_sz = kbuf.bufsz; kbuf.memsz = kbuf.bufsz; #ifdef CONFIG_CRASH_HOTPLUG /* * The elfcorehdr segment size accounts for VMCOREINFO, kernel_map, * maximum CPUs and maximum memory ranges. */ if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) pnum = 2 + CONFIG_NR_CPUS_DEFAULT + CONFIG_CRASH_MAX_MEMORY_RANGES; else pnum += 2 + CONFIG_NR_CPUS_DEFAULT; if (pnum < (unsigned long)PN_XNUM) { kbuf.memsz = pnum * sizeof(Elf64_Phdr); kbuf.memsz += sizeof(Elf64_Ehdr); image->elfcorehdr_index = image->nr_segments; /* Mark as usable to crash kernel, else crash kernel fails on boot */ image->elf_headers_sz = kbuf.memsz; } else { pr_err("number of Phdrs %lu exceeds max\n", pnum); } #endif kbuf.buf_align = ELF_CORE_HEADER_ALIGN; kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; ret = kexec_add_buffer(&kbuf); if (ret) return ret; image->elf_load_addr = kbuf.mem; kexec_dprintk("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n", image->elf_load_addr, kbuf.bufsz, kbuf.memsz); return ret; } #endif /* CONFIG_KEXEC_FILE */ #ifdef CONFIG_CRASH_HOTPLUG #undef pr_fmt #define pr_fmt(fmt) "crash hp: " fmt int arch_crash_hotplug_support(struct kimage *image, unsigned long kexec_flags) { #ifdef CONFIG_KEXEC_FILE if (image->file_mode) return 1; #endif /* * Initially, crash hotplug support for kexec_load was added * with the KEXEC_UPDATE_ELFCOREHDR flag. Later, this * functionality was expanded to accommodate multiple kexec * segment updates, leading to the introduction of the * KEXEC_CRASH_HOTPLUG_SUPPORT kexec flag bit. Consequently, * when the kexec tool sends either of these flags, it indicates * that the required kexec segment (elfcorehdr) is excluded from * the SHA calculation. */ return (kexec_flags & KEXEC_UPDATE_ELFCOREHDR || kexec_flags & KEXEC_CRASH_HOTPLUG_SUPPORT); } unsigned int arch_crash_get_elfcorehdr_size(void) { unsigned int sz; /* kernel_map, VMCOREINFO and maximum CPUs */ sz = 2 + CONFIG_NR_CPUS_DEFAULT; if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) sz += CONFIG_CRASH_MAX_MEMORY_RANGES; sz *= sizeof(Elf64_Phdr); return sz; } /** * arch_crash_handle_hotplug_event() - Handle hotplug elfcorehdr changes * @image: a pointer to kexec_crash_image * @arg: struct memory_notify handler for memory hotplug case and * NULL for CPU hotplug case. 
 *
 * Prepare the new elfcorehdr and replace the existing elfcorehdr.
 */
void arch_crash_handle_hotplug_event(struct kimage *image, void *arg)
{
	void *elfbuf = NULL, *old_elfcorehdr;
	unsigned long nr_mem_ranges;
	unsigned long mem, memsz;
	unsigned long elfsz = 0;

	/*
	 * As crash_prepare_elf64_headers() has already described all
	 * possible CPUs, there is no need to update the elfcorehdr
	 * for additional CPU changes.
	 */
	if ((image->file_mode || image->elfcorehdr_updated) &&
	    ((image->hp_action == KEXEC_CRASH_HP_ADD_CPU) ||
	     (image->hp_action == KEXEC_CRASH_HP_REMOVE_CPU)))
		return;

	/*
	 * Create the new elfcorehdr reflecting the changes to CPU and/or
	 * memory resources.
	 */
	if (prepare_elf_headers(&elfbuf, &elfsz, &nr_mem_ranges)) {
		pr_err("unable to create new elfcorehdr");
		goto out;
	}

	/*
	 * Obtain address and size of the elfcorehdr segment, and
	 * check it against the new elfcorehdr buffer.
	 */
	mem = image->segment[image->elfcorehdr_index].mem;
	memsz = image->segment[image->elfcorehdr_index].memsz;
	if (elfsz > memsz) {
		pr_err("update elfcorehdr elfsz %lu > memsz %lu", elfsz, memsz);
		goto out;
	}

	/*
	 * Copy new elfcorehdr over the old elfcorehdr at destination.
	 */
	old_elfcorehdr = kmap_local_page(pfn_to_page(mem >> PAGE_SHIFT));
	if (!old_elfcorehdr) {
		pr_err("mapping elfcorehdr segment failed\n");
		goto out;
	}

	/*
	 * Temporarily invalidate the crash image while the
	 * elfcorehdr is updated.
	 */
	xchg(&kexec_crash_image, NULL);
	memcpy_flushcache(old_elfcorehdr, elfbuf, elfsz);
	xchg(&kexec_crash_image, image);
	kunmap_local(old_elfcorehdr);
	pr_debug("updated elfcorehdr\n");

out:
	vfree(elfbuf);
}
#endif
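/*
 * Editor's note: a worked example of arch_crash_get_elfcorehdr_size() above,
 * using illustrative (not mandated) config values. With
 * CONFIG_NR_CPUS_DEFAULT=64 and CONFIG_CRASH_MAX_MEMORY_RANGES=32768:
 *
 *   sz = (2 + 64 + 32768) * sizeof(Elf64_Phdr)
 *      = 32834 * 56 bytes
 *     ~= 1.75 MiB
 *
 * i.e. the elfcorehdr segment is sized once for the worst case, so later
 * hotplug updates never need to grow it.
 */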
896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 | // SPDX-License-Identifier: GPL-2.0 #include <linux/slab.h> #include "messages.h" #include "ctree.h" #include "subpage.h" #include "btrfs_inode.h" /* * Subpage (sectorsize < PAGE_SIZE) support overview: * * Limitations: * * - Only support 64K page size for now * This is to make metadata handling easier, as 64K page would ensure * all nodesize would fit inside one page, thus we don't need to handle * cases where a tree block crosses several pages. * * - Only metadata read-write for now * The data read-write part is in development. * * - Metadata can't cross 64K page boundary * btrfs-progs and kernel have done that for a while, thus only ancient * filesystems could have such problem. For such case, do a graceful * rejection. * * Special behavior: * * - Metadata * Metadata read is fully supported. * Meaning when reading one tree block will only trigger the read for the * needed range, other unrelated range in the same page will not be touched. * * Metadata write support is partial. * The writeback is still for the full page, but we will only submit * the dirty extent buffers in the page. * * This means, if we have a metadata page like this: * * Page offset * 0 16K 32K 48K 64K * |/////////| |///////////| * \- Tree block A \- Tree block B * * Even if we just want to writeback tree block A, we will also writeback * tree block B if it's also dirty. * * This may cause extra metadata writeback which results more COW. * * Implementation: * * - Common * Both metadata and data will use a new structure, btrfs_subpage, to * record the status of each sector inside a page. This provides the extra * granularity needed. * * - Metadata * Since we have multiple tree blocks inside one page, we can't rely on page * locking anymore, or we will have greatly reduced concurrency or even * deadlocks (hold one tree lock while trying to lock another tree lock in * the same page). * * Thus for metadata locking, subpage support relies on io_tree locking only. * This means a slightly higher tree locking latency. */ bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping) { if (fs_info->sectorsize >= PAGE_SIZE) return false; /* * Only data pages (either through DIO or compression) can have no * mapping. And if page->mapping->host is data inode, it's subpage. * As we have ruled our sectorsize >= PAGE_SIZE case already. */ if (!mapping || !mapping->host || is_data_inode(BTRFS_I(mapping->host))) return true; /* * Now the only remaining case is metadata, which we only go subpage * routine if nodesize < PAGE_SIZE. 
*/ if (fs_info->nodesize < PAGE_SIZE) return true; return false; } void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize) { unsigned int cur = 0; unsigned int nr_bits; ASSERT(IS_ALIGNED(PAGE_SIZE, sectorsize)); nr_bits = PAGE_SIZE / sectorsize; subpage_info->bitmap_nr_bits = nr_bits; subpage_info->uptodate_offset = cur; cur += nr_bits; subpage_info->dirty_offset = cur; cur += nr_bits; subpage_info->writeback_offset = cur; cur += nr_bits; subpage_info->ordered_offset = cur; cur += nr_bits; subpage_info->checked_offset = cur; cur += nr_bits; subpage_info->locked_offset = cur; cur += nr_bits; subpage_info->total_nr_bits = cur; } int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio, enum btrfs_subpage_type type) { struct btrfs_subpage *subpage; /* * We have cases like a dummy extent buffer page, which is not mapped * and doesn't need to be locked. */ if (folio->mapping) ASSERT(folio_test_locked(folio)); /* Either not subpage, or the folio already has private attached. */ if (!btrfs_is_subpage(fs_info, folio->mapping) || folio_test_private(folio)) return 0; subpage = btrfs_alloc_subpage(fs_info, type); if (IS_ERR(subpage)) return PTR_ERR(subpage); folio_attach_private(folio, subpage); return 0; } void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info, struct folio *folio) { struct btrfs_subpage *subpage; /* Either not subpage, or the folio has no private attached. */ if (!btrfs_is_subpage(fs_info, folio->mapping) || !folio_test_private(folio)) return; subpage = folio_detach_private(folio); ASSERT(subpage); btrfs_free_subpage(subpage); } struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info, enum btrfs_subpage_type type) { struct btrfs_subpage *ret; unsigned int real_size; ASSERT(fs_info->sectorsize < PAGE_SIZE); real_size = struct_size(ret, bitmaps, BITS_TO_LONGS(fs_info->subpage_info->total_nr_bits)); ret = kzalloc(real_size, GFP_NOFS); if (!ret) return ERR_PTR(-ENOMEM); spin_lock_init(&ret->lock); if (type == BTRFS_SUBPAGE_METADATA) { atomic_set(&ret->eb_refs, 0); } else { atomic_set(&ret->readers, 0); atomic_set(&ret->writers, 0); } return ret; } void btrfs_free_subpage(struct btrfs_subpage *subpage) { kfree(subpage); } /* * Increase the eb_refs of current subpage. * * This is important for eb allocation, to prevent race with last eb freeing * of the same page. * With the eb_refs increased before the eb is inserted into the radix tree, * detach_extent_buffer_page() won't detach the folio private while we're still * allocating the extent buffer. */ void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio) { struct btrfs_subpage *subpage; if (!btrfs_is_subpage(fs_info, folio->mapping)) return; ASSERT(folio_test_private(folio) && folio->mapping); lockdep_assert_held(&folio->mapping->i_private_lock); subpage = folio_get_private(folio); atomic_inc(&subpage->eb_refs); } void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio) { struct btrfs_subpage *subpage; if (!btrfs_is_subpage(fs_info, folio->mapping)) return; ASSERT(folio_test_private(folio) && folio->mapping); lockdep_assert_held(&folio->mapping->i_private_lock); subpage = folio_get_private(folio); ASSERT(atomic_read(&subpage->eb_refs)); atomic_dec(&subpage->eb_refs); }
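/*
 * Illustrative sketch (not part of the kernel source): the packed bitmap
 * layout built by btrfs_init_subpage_info() above, worked through for a
 * 64K page with 4K sectors. Each status type gets PAGE_SIZE / sectorsize
 * = 16 consecutive bits of one shared bitmap, which is how a (type, file
 * offset) pair maps to a single bit index.
 */
#include <stdio.h>

int main(void)
{
	const char *names[] = {
		"uptodate", "dirty", "writeback", "ordered", "checked", "locked"
	};
	const unsigned int page_size = 65536, sectorsize = 4096;
	const unsigned int nr_bits = page_size / sectorsize; /* 16 */
	unsigned int cur = 0;

	for (unsigned int i = 0; i < 6; i++) {
		printf("%-9s: bits %2u..%2u\n", names[i], cur, cur + nr_bits - 1);
		cur += nr_bits;
	}
	/* total_nr_bits = 96, i.e. two unsigned longs on a 64-bit machine. */
	printf("total_nr_bits = %u\n", cur);
	return 0;
}

static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { /* For subpage support, the folio must be a single page.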
*/ ASSERT(folio_order(folio) == 0); /* Basic checks */ ASSERT(folio_test_private(folio) && folio_get_private(folio)); ASSERT(IS_ALIGNED(start, fs_info->sectorsize) && IS_ALIGNED(len, fs_info->sectorsize)); /* * The range check only works for mapped page, we can still have * unmapped page like dummy extent buffer pages. */ if (folio->mapping) ASSERT(folio_pos(folio) <= start && start + len <= folio_pos(folio) + PAGE_SIZE); } #define subpage_calc_start_bit(fs_info, folio, name, start, len) \ ({ \ unsigned int __start_bit; \ \ btrfs_subpage_assert(fs_info, folio, start, len); \ __start_bit = offset_in_page(start) >> fs_info->sectorsize_bits; \ __start_bit += fs_info->subpage_info->name##_offset; \ __start_bit; \ }) void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { struct btrfs_subpage *subpage = folio_get_private(folio); const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len); const int nbits = len >> fs_info->sectorsize_bits; unsigned long flags; btrfs_subpage_assert(fs_info, folio, start, len); spin_lock_irqsave(&subpage->lock, flags); /* * Even though it's just for reading the page, no one should have * locked the subpage range. */ ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits)); bitmap_set(subpage->bitmaps, start_bit, nbits); atomic_add(nbits, &subpage->readers); spin_unlock_irqrestore(&subpage->lock, flags); } void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { struct btrfs_subpage *subpage = folio_get_private(folio); const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len); const int nbits = len >> fs_info->sectorsize_bits; unsigned long flags; bool is_data; bool last; btrfs_subpage_assert(fs_info, folio, start, len); is_data = is_data_inode(BTRFS_I(folio->mapping->host)); spin_lock_irqsave(&subpage->lock, flags); /* The range should have already been locked. */ ASSERT(bitmap_test_range_all_set(subpage->bitmaps, start_bit, nbits)); ASSERT(atomic_read(&subpage->readers) >= nbits); bitmap_clear(subpage->bitmaps, start_bit, nbits); last = atomic_sub_and_test(nbits, &subpage->readers); /* * For data we need to unlock the page if the last read has finished. * * And please don't replace @last with atomic_sub_and_test() call * inside if () condition. * As we want the atomic_sub_and_test() to be always executed. */ if (is_data && last) folio_unlock(folio); spin_unlock_irqrestore(&subpage->lock, flags); } static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len) { u64 orig_start = *start; u32 orig_len = *len; *start = max_t(u64, folio_pos(folio), orig_start); /* * For certain call sites like btrfs_drop_pages(), we may have pages * beyond the target range. In that case, just set @len to 0, subpage * helpers can handle @len == 0 without any problem. 
*/ if (folio_pos(folio) >= orig_start + orig_len) *len = 0; else *len = min_t(u64, folio_pos(folio) + PAGE_SIZE, orig_start + orig_len) - *start; } static void btrfs_subpage_start_writer(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { struct btrfs_subpage *subpage = folio_get_private(folio); const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len); const int nbits = (len >> fs_info->sectorsize_bits); unsigned long flags; int ret; btrfs_subpage_assert(fs_info, folio, start, len); spin_lock_irqsave(&subpage->lock, flags); ASSERT(atomic_read(&subpage->readers) == 0); ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits)); bitmap_set(subpage->bitmaps, start_bit, nbits); ret = atomic_add_return(nbits, &subpage->writers); ASSERT(ret == nbits); spin_unlock_irqrestore(&subpage->lock, flags); } static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { struct btrfs_subpage *subpage = folio_get_private(folio); const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len); const int nbits = (len >> fs_info->sectorsize_bits); unsigned long flags; bool last; btrfs_subpage_assert(fs_info, folio, start, len); spin_lock_irqsave(&subpage->lock, flags); /* * We have call sites passing @locked_page into * extent_clear_unlock_delalloc() for the compression path. * * This @locked_page is locked by plain lock_page(), thus its * subpage::writers is 0. Handle them in a special way. */ if (atomic_read(&subpage->writers) == 0) { spin_unlock_irqrestore(&subpage->lock, flags); return true; } ASSERT(atomic_read(&subpage->writers) >= nbits); /* The target range should have been locked. */ ASSERT(bitmap_test_range_all_set(subpage->bitmaps, start_bit, nbits)); bitmap_clear(subpage->bitmaps, start_bit, nbits); last = atomic_sub_and_test(nbits, &subpage->writers); spin_unlock_irqrestore(&subpage->lock, flags); return last; }
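/*
 * Illustrative sketch (not from the kernel tree): a minimal userspace model
 * of the writer accounting above. A "page" is carved into 16 sectors;
 * locking a range sets the matching bits in a locked bitmap and bumps a
 * writer count, and unlocking clears them and reports whether the last
 * writer is gone (the point where the real code unlocks the folio). All
 * names here are invented for the example.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define TOY_SECTORS_PER_PAGE 16

struct toy_subpage {
	uint16_t locked_bitmap; /* one bit per sector */
	int writers;
};

static void toy_start_writer(struct toy_subpage *sp, unsigned int first, unsigned int nbits)
{
	uint16_t mask = (uint16_t)(((1u << nbits) - 1) << first);

	assert(first + nbits <= TOY_SECTORS_PER_PAGE);
	assert((sp->locked_bitmap & mask) == 0); /* range must be unlocked */
	sp->locked_bitmap |= mask;
	sp->writers += nbits;
}

static bool toy_end_writer(struct toy_subpage *sp, unsigned int first, unsigned int nbits)
{
	uint16_t mask = (uint16_t)(((1u << nbits) - 1) << first);

	assert((sp->locked_bitmap & mask) == mask); /* range must be locked */
	sp->locked_bitmap &= (uint16_t)~mask;
	sp->writers -= nbits;
	return sp->writers == 0; /* last writer => unlock the whole page */
}

/* * Lock a folio for delalloc page writeback. * * Return -EAGAIN if the page is not properly initialized. * Return 0 with the page locked, and writer counter updated. * * Even with 0 returned, the page still needs an extra check to make sure * it's really the correct page, as the caller is using * filemap_get_folios_contig(), which can race with page invalidating.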
*/ int btrfs_folio_start_writer_lock(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) { folio_lock(folio); return 0; } folio_lock(folio); if (!folio_test_private(folio) || !folio_get_private(folio)) { folio_unlock(folio); return -EAGAIN; } btrfs_subpage_clamp_range(folio, &start, &len); btrfs_subpage_start_writer(fs_info, folio, start, len); return 0; } void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) { folio_unlock(folio); return; } btrfs_subpage_clamp_range(folio, &start, &len); if (btrfs_subpage_end_and_test_writer(fs_info, folio, start, len)) folio_unlock(folio); } #define subpage_test_bitmap_all_set(fs_info, subpage, name) \ bitmap_test_range_all_set(subpage->bitmaps, \ fs_info->subpage_info->name##_offset, \ fs_info->subpage_info->bitmap_nr_bits) #define subpage_test_bitmap_all_zero(fs_info, subpage, name) \ bitmap_test_range_all_zero(subpage->bitmaps, \ fs_info->subpage_info->name##_offset, \ fs_info->subpage_info->bitmap_nr_bits) void btrfs_subpage_set_uptodate(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { struct btrfs_subpage *subpage = folio_get_private(folio); unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, uptodate, start, len); unsigned long flags; spin_lock_irqsave(&subpage->lock, flags); bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate)) folio_mark_uptodate(folio); spin_unlock_irqrestore(&subpage->lock, flags); } void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { struct btrfs_subpage *subpage = folio_get_private(folio); unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, uptodate, start, len); unsigned long flags; spin_lock_irqsave(&subpage->lock, flags); bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); folio_clear_uptodate(folio); spin_unlock_irqrestore(&subpage->lock, flags); } void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { struct btrfs_subpage *subpage = folio_get_private(folio); unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, dirty, start, len); unsigned long flags; spin_lock_irqsave(&subpage->lock, flags); bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); spin_unlock_irqrestore(&subpage->lock, flags); folio_mark_dirty(folio); } /* * Extra clear_and_test function for subpage dirty bitmap. * * Return true if we're the last bits in the dirty_bitmap and clear the * dirty_bitmap. * Return false otherwise. * * NOTE: Callers should manually clear page dirty for true case, as we have * extra handling for tree blocks. 
*/ bool btrfs_subpage_clear_and_test_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { struct btrfs_subpage *subpage = folio_get_private(folio); unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, dirty, start, len); unsigned long flags; bool last = false; spin_lock_irqsave(&subpage->lock, flags); bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty)) last = true; spin_unlock_irqrestore(&subpage->lock, flags); return last; } void btrfs_subpage_clear_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { bool last; last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, start, len); if (last) folio_clear_dirty_for_io(folio); } void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { struct btrfs_subpage *subpage = folio_get_private(folio); unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, writeback, start, len); unsigned long flags; spin_lock_irqsave(&subpage->lock, flags); bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); if (!folio_test_writeback(folio)) folio_start_writeback(folio); spin_unlock_irqrestore(&subpage->lock, flags); } void btrfs_subpage_clear_writeback(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { struct btrfs_subpage *subpage = folio_get_private(folio); unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, writeback, start, len); unsigned long flags; spin_lock_irqsave(&subpage->lock, flags); bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) { ASSERT(folio_test_writeback(folio)); folio_end_writeback(folio); } spin_unlock_irqrestore(&subpage->lock, flags); } void btrfs_subpage_set_ordered(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { struct btrfs_subpage *subpage = folio_get_private(folio); unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, ordered, start, len); unsigned long flags; spin_lock_irqsave(&subpage->lock, flags); bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); folio_set_ordered(folio); spin_unlock_irqrestore(&subpage->lock, flags); } void btrfs_subpage_clear_ordered(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { struct btrfs_subpage *subpage = folio_get_private(folio); unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, ordered, start, len); unsigned long flags; spin_lock_irqsave(&subpage->lock, flags); bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered)) folio_clear_ordered(folio); spin_unlock_irqrestore(&subpage->lock, flags); } void btrfs_subpage_set_checked(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { struct btrfs_subpage *subpage = folio_get_private(folio); unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, checked, start, len); unsigned long flags; spin_lock_irqsave(&subpage->lock, flags); bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); if (subpage_test_bitmap_all_set(fs_info, subpage, checked)) folio_set_checked(folio); spin_unlock_irqrestore(&subpage->lock, flags); } void btrfs_subpage_clear_checked(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { struct btrfs_subpage 
*subpage = folio_get_private(folio); unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, checked, start, len); unsigned long flags; spin_lock_irqsave(&subpage->lock, flags); bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); folio_clear_checked(folio); spin_unlock_irqrestore(&subpage->lock, flags); } /* * Unlike set/clear which is dependent on each page status, for test all bits * are tested in the same way. */ #define IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(name) \ bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info, \ struct folio *folio, u64 start, u32 len) \ { \ struct btrfs_subpage *subpage = folio_get_private(folio); \ unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, \ name, start, len); \ unsigned long flags; \ bool ret; \ \ spin_lock_irqsave(&subpage->lock, flags); \ ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit, \ len >> fs_info->sectorsize_bits); \ spin_unlock_irqrestore(&subpage->lock, flags); \ return ret; \ } IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate); IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty); IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback); IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered); IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked); /* * Note that, in selftests (extent-io-tests), we can have empty fs_info passed * in. We only test sectorsize == PAGE_SIZE cases so far, thus we can fall * back to regular sectorsize branch. */ #define IMPLEMENT_BTRFS_PAGE_OPS(name, folio_set_func, \ folio_clear_func, folio_test_func) \ void btrfs_folio_set_##name(const struct btrfs_fs_info *fs_info, \ struct folio *folio, u64 start, u32 len) \ { \ if (unlikely(!fs_info) || \ !btrfs_is_subpage(fs_info, folio->mapping)) { \ folio_set_func(folio); \ return; \ } \ btrfs_subpage_set_##name(fs_info, folio, start, len); \ } \ void btrfs_folio_clear_##name(const struct btrfs_fs_info *fs_info, \ struct folio *folio, u64 start, u32 len) \ { \ if (unlikely(!fs_info) || \ !btrfs_is_subpage(fs_info, folio->mapping)) { \ folio_clear_func(folio); \ return; \ } \ btrfs_subpage_clear_##name(fs_info, folio, start, len); \ } \ bool btrfs_folio_test_##name(const struct btrfs_fs_info *fs_info, \ struct folio *folio, u64 start, u32 len) \ { \ if (unlikely(!fs_info) || \ !btrfs_is_subpage(fs_info, folio->mapping)) \ return folio_test_func(folio); \ return btrfs_subpage_test_##name(fs_info, folio, start, len); \ } \ void btrfs_folio_clamp_set_##name(const struct btrfs_fs_info *fs_info, \ struct folio *folio, u64 start, u32 len) \ { \ if (unlikely(!fs_info) || \ !btrfs_is_subpage(fs_info, folio->mapping)) { \ folio_set_func(folio); \ return; \ } \ btrfs_subpage_clamp_range(folio, &start, &len); \ btrfs_subpage_set_##name(fs_info, folio, start, len); \ } \ void btrfs_folio_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \ struct folio *folio, u64 start, u32 len) \ { \ if (unlikely(!fs_info) || \ !btrfs_is_subpage(fs_info, folio->mapping)) { \ folio_clear_func(folio); \ return; \ } \ btrfs_subpage_clamp_range(folio, &start, &len); \ btrfs_subpage_clear_##name(fs_info, folio, start, len); \ } \ bool btrfs_folio_clamp_test_##name(const struct btrfs_fs_info *fs_info, \ struct folio *folio, u64 start, u32 len) \ { \ if (unlikely(!fs_info) || \ !btrfs_is_subpage(fs_info, folio->mapping)) \ return folio_test_func(folio); \ btrfs_subpage_clamp_range(folio, &start, &len); \ return btrfs_subpage_test_##name(fs_info, folio, start, len); \ } IMPLEMENT_BTRFS_PAGE_OPS(uptodate, folio_mark_uptodate, folio_clear_uptodate, folio_test_uptodate); IMPLEMENT_BTRFS_PAGE_OPS(dirty, 
folio_mark_dirty, folio_clear_dirty_for_io, folio_test_dirty); IMPLEMENT_BTRFS_PAGE_OPS(writeback, folio_start_writeback, folio_end_writeback, folio_test_writeback); IMPLEMENT_BTRFS_PAGE_OPS(ordered, folio_set_ordered, folio_clear_ordered, folio_test_ordered); IMPLEMENT_BTRFS_PAGE_OPS(checked, folio_set_checked, folio_clear_checked, folio_test_checked); /* * Make sure not only the page dirty bit is cleared, but also subpage dirty bit * is cleared. */ void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { struct btrfs_subpage *subpage; unsigned int start_bit; unsigned int nbits; unsigned long flags; if (!IS_ENABLED(CONFIG_BTRFS_ASSERT)) return; if (!btrfs_is_subpage(fs_info, folio->mapping)) { ASSERT(!folio_test_dirty(folio)); return; } start_bit = subpage_calc_start_bit(fs_info, folio, dirty, start, len); nbits = len >> fs_info->sectorsize_bits; subpage = folio_get_private(folio); ASSERT(subpage); spin_lock_irqsave(&subpage->lock, flags); ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits)); spin_unlock_irqrestore(&subpage->lock, flags); } /* * Handle different locked pages with different page sizes: * * - Page locked by plain lock_page() * It should not have any subpage::writers count. * Can be unlocked by unlock_page(). * This is the most common locked page for __extent_writepage() called * inside extent_write_cache_pages(). * Rarer cases include the @locked_page from extent_write_locked_range(). * * - Page locked by lock_delalloc_pages() * There is only one caller, all pages except @locked_page for * extent_write_locked_range(). * In this case, we have to call subpage helper to handle the case. */ void btrfs_folio_unlock_writer(struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { struct btrfs_subpage *subpage; ASSERT(folio_test_locked(folio)); /* For non-subpage case, we just unlock the page */ if (!btrfs_is_subpage(fs_info, folio->mapping)) { folio_unlock(folio); return; } ASSERT(folio_test_private(folio) && folio_get_private(folio)); subpage = folio_get_private(folio); /* * For subpage case, there are two types of locked page. With or * without writers number. * * Since we own the page lock, no one else could touch subpage::writers * and we are safe to do several atomic operations without spinlock. */ if (atomic_read(&subpage->writers) == 0) { /* No writers, locked by plain lock_page() */ folio_unlock(folio); return; } /* Have writers, use proper subpage helper to end it */ btrfs_folio_end_writer_lock(fs_info, folio, start, len); } /* * This is for folio already locked by plain lock_page()/folio_lock(), which * doesn't have any subpage awareness. * * This populates the involved subpage ranges so that subpage helpers can * properly unlock them. */ void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { struct btrfs_subpage *subpage; unsigned long flags; unsigned int start_bit; unsigned int nbits; int ret; ASSERT(folio_test_locked(folio)); if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, folio->mapping)) return; subpage = folio_get_private(folio); start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len); nbits = len >> fs_info->sectorsize_bits; spin_lock_irqsave(&subpage->lock, flags); /* Target range should not yet be locked. 
*/ ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits)); bitmap_set(subpage->bitmaps, start_bit, nbits); ret = atomic_add_return(nbits, &subpage->writers); ASSERT(ret <= fs_info->subpage_info->bitmap_nr_bits); spin_unlock_irqrestore(&subpage->lock, flags); } /* * Find any subpage writer locked range inside @folio, starting at file offset * @search_start. The caller should ensure the folio is locked. * * Return true and update @found_start_ret and @found_len_ret to the first * writer locked range. * Return false if there is no writer locked range. */ bool btrfs_subpage_find_writer_locked(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 search_start, u64 *found_start_ret, u32 *found_len_ret) { struct btrfs_subpage_info *subpage_info = fs_info->subpage_info; struct btrfs_subpage *subpage = folio_get_private(folio); const unsigned int len = PAGE_SIZE - offset_in_page(search_start); const unsigned int start_bit = subpage_calc_start_bit(fs_info, folio, locked, search_start, len); const unsigned int locked_bitmap_start = subpage_info->locked_offset; const unsigned int locked_bitmap_end = locked_bitmap_start + subpage_info->bitmap_nr_bits; unsigned long flags; int first_zero; int first_set; bool found = false; ASSERT(folio_test_locked(folio)); spin_lock_irqsave(&subpage->lock, flags); first_set = find_next_bit(subpage->bitmaps, locked_bitmap_end, start_bit); if (first_set >= locked_bitmap_end) goto out; found = true; *found_start_ret = folio_pos(folio) + ((first_set - locked_bitmap_start) << fs_info->sectorsize_bits); /* * Since @first_set is ensured to be smaller than locked_bitmap_end * here, @found_start_ret should be inside the folio. */ ASSERT(*found_start_ret < folio_pos(folio) + PAGE_SIZE); first_zero = find_next_zero_bit(subpage->bitmaps, locked_bitmap_end, first_set); *found_len_ret = (first_zero - first_set) << fs_info->sectorsize_bits; out: spin_unlock_irqrestore(&subpage->lock, flags); return found; } /* * Unlike btrfs_folio_end_writer_lock() which unlocks a specified subpage range, * this ends all writer locked ranges of a page. * * This is for the locked page of __extent_writepage(), as the locked page * can contain several locked subpage ranges. */ void btrfs_folio_end_all_writers(const struct btrfs_fs_info *fs_info, struct folio *folio) { struct btrfs_subpage *subpage = folio_get_private(folio); u64 folio_start = folio_pos(folio); u64 cur = folio_start; ASSERT(folio_test_locked(folio)); if (!btrfs_is_subpage(fs_info, folio->mapping)) { folio_unlock(folio); return; } /* The page has no new delalloc range locked on it. Just plain unlock. 
*/ if (atomic_read(&subpage->writers) == 0) { folio_unlock(folio); return; } while (cur < folio_start + PAGE_SIZE) { u64 found_start; u32 found_len; bool found; bool last; found = btrfs_subpage_find_writer_locked(fs_info, folio, cur, &found_start, &found_len); if (!found) break; last = btrfs_subpage_end_and_test_writer(fs_info, folio, found_start, found_len); if (last) { folio_unlock(folio); break; } cur = found_start + found_len; } } #define GET_SUBPAGE_BITMAP(subpage, subpage_info, name, dst) \ bitmap_cut(dst, subpage->bitmaps, 0, \ subpage_info->name##_offset, subpage_info->bitmap_nr_bits) void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info, struct folio *folio, u64 start, u32 len) { struct btrfs_subpage_info *subpage_info = fs_info->subpage_info; struct btrfs_subpage *subpage; unsigned long uptodate_bitmap; unsigned long dirty_bitmap; unsigned long writeback_bitmap; unsigned long ordered_bitmap; unsigned long checked_bitmap; unsigned long locked_bitmap; unsigned long flags; ASSERT(folio_test_private(folio) && folio_get_private(folio)); ASSERT(subpage_info); subpage = folio_get_private(folio); spin_lock_irqsave(&subpage->lock, flags); GET_SUBPAGE_BITMAP(subpage, subpage_info, uptodate, &uptodate_bitmap); GET_SUBPAGE_BITMAP(subpage, subpage_info, dirty, &dirty_bitmap); GET_SUBPAGE_BITMAP(subpage, subpage_info, writeback, &writeback_bitmap); GET_SUBPAGE_BITMAP(subpage, subpage_info, ordered, &ordered_bitmap); GET_SUBPAGE_BITMAP(subpage, subpage_info, checked, &checked_bitmap); GET_SUBPAGE_BITMAP(subpage, subpage_info, locked, &locked_bitmap); spin_unlock_irqrestore(&subpage->lock, flags); dump_page(folio_page(folio, 0), "btrfs subpage dump"); btrfs_warn(fs_info, "start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl dirty=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl locked=%*pbl", start, len, folio_pos(folio), subpage_info->bitmap_nr_bits, &uptodate_bitmap, subpage_info->bitmap_nr_bits, &dirty_bitmap, subpage_info->bitmap_nr_bits, &writeback_bitmap, subpage_info->bitmap_nr_bits, &ordered_bitmap, subpage_info->bitmap_nr_bits, &checked_bitmap, subpage_info->bitmap_nr_bits, &locked_bitmap); }
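/*
 * Illustrative sketch (not from the kernel tree): the run-discovery pattern
 * that btrfs_subpage_find_writer_locked() uses above. find_next_bit() finds
 * the first locked sector at or after the search point and
 * find_next_zero_bit() finds where that run ends; the difference is the
 * length of the locked range. The toy helper below models both kernel
 * helpers on a 16-bit bitmap.
 */
#include <stdio.h>

static unsigned int toy_find_next(unsigned long bitmap, unsigned int size, unsigned int start, int want_set)
{
	for (unsigned int i = start; i < size; i++)
		if (!!(bitmap & (1UL << i)) == want_set)
			return i;
	return size; /* like the kernel helpers: "size" means not found */
}

int main(void)
{
	unsigned long locked = 0x3C0C; /* sectors 2-3 and 10-13 are locked */
	unsigned int size = 16, start = 4;
	unsigned int first_set = toy_find_next(locked, size, start, 1);

	if (first_set < size) {
		unsigned int first_zero = toy_find_next(locked, size, first_set, 0);
		printf("locked run: sectors %u..%u\n", first_set, first_zero - 1);
	}
	return 0;
}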
// SPDX-License-Identifier: GPL-2.0-only /* * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and * Shaohua Li <shli@fb.com> */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/init.h> #include "null_blk.h" #undef pr_fmt #define pr_fmt(fmt) "null_blk: " fmt #define FREE_BATCH 16 #define TICKS_PER_SEC 50ULL #define TIMER_INTERVAL (NSEC_PER_SEC / TICKS_PER_SEC) #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION static DECLARE_FAULT_ATTR(null_timeout_attr); static DECLARE_FAULT_ATTR(null_requeue_attr); static DECLARE_FAULT_ATTR(null_init_hctx_attr); #endif static inline u64 mb_per_tick(int mbps) { return (1 << 20) / TICKS_PER_SEC * ((u64) mbps); }
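/*
 * Illustrative sketch (not from the driver): with TICKS_PER_SEC = 50 the
 * throttling timer fires every 20ms, and mb_per_tick() is the byte budget a
 * bandwidth-limited device may move per tick. For example, mbps = 100
 * gives (1 << 20) / 50 * 100 = 2,097,152 bytes per 20ms tick, which adds
 * up to 100 MiB/s.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_TICKS_PER_SEC 50ULL

static uint64_t toy_mb_per_tick(int mbps)
{
	return (1 << 20) / TOY_TICKS_PER_SEC * (uint64_t)mbps;
}

int main(void)
{
	printf("budget per tick at 100 MiB/s: %llu bytes\n",
	       (unsigned long long)toy_mb_per_tick(100));
	return 0;
}

/* * Status flags for nullb_device. * * CONFIGURED: Device has been configured and turned on. Cannot reconfigure. * UP: Device is currently on and visible in userspace.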
* THROTTLED: Device is being throttled. * CACHE: Device is using a write-back cache. */ enum nullb_device_flags { NULLB_DEV_FL_CONFIGURED = 0, NULLB_DEV_FL_UP = 1, NULLB_DEV_FL_THROTTLED = 2, NULLB_DEV_FL_CACHE = 3, }; #define MAP_SZ ((PAGE_SIZE >> SECTOR_SHIFT) + 2) /* * nullb_page is a page in memory for nullb devices. * * @page: The page holding the data. * @bitmap: The bitmap represents which sector in the page has data. * Each bit represents one block size. For example, sector 8 * will use the 7th bit * The highest 2 bits of bitmap are for special purpose. LOCK means the cache * page is being flushed to storage. FREE means the cache page is freed and * should be skipped from flushing to storage. Please see * null_make_cache_space(). */ struct nullb_page { struct page *page; DECLARE_BITMAP(bitmap, MAP_SZ); }; #define NULLB_PAGE_LOCK (MAP_SZ - 1) #define NULLB_PAGE_FREE (MAP_SZ - 2) static LIST_HEAD(nullb_list); static struct mutex lock; static int null_major; static DEFINE_IDA(nullb_indexes); static struct blk_mq_tag_set tag_set; enum { NULL_IRQ_NONE = 0, NULL_IRQ_SOFTIRQ = 1, NULL_IRQ_TIMER = 2, }; static bool g_virt_boundary; module_param_named(virt_boundary, g_virt_boundary, bool, 0444); MODULE_PARM_DESC(virt_boundary, "Require a virtual boundary for the device. Default: False"); static int g_no_sched; module_param_named(no_sched, g_no_sched, int, 0444); MODULE_PARM_DESC(no_sched, "No io scheduler"); static int g_submit_queues = 1; module_param_named(submit_queues, g_submit_queues, int, 0444); MODULE_PARM_DESC(submit_queues, "Number of submission queues"); static int g_poll_queues = 1; module_param_named(poll_queues, g_poll_queues, int, 0444); MODULE_PARM_DESC(poll_queues, "Number of IOPOLL submission queues"); static int g_home_node = NUMA_NO_NODE; module_param_named(home_node, g_home_node, int, 0444); MODULE_PARM_DESC(home_node, "Home node for the device"); #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION /* * For more details about fault injection, please refer to * Documentation/fault-injection/fault-injection.rst. */ static char g_timeout_str[80]; module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444); MODULE_PARM_DESC(timeout, "Fault injection. timeout=<interval>,<probability>,<space>,<times>"); static char g_requeue_str[80]; module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444); MODULE_PARM_DESC(requeue, "Fault injection. requeue=<interval>,<probability>,<space>,<times>"); static char g_init_hctx_str[80]; module_param_string(init_hctx, g_init_hctx_str, sizeof(g_init_hctx_str), 0444); MODULE_PARM_DESC(init_hctx, "Fault injection to fail hctx init. init_hctx=<interval>,<probability>,<space>,<times>"); #endif
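/*
 * Illustrative sketch (not from the driver): the nullb_page bitmap layout
 * described above, worked through for 4K pages. PAGE_SIZE >> SECTOR_SHIFT
 * gives 8 data bits (one per 512B sector) plus the 2 reserved bits, so
 * MAP_SZ is 10, with FREE at bit 8 and LOCK at bit 9.
 */
#include <stdio.h>

#define TOY_PAGE_SIZE 4096u
#define TOY_SECTOR_SHIFT 9
#define TOY_MAP_SZ ((TOY_PAGE_SIZE >> TOY_SECTOR_SHIFT) + 2)

int main(void)
{
	printf("data bits: 0..%u\n", TOY_MAP_SZ - 3); /* one bit per sector */
	printf("FREE bit : %u\n", TOY_MAP_SZ - 2);
	printf("LOCK bit : %u\n", TOY_MAP_SZ - 1);
	return 0;
}

/* * Historic queue modes. * * These days nothing but NULL_Q_MQ is actually supported, but we keep the * enum for error reporting.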
*/ enum { NULL_Q_BIO = 0, NULL_Q_RQ = 1, NULL_Q_MQ = 2, }; static int g_queue_mode = NULL_Q_MQ; static int null_param_store_val(const char *str, int *val, int min, int max) { int ret, new_val; ret = kstrtoint(str, 10, &new_val); if (ret) return -EINVAL; if (new_val < min || new_val > max) return -EINVAL; *val = new_val; return 0; } static int null_set_queue_mode(const char *str, const struct kernel_param *kp) { return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ); } static const struct kernel_param_ops null_queue_mode_param_ops = { .set = null_set_queue_mode, .get = param_get_int, }; device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444); MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)"); static int g_gb = 250; module_param_named(gb, g_gb, int, 0444); MODULE_PARM_DESC(gb, "Size in GB"); static int g_bs = 512; module_param_named(bs, g_bs, int, 0444); MODULE_PARM_DESC(bs, "Block size (in bytes)"); static int g_max_sectors; module_param_named(max_sectors, g_max_sectors, int, 0444); MODULE_PARM_DESC(max_sectors, "Maximum size of a command (in 512B sectors)"); static unsigned int nr_devices = 1; module_param(nr_devices, uint, 0444); MODULE_PARM_DESC(nr_devices, "Number of devices to register"); static bool g_blocking; module_param_named(blocking, g_blocking, bool, 0444); MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device"); static bool g_shared_tags; module_param_named(shared_tags, g_shared_tags, bool, 0444); MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq"); static bool g_shared_tag_bitmap; module_param_named(shared_tag_bitmap, g_shared_tag_bitmap, bool, 0444); MODULE_PARM_DESC(shared_tag_bitmap, "Use shared tag bitmap for all submission queues for blk-mq"); static int g_irqmode = NULL_IRQ_SOFTIRQ; static int null_set_irqmode(const char *str, const struct kernel_param *kp) { return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE, NULL_IRQ_TIMER); } static const struct kernel_param_ops null_irqmode_param_ops = { .set = null_set_irqmode, .get = param_get_int, }; device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444); MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer"); static unsigned long g_completion_nsec = 10000; module_param_named(completion_nsec, g_completion_nsec, ulong, 0444); MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns"); static int g_hw_queue_depth = 64; module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444); MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64"); static bool g_use_per_node_hctx; module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444); MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false"); static bool g_memory_backed; module_param_named(memory_backed, g_memory_backed, bool, 0444); MODULE_PARM_DESC(memory_backed, "Create a memory-backed block device. Default: false"); static bool g_discard; module_param_named(discard, g_discard, bool, 0444); MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed null_blk device). Default: false");
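/*
 * Illustrative sketch (not from the driver): the same range-validated
 * module-parameter pattern used by queue_mode and irqmode above, reduced
 * to a hypothetical "level" parameter clamped to 0..2. module_param_cb()
 * wires the custom .set into the parameter; param_get_int is the stock
 * getter. All "level" names are invented for the example.
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>

static int g_level; /* hypothetical tunable, valid range 0..2 */

static int level_param_set(const char *str, const struct kernel_param *kp)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;
	if (new_val < 0 || new_val > 2)
		return -EINVAL;
	*(int *)kp->arg = new_val;
	return 0;
}

static const struct kernel_param_ops level_param_ops = {
	.set = level_param_set,
	.get = param_get_int,
};

module_param_cb(level, &level_param_ops, &g_level, 0444);
MODULE_PARM_DESC(level, "Example level (0-2)");

static unsigned long g_cache_size; module_param_named(cache_size, g_cache_size, ulong, 0444); MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device.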
Default: 0 (none)"); static bool g_fua = true; module_param_named(fua, g_fua, bool, 0444); MODULE_PARM_DESC(fua, "Enable/disable FUA support when cache_size is used. Default: true"); static unsigned int g_mbps; module_param_named(mbps, g_mbps, uint, 0444); MODULE_PARM_DESC(mbps, "Limit maximum bandwidth (in MiB/s). Default: 0 (no limit)"); static bool g_zoned; module_param_named(zoned, g_zoned, bool, S_IRUGO); MODULE_PARM_DESC(zoned, "Make device as a host-managed zoned block device. Default: false"); static unsigned long g_zone_size = 256; module_param_named(zone_size, g_zone_size, ulong, S_IRUGO); MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be power-of-two: Default: 256"); static unsigned long g_zone_capacity; module_param_named(zone_capacity, g_zone_capacity, ulong, 0444); MODULE_PARM_DESC(zone_capacity, "Zone capacity in MB when block device is zoned. Can be less than or equal to zone size. Default: Zone size"); static unsigned int g_zone_nr_conv; module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444); MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0"); static unsigned int g_zone_max_open; module_param_named(zone_max_open, g_zone_max_open, uint, 0444); MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones when block device is zoned. Default: 0 (no limit)"); static unsigned int g_zone_max_active; module_param_named(zone_max_active, g_zone_max_active, uint, 0444); MODULE_PARM_DESC(zone_max_active, "Maximum number of active zones when block device is zoned. Default: 0 (no limit)"); static int g_zone_append_max_sectors = INT_MAX; module_param_named(zone_append_max_sectors, g_zone_append_max_sectors, int, 0444); MODULE_PARM_DESC(zone_append_max_sectors, "Maximum size of a zone append command (in 512B sectors). Specify 0 for zone append emulation"); static bool g_zone_full; module_param_named(zone_full, g_zone_full, bool, S_IRUGO); MODULE_PARM_DESC(zone_full, "Initialize the sequential write required zones of a zoned device to be full. Default: false"); static struct nullb_device *null_alloc_dev(void); static void null_free_dev(struct nullb_device *dev); static void null_del_dev(struct nullb *nullb); static int null_add_dev(struct nullb_device *dev); static struct nullb *null_find_dev_by_name(const char *name); static void null_free_device_storage(struct nullb_device *dev, bool is_cache); static inline struct nullb_device *to_nullb_device(struct config_item *item) { return item ? 
container_of(to_config_group(item), struct nullb_device, group) : NULL; } static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page) { return snprintf(page, PAGE_SIZE, "%u\n", val); } static inline ssize_t nullb_device_ulong_attr_show(unsigned long val, char *page) { return snprintf(page, PAGE_SIZE, "%lu\n", val); } static inline ssize_t nullb_device_bool_attr_show(bool val, char *page) { return snprintf(page, PAGE_SIZE, "%u\n", val); } static ssize_t nullb_device_uint_attr_store(unsigned int *val, const char *page, size_t count) { unsigned int tmp; int result; result = kstrtouint(page, 0, &tmp); if (result < 0) return result; *val = tmp; return count; } static ssize_t nullb_device_ulong_attr_store(unsigned long *val, const char *page, size_t count) { int result; unsigned long tmp; result = kstrtoul(page, 0, &tmp); if (result < 0) return result; *val = tmp; return count; } static ssize_t nullb_device_bool_attr_store(bool *val, const char *page, size_t count) { bool tmp; int result; result = kstrtobool(page, &tmp); if (result < 0) return result; *val = tmp; return count; } /* The following macro should only be used with TYPE = {uint, ulong, bool}. */ #define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY) \ static ssize_t \ nullb_device_##NAME##_show(struct config_item *item, char *page) \ { \ return nullb_device_##TYPE##_attr_show( \ to_nullb_device(item)->NAME, page); \ } \ static ssize_t \ nullb_device_##NAME##_store(struct config_item *item, const char *page, \ size_t count) \ { \ int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY;\ struct nullb_device *dev = to_nullb_device(item); \ TYPE new_value = 0; \ int ret; \ \ ret = nullb_device_##TYPE##_attr_store(&new_value, page, count);\ if (ret < 0) \ return ret; \ if (apply_fn) \ ret = apply_fn(dev, new_value); \ else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags)) \ ret = -EBUSY; \ if (ret < 0) \ return ret; \ dev->NAME = new_value; \ return count; \ } \ CONFIGFS_ATTR(nullb_device_, NAME); static int nullb_update_nr_hw_queues(struct nullb_device *dev, unsigned int submit_queues, unsigned int poll_queues) { struct blk_mq_tag_set *set; int ret, nr_hw_queues; if (!dev->nullb) return 0; /* * Make sure at least one submit queue exists. */ if (!submit_queues) return -EINVAL; /* * Make sure that null_init_hctx() does not access nullb->queues[] past * the end of that array. */ if (submit_queues > nr_cpu_ids || poll_queues > g_poll_queues) return -EINVAL; /* * Keep previous and new queue numbers in nullb_device for reference in * the call back function null_map_queues(). */ dev->prev_submit_queues = dev->submit_queues; dev->prev_poll_queues = dev->poll_queues; dev->submit_queues = submit_queues; dev->poll_queues = poll_queues; set = dev->nullb->tag_set; nr_hw_queues = submit_queues + poll_queues; blk_mq_update_nr_hw_queues(set, nr_hw_queues); ret = set->nr_hw_queues == nr_hw_queues ? 
0 : -ENOMEM; if (ret) { /* on error, revert the queue numbers */ dev->submit_queues = dev->prev_submit_queues; dev->poll_queues = dev->prev_poll_queues; } return ret; } static int nullb_apply_submit_queues(struct nullb_device *dev, unsigned int submit_queues) { int ret; mutex_lock(&lock); ret = nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues); mutex_unlock(&lock); return ret; } static int nullb_apply_poll_queues(struct nullb_device *dev, unsigned int poll_queues) { int ret; mutex_lock(&lock); ret = nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues); mutex_unlock(&lock); return ret; } NULLB_DEVICE_ATTR(size, ulong, NULL); NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL); NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues); NULLB_DEVICE_ATTR(poll_queues, uint, nullb_apply_poll_queues); NULLB_DEVICE_ATTR(home_node, uint, NULL); NULLB_DEVICE_ATTR(queue_mode, uint, NULL); NULLB_DEVICE_ATTR(blocksize, uint, NULL); NULLB_DEVICE_ATTR(max_sectors, uint, NULL); NULLB_DEVICE_ATTR(irqmode, uint, NULL); NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL); NULLB_DEVICE_ATTR(index, uint, NULL); NULLB_DEVICE_ATTR(blocking, bool, NULL); NULLB_DEVICE_ATTR(use_per_node_hctx, bool, NULL); NULLB_DEVICE_ATTR(memory_backed, bool, NULL); NULLB_DEVICE_ATTR(discard, bool, NULL); NULLB_DEVICE_ATTR(mbps, uint, NULL); NULLB_DEVICE_ATTR(cache_size, ulong, NULL); NULLB_DEVICE_ATTR(zoned, bool, NULL); NULLB_DEVICE_ATTR(zone_size, ulong, NULL); NULLB_DEVICE_ATTR(zone_capacity, ulong, NULL); NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL); NULLB_DEVICE_ATTR(zone_max_open, uint, NULL); NULLB_DEVICE_ATTR(zone_max_active, uint, NULL); NULLB_DEVICE_ATTR(zone_append_max_sectors, uint, NULL); NULLB_DEVICE_ATTR(zone_full, bool, NULL); NULLB_DEVICE_ATTR(virt_boundary, bool, NULL); NULLB_DEVICE_ATTR(no_sched, bool, NULL); NULLB_DEVICE_ATTR(shared_tags, bool, NULL); NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL); NULLB_DEVICE_ATTR(fua, bool, NULL); static ssize_t nullb_device_power_show(struct config_item *item, char *page) { return nullb_device_bool_attr_show(to_nullb_device(item)->power, page); } static ssize_t nullb_device_power_store(struct config_item *item, const char *page, size_t count) { struct nullb_device *dev = to_nullb_device(item); bool newp = false; ssize_t ret; ret = nullb_device_bool_attr_store(&newp, page, count); if (ret < 0) return ret; ret = count; mutex_lock(&lock); if (!dev->power && newp) { if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags)) goto out; ret = null_add_dev(dev); if (ret) { clear_bit(NULLB_DEV_FL_UP, &dev->flags); goto out; } set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); dev->power = newp; ret = count; } else if (dev->power && !newp) { if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) { dev->power = newp; null_del_dev(dev->nullb); } clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); } out: mutex_unlock(&lock); return ret; } CONFIGFS_ATTR(nullb_device_, power); static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page) { struct nullb_device *t_dev = to_nullb_device(item); return badblocks_show(&t_dev->badblocks, page, 0); } static ssize_t nullb_device_badblocks_store(struct config_item *item, const char *page, size_t count) { struct nullb_device *t_dev = to_nullb_device(item); char *orig, *buf, *tmp; u64 start, end; int ret; orig = kstrndup(page, count, GFP_KERNEL); if (!orig) return -ENOMEM; buf = strstrip(orig); ret = -EINVAL; if (buf[0] != '+' && buf[0] != '-') goto out; tmp = strchr(&buf[1], '-'); if (!tmp) goto out; *tmp = '\0'; ret = 
kstrtoull(buf + 1, 0, &start); if (ret) goto out; ret = kstrtoull(tmp + 1, 0, &end); if (ret) goto out; ret = -EINVAL; if (start > end) goto out; /* enable badblocks */ cmpxchg(&t_dev->badblocks.shift, -1, 0); if (buf[0] == '+') ret = badblocks_set(&t_dev->badblocks, start, end - start + 1, 1); else ret = badblocks_clear(&t_dev->badblocks, start, end - start + 1); if (ret == 0) ret = count; out: kfree(orig); return ret; } CONFIGFS_ATTR(nullb_device_, badblocks); static ssize_t nullb_device_zone_readonly_store(struct config_item *item, const char *page, size_t count) { struct nullb_device *dev = to_nullb_device(item); return zone_cond_store(dev, page, count, BLK_ZONE_COND_READONLY); } CONFIGFS_ATTR_WO(nullb_device_, zone_readonly); static ssize_t nullb_device_zone_offline_store(struct config_item *item, const char *page, size_t count) { struct nullb_device *dev = to_nullb_device(item); return zone_cond_store(dev, page, count, BLK_ZONE_COND_OFFLINE); } CONFIGFS_ATTR_WO(nullb_device_, zone_offline); static struct configfs_attribute *nullb_device_attrs[] = { &nullb_device_attr_size, &nullb_device_attr_completion_nsec, &nullb_device_attr_submit_queues, &nullb_device_attr_poll_queues, &nullb_device_attr_home_node, &nullb_device_attr_queue_mode, &nullb_device_attr_blocksize, &nullb_device_attr_max_sectors, &nullb_device_attr_irqmode, &nullb_device_attr_hw_queue_depth, &nullb_device_attr_index, &nullb_device_attr_blocking, &nullb_device_attr_use_per_node_hctx, &nullb_device_attr_power, &nullb_device_attr_memory_backed, &nullb_device_attr_discard, &nullb_device_attr_mbps, &nullb_device_attr_cache_size, &nullb_device_attr_badblocks, &nullb_device_attr_zoned, &nullb_device_attr_zone_size, &nullb_device_attr_zone_capacity, &nullb_device_attr_zone_nr_conv, &nullb_device_attr_zone_max_open, &nullb_device_attr_zone_max_active, &nullb_device_attr_zone_append_max_sectors, &nullb_device_attr_zone_readonly, &nullb_device_attr_zone_offline, &nullb_device_attr_zone_full, &nullb_device_attr_virt_boundary, &nullb_device_attr_no_sched, &nullb_device_attr_shared_tags, &nullb_device_attr_shared_tag_bitmap, &nullb_device_attr_fua, NULL, }; static void nullb_device_release(struct config_item *item) { struct nullb_device *dev = to_nullb_device(item); null_free_device_storage(dev, false); null_free_dev(dev); } static struct configfs_item_operations nullb_device_ops = { .release = nullb_device_release, }; static const struct config_item_type nullb_device_type = { .ct_item_ops = &nullb_device_ops, .ct_attrs = nullb_device_attrs, .ct_owner = THIS_MODULE, }; #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION static void nullb_add_fault_config(struct nullb_device *dev) { fault_config_init(&dev->timeout_config, "timeout_inject"); fault_config_init(&dev->requeue_config, "requeue_inject"); fault_config_init(&dev->init_hctx_fault_config, "init_hctx_fault_inject"); configfs_add_default_group(&dev->timeout_config.group, &dev->group); configfs_add_default_group(&dev->requeue_config.group, &dev->group); configfs_add_default_group(&dev->init_hctx_fault_config.group, &dev->group); } #else static void nullb_add_fault_config(struct nullb_device *dev) { } #endif static struct config_group *nullb_group_make_group(struct config_group *group, const char *name) { struct nullb_device *dev; if (null_find_dev_by_name(name)) return ERR_PTR(-EEXIST); dev = null_alloc_dev(); if (!dev) return ERR_PTR(-ENOMEM); config_group_init_type_name(&dev->group, name, &nullb_device_type); nullb_add_fault_config(dev); return &dev->group; } static void 
static void nullb_group_drop_item(struct config_group *group,
				  struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
		mutex_lock(&lock);
		dev->power = false;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
	}

	config_item_put(item);
}

static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE,
			"badblocks,blocking,blocksize,cache_size,fua,"
			"completion_nsec,discard,home_node,hw_queue_depth,"
			"irqmode,max_sectors,mbps,memory_backed,no_sched,"
			"poll_queues,power,queue_mode,shared_tag_bitmap,"
			"shared_tags,size,submit_queues,use_per_node_hctx,"
			"virt_boundary,zoned,zone_capacity,zone_max_active,"
			"zone_max_open,zone_nr_conv,zone_offline,zone_readonly,"
			"zone_size,zone_append_max_sectors,zone_full\n");
}

CONFIGFS_ATTR_RO(memb_group_, features);

static struct configfs_attribute *nullb_group_attrs[] = {
	&memb_group_attr_features,
	NULL,
};

static struct configfs_group_operations nullb_group_ops = {
	.make_group	= nullb_group_make_group,
	.drop_item	= nullb_group_drop_item,
};

static const struct config_item_type nullb_group_type = {
	.ct_group_ops	= &nullb_group_ops,
	.ct_attrs	= nullb_group_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem nullb_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nullb",
			.ci_type = &nullb_group_type,
		},
	},
};

static inline int null_cache_active(struct nullb *nullb)
{
	return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
}

static struct nullb_device *null_alloc_dev(void)
{
	struct nullb_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	dev->timeout_config.attr = null_timeout_attr;
	dev->requeue_config.attr = null_requeue_attr;
	dev->init_hctx_fault_config.attr = null_init_hctx_attr;
#endif

	INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
	INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
	if (badblocks_init(&dev->badblocks, 0)) {
		kfree(dev);
		return NULL;
	}

	dev->size = g_gb * 1024;
	dev->completion_nsec = g_completion_nsec;
	dev->submit_queues = g_submit_queues;
	dev->prev_submit_queues = g_submit_queues;
	dev->poll_queues = g_poll_queues;
	dev->prev_poll_queues = g_poll_queues;
	dev->home_node = g_home_node;
	dev->queue_mode = g_queue_mode;
	dev->blocksize = g_bs;
	dev->max_sectors = g_max_sectors;
	dev->irqmode = g_irqmode;
	dev->hw_queue_depth = g_hw_queue_depth;
	dev->blocking = g_blocking;
	dev->memory_backed = g_memory_backed;
	dev->discard = g_discard;
	dev->cache_size = g_cache_size;
	dev->mbps = g_mbps;
	dev->use_per_node_hctx = g_use_per_node_hctx;
	dev->zoned = g_zoned;
	dev->zone_size = g_zone_size;
	dev->zone_capacity = g_zone_capacity;
	dev->zone_nr_conv = g_zone_nr_conv;
	dev->zone_max_open = g_zone_max_open;
	dev->zone_max_active = g_zone_max_active;
	dev->zone_append_max_sectors = g_zone_append_max_sectors;
	dev->zone_full = g_zone_full;
	dev->virt_boundary = g_virt_boundary;
	dev->no_sched = g_no_sched;
	dev->shared_tags = g_shared_tags;
	dev->shared_tag_bitmap = g_shared_tag_bitmap;
	dev->fua = g_fua;

	return dev;
}

static void null_free_dev(struct nullb_device *dev)
{
	if (!dev)
		return;

	null_free_zoned_dev(dev);
	badblocks_exit(&dev->badblocks);
	kfree(dev);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	struct nullb_cmd *cmd = container_of(timer, struct nullb_cmd, timer);

	blk_mq_end_request(blk_mq_rq_from_pdu(cmd), cmd->error);
	return HRTIMER_NORESTART;
}
static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = cmd->nq->dev->completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_complete_rq(struct request *rq)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);

	blk_mq_end_request(rq, cmd->error);
}

static struct nullb_page *null_alloc_page(void)
{
	struct nullb_page *t_page;

	t_page = kmalloc(sizeof(struct nullb_page), GFP_NOIO);
	if (!t_page)
		return NULL;

	t_page->page = alloc_pages(GFP_NOIO, 0);
	if (!t_page->page) {
		kfree(t_page);
		return NULL;
	}

	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
	return t_page;
}

static void null_free_page(struct nullb_page *t_page)
{
	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
		return;
	__free_page(t_page->page);
	kfree(t_page);
}

static bool null_page_empty(struct nullb_page *page)
{
	int size = MAP_SZ - 2;

	return find_first_bit(page->bitmap, size) == size;
}

static void null_free_sector(struct nullb *nullb, sector_t sector,
			     bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page, *ret;
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	t_page = radix_tree_lookup(root, idx);
	if (t_page) {
		__clear_bit(sector_bit, t_page->bitmap);

		if (null_page_empty(t_page)) {
			ret = radix_tree_delete_item(root, idx, t_page);
			WARN_ON(ret != t_page);
			null_free_page(ret);
			if (is_cache)
				nullb->dev->curr_cache -= PAGE_SIZE;
		}
	}
}

static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
	struct nullb_page *t_page, bool is_cache)
{
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;

	if (radix_tree_insert(root, idx, t_page)) {
		null_free_page(t_page);
		t_page = radix_tree_lookup(root, idx);
		WARN_ON(!t_page || t_page->page->index != idx);
	} else if (is_cache)
		nullb->dev->curr_cache += PAGE_SIZE;

	return t_page;
}

static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
{
	unsigned long pos = 0;
	int nr_pages;
	struct nullb_page *ret, *t_pages[FREE_BATCH];
	struct radix_tree_root *root;

	root = is_cache ? &dev->cache : &dev->data;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(root,
				(void **)t_pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			pos = t_pages[i]->page->index;
			ret = radix_tree_delete_item(root, pos, t_pages[i]);
			WARN_ON(ret != t_pages[i]);
			null_free_page(ret);
		}
		pos++;
	} while (nr_pages == FREE_BATCH);

	if (is_cache)
		dev->curr_cache = 0;
}
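/*
 * The page helpers above and the lookup helpers below implement the memory
 * backing store: written data lives in the dev->data radix tree and, when a
 * write cache is configured, dirty pages sit in dev->cache first. Each
 * nullb_page tracks per-sector validity in its bitmap; the top two bits
 * (NULLB_PAGE_LOCK, NULLB_PAGE_FREE) are reserved for flush bookkeeping,
 * which is why null_page_empty() scans only MAP_SZ - 2 bits.
 */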
static struct nullb_page *__null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page;
	struct radix_tree_root *root;

	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	t_page = radix_tree_lookup(root, idx);
	WARN_ON(t_page && t_page->page->index != idx);

	if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
		return t_page;

	return NULL;
}

static struct nullb_page *null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool ignore_cache)
{
	struct nullb_page *page = NULL;

	if (!ignore_cache)
		page = __null_lookup_page(nullb, sector, for_write, true);
	if (page)
		return page;
	return __null_lookup_page(nullb, sector, for_write, false);
}

static struct nullb_page *null_insert_page(struct nullb *nullb,
					   sector_t sector, bool ignore_cache)
	__releases(&nullb->lock)
	__acquires(&nullb->lock)
{
	u64 idx;
	struct nullb_page *t_page;

	t_page = null_lookup_page(nullb, sector, true, ignore_cache);
	if (t_page)
		return t_page;

	spin_unlock_irq(&nullb->lock);

	t_page = null_alloc_page();
	if (!t_page)
		goto out_lock;

	if (radix_tree_preload(GFP_NOIO))
		goto out_freepage;

	spin_lock_irq(&nullb->lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	t_page->page->index = idx;
	t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
	radix_tree_preload_end();

	return t_page;
out_freepage:
	null_free_page(t_page);
out_lock:
	spin_lock_irq(&nullb->lock);
	return null_lookup_page(nullb, sector, true, ignore_cache);
}

static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
{
	int i;
	unsigned int offset;
	u64 idx;
	struct nullb_page *t_page, *ret;
	void *dst, *src;

	idx = c_page->page->index;

	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);

	__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
	if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
		null_free_page(c_page);
		if (t_page && null_page_empty(t_page)) {
			ret = radix_tree_delete_item(&nullb->dev->data,
				idx, t_page);
			null_free_page(t_page);
		}
		return 0;
	}

	if (!t_page)
		return -ENOMEM;

	src = kmap_local_page(c_page->page);
	dst = kmap_local_page(t_page->page);

	for (i = 0; i < PAGE_SECTORS;
			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
		if (test_bit(i, c_page->bitmap)) {
			offset = (i << SECTOR_SHIFT);
			memcpy(dst + offset, src + offset,
				nullb->dev->blocksize);
			__set_bit(i, t_page->bitmap);
		}
	}

	kunmap_local(dst);
	kunmap_local(src);

	ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
	null_free_page(ret);
	nullb->dev->curr_cache -= PAGE_SIZE;

	return 0;
}
static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{
	int i, err, nr_pages;
	struct nullb_page *c_pages[FREE_BATCH];
	unsigned long flushed = 0, one_round;

again:
	if ((nullb->dev->cache_size * 1024 * 1024) >
	     nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
		return 0;

	nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
			(void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
	/*
	 * null_flush_cache_page() could unlock before using the c_pages. To
	 * avoid a race, we don't allow the pages to be freed here.
	 */
	for (i = 0; i < nr_pages; i++) {
		nullb->cache_flush_pos = c_pages[i]->page->index;
		/*
		 * We found a page which is being flushed to disk by other
		 * threads.
		 */
		if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
			c_pages[i] = NULL;
		else
			__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
	}

	one_round = 0;
	for (i = 0; i < nr_pages; i++) {
		if (c_pages[i] == NULL)
			continue;
		err = null_flush_cache_page(nullb, c_pages[i]);
		if (err)
			return err;
		one_round++;
	}
	flushed += one_round << PAGE_SHIFT;

	if (n > flushed) {
		if (nr_pages == 0)
			nullb->cache_flush_pos = 0;
		if (one_round == 0) {
			/* give other threads a chance */
			spin_unlock_irq(&nullb->lock);
			spin_lock_irq(&nullb->lock);
		}
		goto again;
	}
	return 0;
}

static int copy_to_nullb(struct nullb *nullb, struct page *source,
	unsigned int off, sector_t sector, size_t n, bool is_fua)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		if (null_cache_active(nullb) && !is_fua)
			null_make_cache_space(nullb, PAGE_SIZE);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_insert_page(nullb, sector,
			!null_cache_active(nullb) || is_fua);
		if (!t_page)
			return -ENOSPC;

		memcpy_page(t_page->page, offset, source, off + count, temp);

		__set_bit(sector & SECTOR_MASK, t_page->bitmap);

		if (is_fua)
			null_free_sector(nullb, sector, true);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static int copy_from_nullb(struct nullb *nullb, struct page *dest,
	unsigned int off, sector_t sector, size_t n)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_lookup_page(nullb, sector, false,
			!null_cache_active(nullb));

		if (t_page)
			memcpy_page(dest, off + count, t_page->page, offset,
				    temp);
		else
			zero_user(dest, off + count, temp);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
			       unsigned int len, unsigned int off)
{
	memset_page(page, off, 0xff, len);
}

blk_status_t null_handle_discard(struct nullb_device *dev,
				 sector_t sector, sector_t nr_sectors)
{
	struct nullb *nullb = dev->nullb;
	size_t n = nr_sectors << SECTOR_SHIFT;
	size_t temp;

	spin_lock_irq(&nullb->lock);
	while (n > 0) {
		temp = min_t(size_t, n, dev->blocksize);
		null_free_sector(nullb, sector, false);
		if (null_cache_active(nullb))
			null_free_sector(nullb, sector, true);
		sector += temp >> SECTOR_SHIFT;
		n -= temp;
	}
	spin_unlock_irq(&nullb->lock);

	return BLK_STS_OK;
}

static blk_status_t null_handle_flush(struct nullb *nullb)
{
	int err;

	if (!null_cache_active(nullb))
		return 0;

	spin_lock_irq(&nullb->lock);
	while (true) {
		err = null_make_cache_space(nullb,
			nullb->dev->cache_size * 1024 * 1024);
		if (err || nullb->dev->curr_cache == 0)
			break;
	}

	WARN_ON(!radix_tree_empty(&nullb->dev->cache));
	spin_unlock_irq(&nullb->lock);
	return errno_to_blk_status(err);
}
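/*
 * Note: a REQ_OP_FLUSH (null_handle_flush() above) drains the entire write
 * cache by repeatedly asking null_make_cache_space() for cache_size bytes
 * until curr_cache reaches zero, so a completed flush implies an empty
 * cache radix tree; the WARN_ON asserts exactly that.
 */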
static int null_transfer(struct nullb *nullb, struct page *page,
	unsigned int len, unsigned int off, bool is_write, sector_t sector,
	bool is_fua)
{
	struct nullb_device *dev = nullb->dev;
	unsigned int valid_len = len;
	int err = 0;

	if (!is_write) {
		if (dev->zoned)
			valid_len = null_zone_valid_read_len(nullb,
				sector, len);

		if (valid_len) {
			err = copy_from_nullb(nullb, page, off,
				sector, valid_len);
			off += valid_len;
			len -= valid_len;
		}

		if (len)
			nullb_fill_pattern(nullb, page, len, off);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
	}

	return err;
}

static blk_status_t null_handle_rq(struct nullb_cmd *cmd)
{
	struct request *rq = blk_mq_rq_from_pdu(cmd);
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err = 0;
	unsigned int len;
	sector_t sector = blk_rq_pos(rq);
	struct req_iterator iter;
	struct bio_vec bvec;

	spin_lock_irq(&nullb->lock);
	rq_for_each_segment(bvec, rq, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				    op_is_write(req_op(rq)), sector,
				    rq->cmd_flags & REQ_FUA);
		if (err)
			break;
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);

	return errno_to_blk_status(err);
}

static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	blk_status_t sts = BLK_STS_OK;
	struct request *rq = blk_mq_rq_from_pdu(cmd);

	if (!hrtimer_active(&nullb->bw_timer))
		hrtimer_restart(&nullb->bw_timer);

	if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) {
		blk_mq_stop_hw_queues(nullb->q);
		/* race with timer */
		if (atomic_long_read(&nullb->cur_bytes) > 0)
			blk_mq_start_stopped_hw_queues(nullb->q, true);
		/* requeue request */
		sts = BLK_STS_DEV_RESOURCE;
	}
	return sts;
}

static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
						 sector_t sector,
						 sector_t nr_sectors)
{
	struct badblocks *bb = &cmd->nq->dev->badblocks;
	sector_t first_bad;
	int bad_sectors;

	if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors))
		return BLK_STS_IOERR;

	return BLK_STS_OK;
}

static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
						     enum req_op op,
						     sector_t sector,
						     sector_t nr_sectors)
{
	struct nullb_device *dev = cmd->nq->dev;

	if (op == REQ_OP_DISCARD)
		return null_handle_discard(dev, sector, nr_sectors);

	return null_handle_rq(cmd);
}

static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd)
{
	struct request *rq = blk_mq_rq_from_pdu(cmd);
	struct nullb_device *dev = cmd->nq->dev;
	struct bio *bio;

	if (!dev->memory_backed && req_op(rq) == REQ_OP_READ) {
		__rq_for_each_bio(bio, rq)
			zero_fill_bio(bio);
	}
}
static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
{
	struct request *rq = blk_mq_rq_from_pdu(cmd);

	/*
	 * Since root privileges are required to configure the null_blk
	 * driver, it is fine that this driver does not initialize the
	 * data buffers of read commands. Zero-initialize these buffers
	 * anyway if KMSAN is enabled to prevent KMSAN from complaining
	 * about null_blk not initializing read data buffers.
	 */
	if (IS_ENABLED(CONFIG_KMSAN))
		nullb_zero_read_cmd_buffer(cmd);

	/* Complete IO by inline, softirq or timer */
	switch (cmd->nq->dev->irqmode) {
	case NULL_IRQ_SOFTIRQ:
		blk_mq_complete_request(rq);
		break;
	case NULL_IRQ_NONE:
		blk_mq_end_request(rq, cmd->error);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}

blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
			      sector_t sector, unsigned int nr_sectors)
{
	struct nullb_device *dev = cmd->nq->dev;
	blk_status_t ret;

	if (dev->badblocks.shift != -1) {
		ret = null_handle_badblocks(cmd, sector, nr_sectors);
		if (ret != BLK_STS_OK)
			return ret;
	}

	if (dev->memory_backed)
		return null_handle_memory_backed(cmd, op, sector, nr_sectors);

	return BLK_STS_OK;
}

static void null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
			    sector_t nr_sectors, enum req_op op)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	blk_status_t sts;

	if (op == REQ_OP_FLUSH) {
		cmd->error = null_handle_flush(nullb);
		goto out;
	}

	if (dev->zoned)
		sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors);
	else
		sts = null_process_cmd(cmd, op, sector, nr_sectors);

	/* Do not overwrite errors (e.g. timeout errors) */
	if (cmd->error == BLK_STS_OK)
		cmd->error = sts;

out:
	nullb_complete_cmd(cmd);
}

static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
{
	struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
	unsigned int mbps = nullb->dev->mbps;

	if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
		return HRTIMER_NORESTART;

	atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
	blk_mq_start_stopped_hw_queues(nullb->q, true);

	hrtimer_forward_now(&nullb->bw_timer, timer_interval);

	return HRTIMER_RESTART;
}

static void nullb_setup_bwtimer(struct nullb *nullb)
{
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);

	hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	nullb->bw_timer.function = nullb_bwtimer_fn;
	atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
	hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION

static bool should_timeout_request(struct request *rq)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
	struct nullb_device *dev = cmd->nq->dev;

	return should_fail(&dev->timeout_config.attr, 1);
}

static bool should_requeue_request(struct request *rq)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
	struct nullb_device *dev = cmd->nq->dev;

	return should_fail(&dev->requeue_config.attr, 1);
}

static bool should_init_hctx_fail(struct nullb_device *dev)
{
	return should_fail(&dev->init_hctx_fault_config.attr, 1);
}

#else

static bool should_timeout_request(struct request *rq)
{
	return false;
}

static bool should_requeue_request(struct request *rq)
{
	return false;
}

static bool should_init_hctx_fail(struct nullb_device *dev)
{
	return false;
}

#endif
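/*
 * With CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION enabled, the three
 * should_*() helpers above are driven by the fault_attr instances that
 * nullb_add_fault_config() exposes per device as the timeout_inject,
 * requeue_inject and init_hctx_fault_inject configfs groups.
 */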
static void null_map_queues(struct blk_mq_tag_set *set)
{
	struct nullb *nullb = set->driver_data;
	int i, qoff;
	unsigned int submit_queues = g_submit_queues;
	unsigned int poll_queues = g_poll_queues;

	if (nullb) {
		struct nullb_device *dev = nullb->dev;

		/*
		 * Check nr_hw_queues of the tag set to see whether the
		 * expected number of hardware queues was prepared. If the
		 * block layer failed to prepare them, use the previous
		 * numbers of submit queues and poll queues to map queues.
		 */
		if (set->nr_hw_queues ==
		    dev->submit_queues + dev->poll_queues) {
			submit_queues = dev->submit_queues;
			poll_queues = dev->poll_queues;
		} else if (set->nr_hw_queues ==
			   dev->prev_submit_queues + dev->prev_poll_queues) {
			submit_queues = dev->prev_submit_queues;
			poll_queues = dev->prev_poll_queues;
		} else {
			pr_warn("tag set has unexpected nr_hw_queues: %d\n",
				set->nr_hw_queues);
			WARN_ON_ONCE(true);
			submit_queues = 1;
			poll_queues = 0;
		}
	}

	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		switch (i) {
		case HCTX_TYPE_DEFAULT:
			map->nr_queues = submit_queues;
			break;
		case HCTX_TYPE_READ:
			map->nr_queues = 0;
			continue;
		case HCTX_TYPE_POLL:
			map->nr_queues = poll_queues;
			break;
		}
		map->queue_offset = qoff;
		qoff += map->nr_queues;
		blk_mq_map_queues(map);
	}
}

static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct nullb_queue *nq = hctx->driver_data;
	LIST_HEAD(list);
	int nr = 0;
	struct request *rq;

	spin_lock(&nq->poll_lock);
	list_splice_init(&nq->poll_list, &list);
	list_for_each_entry(rq, &list, queuelist)
		blk_mq_set_request_complete(rq);
	spin_unlock(&nq->poll_lock);

	while (!list_empty(&list)) {
		struct nullb_cmd *cmd;
		struct request *req;

		req = list_first_entry(&list, struct request, queuelist);
		list_del_init(&req->queuelist);
		cmd = blk_mq_rq_to_pdu(req);
		cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
					      blk_rq_sectors(req));
		if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
					 blk_mq_end_request_batch))
			blk_mq_end_request(req, cmd->error);
		nr++;
	}

	return nr;
}
static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);

	if (hctx->type == HCTX_TYPE_POLL) {
		struct nullb_queue *nq = hctx->driver_data;

		spin_lock(&nq->poll_lock);
		/* The request may have completed meanwhile. */
		if (blk_mq_request_completed(rq)) {
			spin_unlock(&nq->poll_lock);
			return BLK_EH_DONE;
		}
		list_del_init(&rq->queuelist);
		spin_unlock(&nq->poll_lock);
	}

	pr_info("rq %p timed out\n", rq);

	/*
	 * If the device is marked as blocking (i.e. memory backed or zoned
	 * device), the submission path may be blocked waiting for resources
	 * and cause real timeouts. For these real timeouts, the submission
	 * path will complete the request using blk_mq_complete_request().
	 * Only fake timeouts need to execute blk_mq_complete_request() here.
	 */
	cmd->error = BLK_STS_TIMEOUT;
	if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL)
		blk_mq_complete_request(rq);
	return BLK_EH_DONE;
}

static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
				  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
	struct nullb_queue *nq = hctx->driver_data;
	sector_t nr_sectors = blk_rq_sectors(rq);
	sector_t sector = blk_rq_pos(rq);
	const bool is_poll = hctx->type == HCTX_TYPE_POLL;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	if (!is_poll && nq->dev->irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->error = BLK_STS_OK;
	cmd->nq = nq;
	cmd->fake_timeout = should_timeout_request(rq) ||
		blk_should_fake_timeout(rq->q);

	if (should_requeue_request(rq)) {
		/*
		 * Alternate between hitting the core BUSY path, and the
		 * driver driven requeue path
		 */
		nq->requeue_selection++;
		if (nq->requeue_selection & 1)
			return BLK_STS_RESOURCE;
		blk_mq_requeue_request(rq, true);
		return BLK_STS_OK;
	}

	if (test_bit(NULLB_DEV_FL_THROTTLED, &nq->dev->flags)) {
		blk_status_t sts = null_handle_throttled(cmd);

		if (sts != BLK_STS_OK)
			return sts;
	}

	blk_mq_start_request(rq);

	if (is_poll) {
		spin_lock(&nq->poll_lock);
		list_add_tail(&rq->queuelist, &nq->poll_list);
		spin_unlock(&nq->poll_lock);
		return BLK_STS_OK;
	}
	if (cmd->fake_timeout)
		return BLK_STS_OK;

	null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
	return BLK_STS_OK;
}

static void null_queue_rqs(struct request **rqlist)
{
	struct request *requeue_list = NULL;
	struct request **requeue_lastp = &requeue_list;
	struct blk_mq_queue_data bd = { };
	blk_status_t ret;

	do {
		struct request *rq = rq_list_pop(rqlist);

		bd.rq = rq;
		ret = null_queue_rq(rq->mq_hctx, &bd);
		if (ret != BLK_STS_OK)
			rq_list_add_tail(&requeue_lastp, rq);
	} while (!rq_list_empty(*rqlist));

	*rqlist = requeue_list;
}

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	nq->dev = nullb->dev;
	INIT_LIST_HEAD(&nq->poll_list);
	spin_lock_init(&nq->poll_lock);
}

static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
			  unsigned int hctx_idx)
{
	struct nullb *nullb = hctx->queue->queuedata;
	struct nullb_queue *nq;

	if (should_init_hctx_fail(nullb->dev))
		return -EFAULT;

	nq = &nullb->queues[hctx_idx];
	hctx->driver_data = nq;
	null_init_queue(nullb, nq);

	return 0;
}

static const struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.queue_rqs	= null_queue_rqs,
	.complete	= null_complete_rq,
	.timeout	= null_timeout_rq,
	.poll		= null_poll,
	.map_queues	= null_map_queues,
	.init_hctx	= null_init_hctx,
};

static void null_del_dev(struct nullb *nullb)
{
	struct nullb_device *dev;

	if (!nullb)
		return;

	dev = nullb->dev;

	ida_free(&nullb_indexes, nullb->index);

	list_del_init(&nullb->list);

	del_gendisk(nullb->disk);

	if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
		hrtimer_cancel(&nullb->bw_timer);
		atomic_long_set(&nullb->cur_bytes, LONG_MAX);
		blk_mq_start_stopped_hw_queues(nullb->q, true);
	}

	put_disk(nullb->disk);
	if (nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
	kfree(nullb->queues);
	if (null_cache_active(nullb))
		null_free_device_storage(nullb->dev, true);
	kfree(nullb);
	dev->nullb = NULL;
}
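/*
 * Discard support (configured below in null_config_discard()) is only
 * honored for memory-backed, non-zoned devices: without memory backing
 * there is nothing to free, and in zoned mode the option is ignored as
 * well.
 */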
static void null_config_discard(struct nullb *nullb, struct queue_limits *lim)
{
	if (nullb->dev->discard == false)
		return;

	if (!nullb->dev->memory_backed) {
		nullb->dev->discard = false;
		pr_info("discard option is ignored without memory backing\n");
		return;
	}

	if (nullb->dev->zoned) {
		nullb->dev->discard = false;
		pr_info("discard option is ignored in zoned mode\n");
		return;
	}

	lim->max_hw_discard_sectors = UINT_MAX >> 9;
}

static const struct block_device_operations null_ops = {
	.owner		= THIS_MODULE,
	.report_zones	= null_report_zones,
};

static int setup_queues(struct nullb *nullb)
{
	int nqueues = nr_cpu_ids;

	if (g_poll_queues)
		nqueues += g_poll_queues;

	nullb->queues = kcalloc(nqueues, sizeof(struct nullb_queue),
				GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	return 0;
}

static int null_init_tag_set(struct blk_mq_tag_set *set, int poll_queues)
{
	set->ops = &null_mq_ops;
	set->cmd_size = sizeof(struct nullb_cmd);
	set->timeout = 5 * HZ;
	set->nr_maps = 1;
	if (poll_queues) {
		set->nr_hw_queues += poll_queues;
		set->nr_maps += 2;
	}
	return blk_mq_alloc_tag_set(set);
}

static int null_init_global_tag_set(void)
{
	int error;

	if (tag_set.ops)
		return 0;

	tag_set.nr_hw_queues = g_submit_queues;
	tag_set.queue_depth = g_hw_queue_depth;
	tag_set.numa_node = g_home_node;
	tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	if (g_no_sched)
		tag_set.flags |= BLK_MQ_F_NO_SCHED;
	if (g_shared_tag_bitmap)
		tag_set.flags |= BLK_MQ_F_TAG_HCTX_SHARED;
	if (g_blocking)
		tag_set.flags |= BLK_MQ_F_BLOCKING;

	error = null_init_tag_set(&tag_set, g_poll_queues);
	if (error)
		tag_set.ops = NULL;
	return error;
}

static int null_setup_tagset(struct nullb *nullb)
{
	if (nullb->dev->shared_tags) {
		nullb->tag_set = &tag_set;
		return null_init_global_tag_set();
	}

	nullb->tag_set = &nullb->__tag_set;
	nullb->tag_set->driver_data = nullb;
	nullb->tag_set->nr_hw_queues = nullb->dev->submit_queues;
	nullb->tag_set->queue_depth = nullb->dev->hw_queue_depth;
	nullb->tag_set->numa_node = nullb->dev->home_node;
	nullb->tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
	if (nullb->dev->no_sched)
		nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED;
	if (nullb->dev->shared_tag_bitmap)
		nullb->tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
	if (nullb->dev->blocking)
		nullb->tag_set->flags |= BLK_MQ_F_BLOCKING;

	return null_init_tag_set(nullb->tag_set, nullb->dev->poll_queues);
}

static int null_validate_conf(struct nullb_device *dev)
{
	if (dev->queue_mode == NULL_Q_RQ) {
		pr_err("legacy IO path is no longer available\n");
		return -EINVAL;
	}
	if (dev->queue_mode == NULL_Q_BIO) {
		pr_err("BIO-based IO path is no longer available, using blk-mq instead.\n");
		dev->queue_mode = NULL_Q_MQ;
	}

	if (dev->use_per_node_hctx) {
		if (dev->submit_queues != nr_online_nodes)
			dev->submit_queues = nr_online_nodes;
	} else if (dev->submit_queues > nr_cpu_ids)
		dev->submit_queues = nr_cpu_ids;
	else if (dev->submit_queues == 0)
		dev->submit_queues = 1;
	dev->prev_submit_queues = dev->submit_queues;

	if (dev->poll_queues > g_poll_queues)
		dev->poll_queues = g_poll_queues;
	dev->prev_poll_queues = dev->poll_queues;

	dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);

	/* Do memory allocation, so set blocking */
	if (dev->memory_backed)
		dev->blocking = true;
	else /* cache is meaningless */
		dev->cache_size = 0;
	dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
				dev->cache_size);
	dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);

	if (dev->zoned &&
	    (!dev->zone_size || !is_power_of_2(dev->zone_size))) {
		pr_err("zone_size must be power-of-two\n");
		return -EINVAL;
	}

	return 0;
}
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static bool __null_setup_fault(struct fault_attr *attr, char *str)
{
	if (!str[0])
		return true;

	if (!setup_fault_attr(attr, str))
		return false;

	attr->verbose = 0;
	return true;
}
#endif

static bool null_setup_fault(void)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
		return false;
	if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
		return false;
	if (!__null_setup_fault(&null_init_hctx_attr, g_init_hctx_str))
		return false;
#endif
	return true;
}

static int null_add_dev(struct nullb_device *dev)
{
	struct queue_limits lim = {
		.logical_block_size	= dev->blocksize,
		.physical_block_size	= dev->blocksize,
		.max_hw_sectors		= dev->max_sectors,
	};
	struct nullb *nullb;
	int rv;

	rv = null_validate_conf(dev);
	if (rv)
		return rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}
	nullb->dev = dev;
	dev->nullb = nullb;

	spin_lock_init(&nullb->lock);

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	rv = null_setup_tagset(nullb);
	if (rv)
		goto out_cleanup_queues;

	if (dev->virt_boundary)
		lim.virt_boundary_mask = PAGE_SIZE - 1;
	null_config_discard(nullb, &lim);
	if (dev->zoned) {
		rv = null_init_zoned_dev(dev, &lim);
		if (rv)
			goto out_cleanup_tags;
	}

	if (dev->cache_size > 0) {
		set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
		lim.features |= BLK_FEAT_WRITE_CACHE;
		if (dev->fua)
			lim.features |= BLK_FEAT_FUA;
	}

	nullb->disk = blk_mq_alloc_disk(nullb->tag_set, &lim, nullb);
	if (IS_ERR(nullb->disk)) {
		rv = PTR_ERR(nullb->disk);
		goto out_cleanup_zone;
	}
	nullb->q = nullb->disk->queue;

	if (dev->mbps) {
		set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
		nullb_setup_bwtimer(nullb);
	}

	nullb->q->queuedata = nullb;

	rv = ida_alloc(&nullb_indexes, GFP_KERNEL);
	if (rv < 0)
		goto out_cleanup_disk;

	nullb->index = rv;
	dev->index = rv;

	if (config_item_name(&dev->group.cg_item)) {
		/* Use configfs dir name as the device name */
		snprintf(nullb->disk_name, sizeof(nullb->disk_name),
			 "%s", config_item_name(&dev->group.cg_item));
	} else {
		sprintf(nullb->disk_name, "nullb%d", nullb->index);
	}

	set_capacity(nullb->disk,
		((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT);
	nullb->disk->major = null_major;
	nullb->disk->first_minor = nullb->index;
	nullb->disk->minors = 1;
	nullb->disk->fops = &null_ops;
	nullb->disk->private_data = nullb;
	strscpy_pad(nullb->disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	if (nullb->dev->zoned) {
		rv = null_register_zoned_dev(nullb);
		if (rv)
			goto out_ida_free;
	}

	rv = add_disk(nullb->disk);
	if (rv)
		goto out_ida_free;

	list_add_tail(&nullb->list, &nullb_list);

	pr_info("disk %s created\n", nullb->disk_name);

	return 0;

out_ida_free:
	ida_free(&nullb_indexes, nullb->index);
out_cleanup_disk:
	put_disk(nullb->disk);
out_cleanup_zone:
	null_free_zoned_dev(dev);
out_cleanup_tags:
	if (nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
	kfree(nullb->queues);
out_free_nullb:
	kfree(nullb);
	dev->nullb = NULL;
out:
	return rv;
}

static struct nullb *null_find_dev_by_name(const char *name)
{
	struct nullb *nullb = NULL, *nb;

	mutex_lock(&lock);
	list_for_each_entry(nb, &nullb_list, list) {
		if (strcmp(nb->disk_name, name) == 0) {
			nullb = nb;
			break;
		}
	}
	mutex_unlock(&lock);

	return nullb;
}

static int null_create_dev(void)
{
	struct nullb_device *dev;
	int ret;

	dev = null_alloc_dev();
	if (!dev)
		return -ENOMEM;

	mutex_lock(&lock);
	ret = null_add_dev(dev);
	mutex_unlock(&lock);
	if (ret) {
		null_free_dev(dev);
		return ret;
	}

	return 0;
}

static void null_destroy_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;

	null_del_dev(nullb);
	null_free_device_storage(dev, false);
	null_free_dev(dev);
}
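/*
 * Illustrative module usage (not from this file): devices can also be
 * created at load time through the module parameters that back the g_*
 * defaults, e.g.:
 *
 *	modprobe null_blk nr_devices=2 submit_queues=4
 *
 * The parameter names here are inferred from the nr_devices and
 * "submit_queues param" references in null_init() below; the
 * module_param() declarations earlier in this file are authoritative.
 */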
pr_warn("defaults block size to %lu\n", PAGE_SIZE); g_bs = PAGE_SIZE; } if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) { pr_err("invalid home_node value\n"); g_home_node = NUMA_NO_NODE; } if (!null_setup_fault()) return -EINVAL; if (g_queue_mode == NULL_Q_RQ) { pr_err("legacy IO path is no longer available\n"); return -EINVAL; } if (g_use_per_node_hctx) { if (g_submit_queues != nr_online_nodes) { pr_warn("submit_queues param is set to %u.\n", nr_online_nodes); g_submit_queues = nr_online_nodes; } } else if (g_submit_queues > nr_cpu_ids) { g_submit_queues = nr_cpu_ids; } else if (g_submit_queues <= 0) { g_submit_queues = 1; } config_group_init(&nullb_subsys.su_group); mutex_init(&nullb_subsys.su_mutex); ret = configfs_register_subsystem(&nullb_subsys); if (ret) return ret; mutex_init(&lock); null_major = register_blkdev(0, "nullb"); if (null_major < 0) { ret = null_major; goto err_conf; } for (i = 0; i < nr_devices; i++) { ret = null_create_dev(); if (ret) goto err_dev; } pr_info("module loaded\n"); return 0; err_dev: while (!list_empty(&nullb_list)) { nullb = list_entry(nullb_list.next, struct nullb, list); null_destroy_dev(nullb); } unregister_blkdev(null_major, "nullb"); err_conf: configfs_unregister_subsystem(&nullb_subsys); return ret; } static void __exit null_exit(void) { struct nullb *nullb; configfs_unregister_subsystem(&nullb_subsys); unregister_blkdev(null_major, "nullb"); mutex_lock(&lock); while (!list_empty(&nullb_list)) { nullb = list_entry(nullb_list.next, struct nullb, list); null_destroy_dev(nullb); } mutex_unlock(&lock); if (tag_set.ops) blk_mq_free_tag_set(&tag_set); mutex_destroy(&lock); } module_init(null_init); module_exit(null_exit); MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>"); MODULE_DESCRIPTION("multi queue aware block test driver"); MODULE_LICENSE("GPL"); |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 * Authors:	J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *		991129:	- Bug fix with grio mode
 *			- a better sing. AvgQ mode with Grio(WRED)
 *			- A finer grained VQ dequeue based on suggestion
 *			  from Ren Liu
 *			- More error checks
 *
 * For all the glorious comments look at include/net/red.h
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)

#define GRED_VQ_RED_FLAGS	(TC_RED_ECN | TC_RED_HARDDROP)

struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop parameters */
	u32		red_flags;	/* virtualQ version of red_flags */
	u64		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_vars  vars;
	struct red_stats stats;
};

enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_vars wred_set;
	struct tc_gred_qopt_offload *opt;
};

static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}
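/*
 * Two table-wide operating modes are tracked in gred_sched->flags: RIO
 * mode, where virtual queues carry individual priorities, and WRED mode,
 * where all virtual queues share a single average-queue state (the
 * wred_set that gred_load_wred_set()/gred_store_wred_set() copy around).
 * gred_wred_mode_check() below reports whether any two VQs share a
 * priority, which is the condition under which callers enable WRED mode.
 */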
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2), but it shouldn't be needed too frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = i + 1; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}

static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}

static inline void gred_load_wred_set(const struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->vars.qavg = table->wred_set.qavg;
	q->vars.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->vars.qavg;
	table->wred_set.qidlestart = q->vars.qidlestart;
}

static int gred_use_ecn(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_ECN;
}

static int gred_use_harddrop(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_HARDDROP;
}

static bool gred_per_vq_red_flags_used(struct gred_sched *table)
{
	unsigned int i;

	/* Local per-vq flags couldn't have been set unless global are 0 */
	if (table->red_flags)
		return false;
	for (i = 0; i < MAX_DPs; i++)
		if (table->tab[i] && table->tab[i]->red_flags)
			return true;
	return false;
}
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			struct sk_buff **to_free)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;
		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
				   sch->limit))
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		   requeueing */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qaves of prios < ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->vars))
				qavg += t->tab[i]->vars.qavg;
		}

	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     gred_backlog(t, q, sch));

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		return skb;
	}

	return NULL;
}

static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->vars);
		q->backlog = 0;
	}
}

static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_gred_qopt_offload *opt = table->opt;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	memset(opt, 0, sizeof(*opt));
	opt->command = command;
	opt->handle = sch->handle;
	opt->parent = sch->parent;

	if (command == TC_GRED_REPLACE) {
		unsigned int i;

		opt->set.grio_on = gred_rio_mode(table);
		opt->set.wred_on = gred_wred_mode(table);
		opt->set.dp_cnt = table->DPs;
		opt->set.dp_def = table->def;

		for (i = 0; i < table->DPs; i++) {
			struct gred_sched_data *q = table->tab[i];

			if (!q)
				continue;
			opt->set.tab[i].present = true;
			opt->set.tab[i].limit = q->limit;
			opt->set.tab[i].prio = q->prio;
			opt->set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
			opt->set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
			opt->set.tab[i].is_ecn = gred_use_ecn(q);
			opt->set.tab[i].is_harddrop = gred_use_harddrop(q);
			opt->set.tab[i].probability = q->parms.max_P;
			opt->set.tab[i].backlog = &q->backlog;
		}
		opt->set.qstats = &sch->qstats;
	}

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, opt);
}
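/*
 * Note on the offload path: TC_GRED_REPLACE (gred_offload() above) hands
 * the full software configuration to the driver, including pointers to
 * each virtual queue's backlog counter and the Qdisc's qstats, so the
 * hardware path can keep those counters current. TC_GRED_STATS is issued
 * from the dump path below to pull accumulated hardware stats back in.
 */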
static int gred_offload_dump_stats(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt_offload *hw_stats;
	u64 bytes = 0, packets = 0;
	unsigned int i;
	int ret;

	hw_stats = kzalloc(sizeof(*hw_stats), GFP_KERNEL);
	if (!hw_stats)
		return -ENOMEM;

	hw_stats->command = TC_GRED_STATS;
	hw_stats->handle = sch->handle;
	hw_stats->parent = sch->parent;

	for (i = 0; i < MAX_DPs; i++) {
		gnet_stats_basic_sync_init(&hw_stats->stats.bstats[i]);
		if (table->tab[i])
			hw_stats->stats.xstats[i] = &table->tab[i]->stats;
	}

	ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
	/* Even if driver returns failure adjust the stats - in case offload
	 * ended but driver still wants to adjust the values.
	 */
	sch_tree_lock(sch);
	for (i = 0; i < MAX_DPs; i++) {
		if (!table->tab[i])
			continue;
		table->tab[i]->packetsin += u64_stats_read(&hw_stats->stats.bstats[i].packets);
		table->tab[i]->bytesin += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
		table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;

		bytes += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
		packets += u64_stats_read(&hw_stats->stats.bstats[i].packets);
		sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
		sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
		sch->qstats.drops += hw_stats->stats.qstats[i].drops;
		sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
		sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
	}
	_bstats_update(&sch->bstats, bytes, packets);
	sch_tree_unlock(sch);

	kfree(hw_stats);
	return ret;
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}
static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	bool red_flags_changed;
	int i;

	if (!dps)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs) {
		NL_SET_ERR_MSG_MOD(extack, "number of virtual queues too high");
		return -EINVAL;
	}
	if (sopt->DPs == 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "number of virtual queues can't be 0");
		return -EINVAL;
	}
	if (sopt->def_DP >= sopt->DPs) {
		NL_SET_ERR_MSG_MOD(extack,
				   "default virtual queue above virtual queue count");
		return -EINVAL;
	}
	if (sopt->flags && gred_per_vq_red_flags_used(table)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't set per-Qdisc RED flags when per-virtual queue flags are used");
		return -EINVAL;
	}

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	red_flags_changed = table->red_flags != sopt->flags;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found, so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	if (red_flags_changed)
		for (i = 0; i < table->DPs; i++)
			if (table->tab[i])
				table->tab[i]->red_flags =
					table->red_flags & GRED_VQ_RED_FLAGS;

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
				i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;
}

static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio,
				 u8 *stab, u32 max_P,
				 struct gred_sched_data **prealloc,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q = table->tab[dp];

	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
			      ctl->Scell_log, stab)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
		return -EINVAL;
	}

	if (!q) {
		table->tab[dp] = q = *prealloc;
		*prealloc = NULL;
		if (!q)
			return -ENOMEM;
		q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
	}

	q->DP = dp;
	q->prio = prio;
	if (ctl->limit > sch->limit)
		q->limit = sch->limit;
	else
		q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->vars);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab, max_P);
	red_set_vars(&q->vars);
	return 0;
}

static const struct nla_policy gred_vq_policy[TCA_GRED_VQ_MAX + 1] = {
	[TCA_GRED_VQ_DP]	= { .type = NLA_U32 },
	[TCA_GRED_VQ_FLAGS]	= { .type = NLA_U32 },
};

static const struct nla_policy gred_vqe_policy[TCA_GRED_VQ_ENTRY_MAX + 1] = {
	[TCA_GRED_VQ_ENTRY]	= { .type = NLA_NESTED },
};

static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB]		= { .len = 256 },
	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
	[TCA_GRED_VQ_LIST]	= { .type = NLA_NESTED },
};

static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	u32 dp;

	nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
				    gred_vq_policy, NULL);

	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);

	if (tb[TCA_GRED_VQ_FLAGS])
		table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
}

static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
{
	const struct nlattr *attr;
	int rem;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			gred_vq_apply(table, attr);
			break;
		}
	}
}
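/*
 * Illustrative iproute2 usage (assumed syntax; tc-gred(8) is
 * authoritative). The virtual-queue table is declared first, then each
 * virtual queue is configured with its RED parameters:
 *
 *	tc qdisc add dev eth0 root gred setup DPs 4 default 3 grio
 *	tc qdisc change dev eth0 root gred DP 0 prio 2 limit 60KB \
 *		min 15KB max 45KB burst 20 avpkt 1000 bandwidth 10Mbit \
 *		probability 0.02
 */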
static int gred_vq_validate(struct gred_sched *table, u32 cdp,
			    const struct nlattr *entry,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	int err;
	u32 dp;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
					  gred_vq_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_GRED_VQ_DP]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with no index specified");
		return -EINVAL;
	}

	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
	if (dp >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with index out of bounds");
		return -EINVAL;
	}
	if (dp != cdp && !table->tab[dp]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue not yet instantiated");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_FLAGS]) {
		u32 red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);

		if (table->red_flags && table->red_flags != red_flags) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't change per-virtual queue RED flags when per-Qdisc flags are used");
			return -EINVAL;
		}
		if (red_flags & ~GRED_VQ_RED_FLAGS) {
			NL_SET_ERR_MSG_MOD(extack,
					   "invalid RED flags specified");
			return -EINVAL;
		}
	}

	return 0;
}

static int gred_vqs_validate(struct gred_sched *table, u32 cdp,
			     struct nlattr *vqs, struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int rem, err;

	err = nla_validate_nested_deprecated(vqs, TCA_GRED_VQ_ENTRY_MAX,
					     gred_vqe_policy, extack);
	if (err < 0)
		return err;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			err = gred_vq_validate(table, cdp, attr, extack);
			if (err)
				return err;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes");
			return -EINVAL;
		}
	}

	if (rem > 0) {
		NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list");
		return -EINVAL;
	}

	return 0;
}

static int gred_change(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;
	u32 max_P;
	struct gred_sched_data *prealloc;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
		if (tb[TCA_GRED_LIMIT] != NULL)
			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
		return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
	}

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL ||
	    tb[TCA_GRED_LIMIT] != NULL) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't configure Qdisc and virtual queue at the same time");
		return -EINVAL;
	}

	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack,
				   "virtual queue index above virtual queue count");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_LIST]) {
		err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
					extack);
		if (err)
			return err;
	}

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,
			     extack);
	if (err < 0)
		goto err_unlock_free;

	if (tb[TCA_GRED_VQ_LIST])
		gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	sch_tree_unlock(sch);
	kfree(prealloc);

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;

err_unlock_free:
	sch_tree_unlock(sch);
	kfree(prealloc);
	return err;
}
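/*
 * gred_init() below accepts only the table definition (TCA_GRED_DPS) and
 * the optional TCA_GRED_LIMIT; per-virtual-queue RED parameters are
 * rejected at init time and must arrive later through gred_change().
 */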
		table->opt = kzalloc(sizeof(*table->opt), GFP_KERNEL);
		if (!table->opt)
			return -ENOMEM;
	}

	return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
}

static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *vqs, *opts = NULL;
	int i;
	u32 max_p[MAX_DPs];
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	if (gred_offload_dump_stats(sch))
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		max_p[i] = q ? q->parms.max_P : 0;
	}
	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
		goto nla_put_failure;

	/* Old style all-in-one dump of VQs */
	parms = nla_nest_start_noflag(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;
		unsigned long qavg;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message.
			 * This is how we indicate to tc that there is no VQ
			 * at this DP.
			 */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= gred_backlog(table, q, sch);
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);

		qavg = red_calc_qavg(&q->parms, &q->vars,
				     q->vars.qavg >> q->parms.Wlog);
		opt.qave = qavg >> q->parms.Wlog;

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	/* Dump the VQs again, in more structured way */
	vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST);
	if (!vqs)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct nlattr *vq;

		if (!q)
			continue;

		vq = nla_nest_start_noflag(skb, TCA_GRED_VQ_ENTRY);
		if (!vq)
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
			goto nla_put_failure;

		/* Stats */
		if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
				      TCA_GRED_VQ_PAD))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
				gred_backlog(table, q, sch)))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
				q->stats.prob_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
				q->stats.prob_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
				q->stats.forced_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
				q->stats.forced_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
			goto nla_put_failure;

		nla_nest_end(skb, vq);
	}
	nla_nest_end(skb, vqs);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
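/*
 * Note that gred_dump() above reports every virtual queue twice: once in
 * the legacy all-in-one TCA_GRED_PARMS array (always MAX_DPs entries, with
 * DP >= MAX_DPs marking an unconfigured slot) and once in the structured
 * per-VQ TCA_GRED_VQ_LIST nest, which also carries the per-VQ statistics.
 */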
qdisc_priv(sch); int i; for (i = 0; i < table->DPs; i++) gred_destroy_vq(table->tab[i]); gred_offload(sch, TC_GRED_DESTROY); kfree(table->opt); } static struct Qdisc_ops gred_qdisc_ops __read_mostly = { .id = "gred", .priv_size = sizeof(struct gred_sched), .enqueue = gred_enqueue, .dequeue = gred_dequeue, .peek = qdisc_peek_head, .init = gred_init, .reset = gred_reset, .destroy = gred_destroy, .change = gred_change, .dump = gred_dump, .owner = THIS_MODULE, }; MODULE_ALIAS_NET_SCH("gred"); static int __init gred_module_init(void) { return register_qdisc(&gred_qdisc_ops); } static void __exit gred_module_exit(void) { unregister_qdisc(&gred_qdisc_ops); } module_init(gred_module_init) module_exit(gred_module_exit) MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Generic Random Early Detection qdisc");
// SPDX-License-Identifier: GPL-2.0-only /* * blockcheck.c * * Checksum and ECC codes for the OCFS2 userspace library. * * Copyright (C) 2006, 2008 Oracle. All rights reserved. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/crc32.h> #include <linux/buffer_head.h> #include <linux/bitops.h> #include <linux/debugfs.h> #include <linux/module.h> #include <linux/fs.h> #include <asm/byteorder.h> #include <cluster/masklog.h> #include "ocfs2.h" #include "blockcheck.h" /* * We use the following conventions: * * d = # data bits * p = # parity bits * c = # total code bits (d + p) */ /* * Calculate the bit offset in the hamming code buffer based on the bit's * offset in the data buffer. Since the hamming code reserves all * power-of-two bits for parity, the data bit number and the code bit * number are offset by all the parity bits beforehand. * * Recall that bit numbers in hamming code are 1-based. This function * takes the 0-based data bit from the caller. * * An example. Take bit 1 of the data buffer. 1 is a power of two (2^0), * so it's a parity bit. 2 is a power of two (2^1), so it's a parity bit. * 3 is not a power of two. 
So bit 1 of the data buffer ends up as bit 3 * in the code buffer. * * The caller can pass in *p if it wants to keep track of the most recent * number of parity bits added. This allows the function to start the * calculation at the last place. */ static unsigned int calc_code_bit(unsigned int i, unsigned int *p_cache) { unsigned int b, p = 0; /* * Data bits are 0-based, but we're talking code bits, which * are 1-based. */ b = i + 1; /* Use the cache if it is there */ if (p_cache) p = *p_cache; b += p; /* * For every power of two below our bit number, bump our bit. * * We compare with (b + 1) because we have to compare with what b * would be _if_ it were bumped up by the parity bit. Capice? * * p is set above. */ for (; (1 << p) < (b + 1); p++) b++; if (p_cache) *p_cache = p; return b; } /* * This is the low level encoder function. It can be called across * multiple hunks just like the crc32 code. 'd' is the number of bits * _in_this_hunk_. nr is the bit offset of this hunk. So, if you had * two 512B buffers, you would do it like so: * * parity = ocfs2_hamming_encode(0, buf1, 512 * 8, 0); * parity = ocfs2_hamming_encode(parity, buf2, 512 * 8, 512 * 8); * * If you just have one buffer, use ocfs2_hamming_encode_block(). */ u32 ocfs2_hamming_encode(u32 parity, void *data, unsigned int d, unsigned int nr) { unsigned int i, b, p = 0; BUG_ON(!d); /* * b is the hamming code bit number. Hamming code specifies a * 1-based array, but C uses 0-based. So 'i' is for C, and 'b' is * for the algorithm. * * The i++ in the for loop is so that the start offset passed * to ocfs2_find_next_bit_set() is one greater than the previously * found bit. */ for (i = 0; (i = ocfs2_find_next_bit(data, d, i)) < d; i++) { /* * i is the offset in this hunk, nr + i is the total bit * offset. */ b = calc_code_bit(nr + i, &p); /* * Data bits in the resultant code are checked by * parity bits that are part of the bit number * representation. Huh? * * <wikipedia href="https://en.wikipedia.org/wiki/Hamming_code"> * In other words, the parity bit at position 2^k * checks bits in positions having bit k set in * their binary representation. Conversely, for * instance, bit 13, i.e. 1101(2), is checked by * bits 1000(2) = 8, 0100(2)=4 and 0001(2) = 1. * </wikipedia> * * Note that 'k' is the _code_ bit number. 'b' in * our loop. */ parity ^= b; } /* While the data buffer was treated as little endian, the * return value is in host endian. */ return parity; } u32 ocfs2_hamming_encode_block(void *data, unsigned int blocksize) { return ocfs2_hamming_encode(0, data, blocksize * 8, 0); } /* * Like ocfs2_hamming_encode(), this can handle hunks. nr is the bit * offset of the current hunk. If bit to be fixed is not part of the * current hunk, this does nothing. * * If you only have one hunk, use ocfs2_hamming_fix_block(). */ void ocfs2_hamming_fix(void *data, unsigned int d, unsigned int nr, unsigned int fix) { unsigned int i, b; BUG_ON(!d); /* * If the bit to fix has an hweight of 1, it's a parity bit. One * busted parity bit is its own error. Nothing to do here. */ if (hweight32(fix) == 1) return; /* * nr + d is the bit right past the data hunk we're looking at. * If fix after that, nothing to do */ if (fix >= calc_code_bit(nr + d, NULL)) return; /* * nr is the offset in the data hunk we're starting at. Let's * start b at the offset in the code buffer. See hamming_encode() * for a more detailed description of 'b'. 
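 * (Worked example, following calc_code_bit(): 0-based data bits 0, 1, 2
 * and 3 land on code bits 3, 5, 6 and 7, since code positions 1, 2 and 4
 * are reserved for parity.)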
*/ b = calc_code_bit(nr, NULL); /* If the fix is before this hunk, nothing to do */ if (fix < b) return; for (i = 0; i < d; i++, b++) { /* Skip past parity bits */ while (hweight32(b) == 1) b++; /* * i is the offset in this data hunk. * nr + i is the offset in the total data buffer. * b is the offset in the total code buffer. * * Thus, when b == fix, bit i in the current hunk needs * fixing. */ if (b == fix) { if (ocfs2_test_bit(i, data)) ocfs2_clear_bit(i, data); else ocfs2_set_bit(i, data); break; } } } void ocfs2_hamming_fix_block(void *data, unsigned int blocksize, unsigned int fix) { ocfs2_hamming_fix(data, blocksize * 8, 0, fix); } /* * Debugfs handling. */ #ifdef CONFIG_DEBUG_FS static int blockcheck_u64_get(void *data, u64 *val) { *val = *(u64 *)data; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(blockcheck_fops, blockcheck_u64_get, NULL, "%llu\n"); static void ocfs2_blockcheck_debug_remove(struct ocfs2_blockcheck_stats *stats) { if (stats) { debugfs_remove_recursive(stats->b_debug_dir); stats->b_debug_dir = NULL; } } static void ocfs2_blockcheck_debug_install(struct ocfs2_blockcheck_stats *stats, struct dentry *parent) { struct dentry *dir; dir = debugfs_create_dir("blockcheck", parent); stats->b_debug_dir = dir; debugfs_create_file("blocks_checked", S_IFREG | S_IRUSR, dir, &stats->b_check_count, &blockcheck_fops); debugfs_create_file("checksums_failed", S_IFREG | S_IRUSR, dir, &stats->b_failure_count, &blockcheck_fops); debugfs_create_file("ecc_recoveries", S_IFREG | S_IRUSR, dir, &stats->b_recover_count, &blockcheck_fops); } #else static inline void ocfs2_blockcheck_debug_install(struct ocfs2_blockcheck_stats *stats, struct dentry *parent) { } static inline void ocfs2_blockcheck_debug_remove(struct ocfs2_blockcheck_stats *stats) { } #endif /* CONFIG_DEBUG_FS */ /* Always-called wrappers for starting and stopping the debugfs files */ void ocfs2_blockcheck_stats_debugfs_install(struct ocfs2_blockcheck_stats *stats, struct dentry *parent) { ocfs2_blockcheck_debug_install(stats, parent); } void ocfs2_blockcheck_stats_debugfs_remove(struct ocfs2_blockcheck_stats *stats) { ocfs2_blockcheck_debug_remove(stats); } static void ocfs2_blockcheck_inc_check(struct ocfs2_blockcheck_stats *stats) { u64 new_count; if (!stats) return; spin_lock(&stats->b_lock); stats->b_check_count++; new_count = stats->b_check_count; spin_unlock(&stats->b_lock); if (!new_count) mlog(ML_NOTICE, "Block check count has wrapped\n"); } static void ocfs2_blockcheck_inc_failure(struct ocfs2_blockcheck_stats *stats) { u64 new_count; if (!stats) return; spin_lock(&stats->b_lock); stats->b_failure_count++; new_count = stats->b_failure_count; spin_unlock(&stats->b_lock); if (!new_count) mlog(ML_NOTICE, "Checksum failure count has wrapped\n"); } static void ocfs2_blockcheck_inc_recover(struct ocfs2_blockcheck_stats *stats) { u64 new_count; if (!stats) return; spin_lock(&stats->b_lock); stats->b_recover_count++; new_count = stats->b_recover_count; spin_unlock(&stats->b_lock); if (!new_count) mlog(ML_NOTICE, "ECC recovery count has wrapped\n"); } /* * These are the low-level APIs for using the ocfs2_block_check structure. */ /* * This function generates check information for a block. * data is the block to be checked. bc is a pointer to the * ocfs2_block_check structure describing the crc32 and the ecc. * * bc should be a pointer inside data, as the function will * take care of zeroing it before calculating the check information. 
If * bc does not point inside data, the caller must make sure any inline * ocfs2_block_check structures are zeroed. * * The data buffer must be in on-disk endian (little endian for ocfs2). * bc will be filled with little-endian values and will be ready to go to * disk. */ void ocfs2_block_check_compute(void *data, size_t blocksize, struct ocfs2_block_check *bc) { u32 crc; u32 ecc; memset(bc, 0, sizeof(struct ocfs2_block_check)); crc = crc32_le(~0, data, blocksize); ecc = ocfs2_hamming_encode_block(data, blocksize); /* * No ecc'd ocfs2 structure is larger than 4K, so ecc will be no * larger than 16 bits. */ BUG_ON(ecc > USHRT_MAX); bc->bc_crc32e = cpu_to_le32(crc); bc->bc_ecc = cpu_to_le16((u16)ecc); } /* * This function validates existing check information. Like _compute, * the function will take care of zeroing bc before calculating check codes. * If bc is not a pointer inside data, the caller must have zeroed any * inline ocfs2_block_check structures. * * Again, the data passed in should be the on-disk endian. */ int ocfs2_block_check_validate(void *data, size_t blocksize, struct ocfs2_block_check *bc, struct ocfs2_blockcheck_stats *stats) { int rc = 0; u32 bc_crc32e; u16 bc_ecc; u32 crc, ecc; ocfs2_blockcheck_inc_check(stats); bc_crc32e = le32_to_cpu(bc->bc_crc32e); bc_ecc = le16_to_cpu(bc->bc_ecc); memset(bc, 0, sizeof(struct ocfs2_block_check)); /* Fast path - if the crc32 validates, we're good to go */ crc = crc32_le(~0, data, blocksize); if (crc == bc_crc32e) goto out; ocfs2_blockcheck_inc_failure(stats); mlog(ML_ERROR, "CRC32 failed: stored: 0x%x, computed 0x%x. Applying ECC.\n", (unsigned int)bc_crc32e, (unsigned int)crc); /* Ok, try ECC fixups */ ecc = ocfs2_hamming_encode_block(data, blocksize); ocfs2_hamming_fix_block(data, blocksize, ecc ^ bc_ecc); /* And check the crc32 again */ crc = crc32_le(~0, data, blocksize); if (crc == bc_crc32e) { ocfs2_blockcheck_inc_recover(stats); goto out; } mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n", (unsigned int)bc_crc32e, (unsigned int)crc); rc = -EIO; out: bc->bc_crc32e = cpu_to_le32(bc_crc32e); bc->bc_ecc = cpu_to_le16(bc_ecc); return rc; } /* * This function generates check information for a list of buffer_heads. * bhs is the blocks to be checked. bc is a pointer to the * ocfs2_block_check structure describing the crc32 and the ecc. * * bc should be a pointer inside data, as the function will * take care of zeroing it before calculating the check information. If * bc does not point inside data, the caller must make sure any inline * ocfs2_block_check structures are zeroed. * * The data buffer must be in on-disk endian (little endian for ocfs2). * bc will be filled with little-endian values and will be ready to go to * disk. */ void ocfs2_block_check_compute_bhs(struct buffer_head **bhs, int nr, struct ocfs2_block_check *bc) { int i; u32 crc, ecc; BUG_ON(nr < 0); if (!nr) return; memset(bc, 0, sizeof(struct ocfs2_block_check)); for (i = 0, crc = ~0, ecc = 0; i < nr; i++) { crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size); /* * The number of bits in a buffer is obviously b_size*8. * The offset of this buffer is b_size*i, so the bit offset * of this buffer is b_size*8*i. */ ecc = (u16)ocfs2_hamming_encode(ecc, bhs[i]->b_data, bhs[i]->b_size * 8, bhs[i]->b_size * 8 * i); } /* * No ecc'd ocfs2 structure is larger than 4K, so ecc will be no * larger than 16 bits. 
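 * (For instance, a 4KB block holds 32768 data bits; even after adding the
 * handful of parity positions, the largest code-bit number stays far
 * below the 65536 values a u16 can represent.)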
*/ BUG_ON(ecc > USHRT_MAX); bc->bc_crc32e = cpu_to_le32(crc); bc->bc_ecc = cpu_to_le16((u16)ecc); } /* * This function validates existing check information on a list of * buffer_heads. Like _compute_bhs, the function will take care of * zeroing bc before calculating check codes. If bc is not a pointer * inside data, the caller must have zeroed any inline * ocfs2_block_check structures. * * Again, the data passed in should be the on-disk endian. */ int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr, struct ocfs2_block_check *bc, struct ocfs2_blockcheck_stats *stats) { int i, rc = 0; u32 bc_crc32e; u16 bc_ecc; u32 crc, ecc, fix; BUG_ON(nr < 0); if (!nr) return 0; ocfs2_blockcheck_inc_check(stats); bc_crc32e = le32_to_cpu(bc->bc_crc32e); bc_ecc = le16_to_cpu(bc->bc_ecc); memset(bc, 0, sizeof(struct ocfs2_block_check)); /* Fast path - if the crc32 validates, we're good to go */ for (i = 0, crc = ~0; i < nr; i++) crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size); if (crc == bc_crc32e) goto out; ocfs2_blockcheck_inc_failure(stats); mlog(ML_ERROR, "CRC32 failed: stored: %u, computed %u. Applying ECC.\n", (unsigned int)bc_crc32e, (unsigned int)crc); /* Ok, try ECC fixups */ for (i = 0, ecc = 0; i < nr; i++) { /* * The number of bits in a buffer is obviously b_size*8. * The offset of this buffer is b_size*i, so the bit offset * of this buffer is b_size*8*i. */ ecc = (u16)ocfs2_hamming_encode(ecc, bhs[i]->b_data, bhs[i]->b_size * 8, bhs[i]->b_size * 8 * i); } fix = ecc ^ bc_ecc; for (i = 0; i < nr; i++) { /* * Try the fix against each buffer. It will only affect * one of them. */ ocfs2_hamming_fix(bhs[i]->b_data, bhs[i]->b_size * 8, bhs[i]->b_size * 8 * i, fix); } /* And check the crc32 again */ for (i = 0, crc = ~0; i < nr; i++) crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size); if (crc == bc_crc32e) { ocfs2_blockcheck_inc_recover(stats); goto out; } mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n", (unsigned int)bc_crc32e, (unsigned int)crc); rc = -EIO; out: bc->bc_crc32e = cpu_to_le32(bc_crc32e); bc->bc_ecc = cpu_to_le16(bc_ecc); return rc; } /* * These are the main API. They check the superblock flag before * calling the underlying operations. * * They expect the buffer(s) to be in disk format. */ void ocfs2_compute_meta_ecc(struct super_block *sb, void *data, struct ocfs2_block_check *bc) { if (ocfs2_meta_ecc(OCFS2_SB(sb))) ocfs2_block_check_compute(data, sb->s_blocksize, bc); } int ocfs2_validate_meta_ecc(struct super_block *sb, void *data, struct ocfs2_block_check *bc) { int rc = 0; struct ocfs2_super *osb = OCFS2_SB(sb); if (ocfs2_meta_ecc(osb)) rc = ocfs2_block_check_validate(data, sb->s_blocksize, bc, &osb->osb_ecc_stats); return rc; } void ocfs2_compute_meta_ecc_bhs(struct super_block *sb, struct buffer_head **bhs, int nr, struct ocfs2_block_check *bc) { if (ocfs2_meta_ecc(OCFS2_SB(sb))) ocfs2_block_check_compute_bhs(bhs, nr, bc); } int ocfs2_validate_meta_ecc_bhs(struct super_block *sb, struct buffer_head **bhs, int nr, struct ocfs2_block_check *bc) { int rc = 0; struct ocfs2_super *osb = OCFS2_SB(sb); if (ocfs2_meta_ecc(osb)) rc = ocfs2_block_check_validate_bhs(bhs, nr, bc, &osb->osb_ecc_stats); return rc; }
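/*
 * Illustrative sketch (not part of the ocfs2 sources): the multi-hunk
 * usage documented above ocfs2_hamming_encode(), covering two
 * hypothetical 512-byte buffers with a single ECC value.
 */
static u32 example_two_hunk_ecc(void *buf1, void *buf2)
{
	u32 parity;

	/* First hunk starts at bit offset 0, second at 512 * 8. */
	parity = ocfs2_hamming_encode(0, buf1, 512 * 8, 0);
	parity = ocfs2_hamming_encode(parity, buf2, 512 * 8, 512 * 8);
	return parity;
}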
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2008 Oracle. All rights reserved. */ #include <linux/sched.h> #include <linux/pagemap.h> #include <linux/spinlock.h> #include <linux/page-flags.h> #include <asm/bug.h> #include <trace/events/btrfs.h> #include "misc.h" #include "ctree.h" #include "extent_io.h" #include "locking.h" /* * Lockdep class keys for extent_buffer->lock's in this root. For a given * eb, the lockdep key is determined by the btrfs_root it belongs to and * the level the eb occupies in the tree. * * Different roots are used for different purposes and may nest inside each * other, so they require separate keysets. As lockdep keys should be * static, assign keysets according to the purpose of the root as indicated * by btrfs_root->root_key.objectid. This ensures that all special purpose * roots have separate keysets. * * Lock-nesting across peer nodes is always done with the immediate parent * node locked, thus preventing deadlock. As lockdep doesn't know this, use * subclass to avoid triggering lockdep warning in such cases. * * The key is set by the readpage_end_io_hook after the buffer has passed * csum validation but before the pages are unlocked. It is also set by * btrfs_init_new_buffer on freshly allocated blocks. * * We also add a check to make sure the highest level of the tree is the * same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this code * needs updating as well. 
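 * As a concrete example of the naming scheme below, a level-2 node of the
 * extent tree gets the lockdep class name "btrfs-extent-02".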
*/ #ifdef CONFIG_DEBUG_LOCK_ALLOC #if BTRFS_MAX_LEVEL != 8 #error #endif #define DEFINE_LEVEL(stem, level) \ .names[level] = "btrfs-" stem "-0" #level, #define DEFINE_NAME(stem) \ DEFINE_LEVEL(stem, 0) \ DEFINE_LEVEL(stem, 1) \ DEFINE_LEVEL(stem, 2) \ DEFINE_LEVEL(stem, 3) \ DEFINE_LEVEL(stem, 4) \ DEFINE_LEVEL(stem, 5) \ DEFINE_LEVEL(stem, 6) \ DEFINE_LEVEL(stem, 7) static struct btrfs_lockdep_keyset { u64 id; /* root objectid */ /* Longest entry: btrfs-block-group-00 */ char names[BTRFS_MAX_LEVEL][24]; struct lock_class_key keys[BTRFS_MAX_LEVEL]; } btrfs_lockdep_keysets[] = { { .id = BTRFS_ROOT_TREE_OBJECTID, DEFINE_NAME("root") }, { .id = BTRFS_EXTENT_TREE_OBJECTID, DEFINE_NAME("extent") }, { .id = BTRFS_CHUNK_TREE_OBJECTID, DEFINE_NAME("chunk") }, { .id = BTRFS_DEV_TREE_OBJECTID, DEFINE_NAME("dev") }, { .id = BTRFS_CSUM_TREE_OBJECTID, DEFINE_NAME("csum") }, { .id = BTRFS_QUOTA_TREE_OBJECTID, DEFINE_NAME("quota") }, { .id = BTRFS_TREE_LOG_OBJECTID, DEFINE_NAME("log") }, { .id = BTRFS_TREE_RELOC_OBJECTID, DEFINE_NAME("treloc") }, { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, DEFINE_NAME("dreloc") }, { .id = BTRFS_UUID_TREE_OBJECTID, DEFINE_NAME("uuid") }, { .id = BTRFS_FREE_SPACE_TREE_OBJECTID, DEFINE_NAME("free-space") }, { .id = BTRFS_BLOCK_GROUP_TREE_OBJECTID, DEFINE_NAME("block-group") }, { .id = BTRFS_RAID_STRIPE_TREE_OBJECTID, DEFINE_NAME("raid-stripe") }, { .id = 0, DEFINE_NAME("tree") }, }; #undef DEFINE_LEVEL #undef DEFINE_NAME void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level) { struct btrfs_lockdep_keyset *ks; ASSERT(level < ARRAY_SIZE(ks->keys)); /* Find the matching keyset, id 0 is the default entry */ for (ks = btrfs_lockdep_keysets; ks->id; ks++) if (ks->id == objectid) break; lockdep_set_class_and_name(&eb->lock, &ks->keys[level], ks->names[level]); } void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb) { if (test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state)) btrfs_set_buffer_lockdep_class(btrfs_root_id(root), eb, btrfs_header_level(eb)); } #endif #ifdef CONFIG_BTRFS_DEBUG static void btrfs_set_eb_lock_owner(struct extent_buffer *eb, pid_t owner) { eb->lock_owner = owner; } #else static void btrfs_set_eb_lock_owner(struct extent_buffer *eb, pid_t owner) { } #endif /* * Extent buffer locking * ===================== * * We use a rw_semaphore for tree locking, and the semantics are exactly the * same: * * - reader/writer exclusion * - writer/writer exclusion * - reader/reader sharing * - try-lock semantics for readers and writers * * The rwsem implementation does opportunistic spinning which reduces number of * times the locking task needs to sleep. */ /* * btrfs_tree_read_lock_nested - lock extent buffer for read * @eb: the eb to be locked * @nest: the nesting level to be used for lockdep * * This takes the read lock on the extent buffer, using the specified nesting * level for lockdep purposes. */ void btrfs_tree_read_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesting nest) { u64 start_ns = 0; if (trace_btrfs_tree_read_lock_enabled()) start_ns = ktime_get_ns(); down_read_nested(&eb->lock, nest); trace_btrfs_tree_read_lock(eb, start_ns); } /* * Try-lock for read. * * Return 1 if the rwlock has been taken, 0 otherwise */ int btrfs_try_tree_read_lock(struct extent_buffer *eb) { if (down_read_trylock(&eb->lock)) { trace_btrfs_try_tree_read_lock(eb); return 1; } return 0; } /* * Try-lock for write. 
* * Return 1 if the rwlock has been taken, 0 otherwise */ int btrfs_try_tree_write_lock(struct extent_buffer *eb) { if (down_write_trylock(&eb->lock)) { btrfs_set_eb_lock_owner(eb, current->pid); trace_btrfs_try_tree_write_lock(eb); return 1; } return 0; } /* * Release read lock. */ void btrfs_tree_read_unlock(struct extent_buffer *eb) { trace_btrfs_tree_read_unlock(eb); up_read(&eb->lock); } /* * Lock eb for write. * * @eb: the eb to lock * @nest: the nesting to use for the lock * * Returns with the eb->lock write locked. */ void btrfs_tree_lock_nested(struct extent_buffer *eb, enum btrfs_lock_nesting nest) __acquires(&eb->lock) { u64 start_ns = 0; if (trace_btrfs_tree_lock_enabled()) start_ns = ktime_get_ns(); down_write_nested(&eb->lock, nest); btrfs_set_eb_lock_owner(eb, current->pid); trace_btrfs_tree_lock(eb, start_ns); } /* * Release the write lock. */ void btrfs_tree_unlock(struct extent_buffer *eb) { trace_btrfs_tree_unlock(eb); btrfs_set_eb_lock_owner(eb, 0); up_write(&eb->lock); } /* * This releases any locks held in the path starting at level and going all the * way up to the root. * * btrfs_search_slot will keep the lock held on higher nodes in a few corner * cases, such as COW of the block at slot zero in the node. This ignores * those rules, and it should only be called when there are no more updates to * be done higher up in the tree. */ void btrfs_unlock_up_safe(struct btrfs_path *path, int level) { int i; if (path->keep_locks) return; for (i = level; i < BTRFS_MAX_LEVEL; i++) { if (!path->nodes[i]) continue; if (!path->locks[i]) continue; btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]); path->locks[i] = 0; } } /* * Loop around taking references on and locking the root node of the tree until * we end up with a lock on the root node. * * Return: root extent buffer with write lock held */ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root) { struct extent_buffer *eb; while (1) { eb = btrfs_root_node(root); btrfs_maybe_reset_lockdep_class(root, eb); btrfs_tree_lock(eb); if (eb == root->node) break; btrfs_tree_unlock(eb); free_extent_buffer(eb); } return eb; } /* * Loop around taking references on and locking the root node of the tree until * we end up with a lock on the root node. * * Return: root extent buffer with read lock held */ struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root) { struct extent_buffer *eb; while (1) { eb = btrfs_root_node(root); btrfs_maybe_reset_lockdep_class(root, eb); btrfs_tree_read_lock(eb); if (eb == root->node) break; btrfs_tree_read_unlock(eb); free_extent_buffer(eb); } return eb; } /* * Loop around taking references on and locking the root node of the tree in * nowait mode until we end up with a lock on the root node, or return to * avoid blocking. * * Return: root extent buffer with read lock held or -EAGAIN. */ struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root) { struct extent_buffer *eb; while (1) { eb = btrfs_root_node(root); if (!btrfs_try_tree_read_lock(eb)) { free_extent_buffer(eb); return ERR_PTR(-EAGAIN); } if (eb == root->node) break; btrfs_tree_read_unlock(eb); free_extent_buffer(eb); } return eb; } /* * DREW locks * ========== * * DREW stands for double-reader-writer-exclusion lock. It's used in situations * where you want to provide A-B exclusion but not AA or BB. * * The current implementation gives more priority to readers. 
If a reader and a * writer both race to acquire their respective sides of the lock, the writer * would yield its lock as soon as it detects a concurrent reader. Additionally, * if there are pending readers, no new writers would be allowed to come in and * acquire the lock. */ void btrfs_drew_lock_init(struct btrfs_drew_lock *lock) { atomic_set(&lock->readers, 0); atomic_set(&lock->writers, 0); init_waitqueue_head(&lock->pending_readers); init_waitqueue_head(&lock->pending_writers); } /* Return true if acquisition is successful, false otherwise */ bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock) { if (atomic_read(&lock->readers)) return false; atomic_inc(&lock->writers); /* Ensure writers count is updated before we check for pending readers */ smp_mb__after_atomic(); if (atomic_read(&lock->readers)) { btrfs_drew_write_unlock(lock); return false; } return true; } void btrfs_drew_write_lock(struct btrfs_drew_lock *lock) { while (true) { if (btrfs_drew_try_write_lock(lock)) return; wait_event(lock->pending_writers, !atomic_read(&lock->readers)); } } void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock) { /* * atomic_dec_and_test() implies a full barrier, so woken up readers are * guaranteed to see the decrement. */ if (atomic_dec_and_test(&lock->writers)) wake_up(&lock->pending_readers); } void btrfs_drew_read_lock(struct btrfs_drew_lock *lock) { atomic_inc(&lock->readers); /* * Ensure the pending reader count is perceived BEFORE this reader * goes to sleep in case of active writers. This guarantees new writers * won't be allowed and that the current reader will be woken up when * the last active writer finishes its job. */ smp_mb__after_atomic(); wait_event(lock->pending_readers, atomic_read(&lock->writers) == 0); } void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock) { /* * atomic_dec_and_test() implies a full barrier, so woken up writers * are guaranteed to see the decrement. */ if (atomic_dec_and_test(&lock->readers)) wake_up(&lock->pending_writers); }
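/*
 * Illustrative sketch (not from the btrfs sources): the A-B exclusion
 * pattern the DREW lock provides. Any number of readers may hold the
 * lock concurrently, as may any number of writers, but the two sides
 * exclude each other. "example_lock" is hypothetical and assumed to
 * have been set up with btrfs_drew_lock_init().
 */
static struct btrfs_drew_lock example_lock;

static void example_reader_side(void)
{
	btrfs_drew_read_lock(&example_lock);
	/* ... work that must never overlap a write-side holder ... */
	btrfs_drew_read_unlock(&example_lock);
}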
// SPDX-License-Identifier: GPL-2.0-or-later /* * ChaCha20-Poly1305 AEAD, RFC7539 * * Copyright (C) 2015 Martin Willi */ #include <crypto/internal/aead.h> #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> #include <crypto/chacha.h> #include <crypto/poly1305.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> struct chachapoly_instance_ctx { struct crypto_skcipher_spawn chacha; struct crypto_ahash_spawn poly; unsigned int saltlen; }; struct chachapoly_ctx { struct crypto_skcipher *chacha; struct crypto_ahash *poly; /* key bytes we use for the ChaCha20 IV */ unsigned int saltlen; u8 salt[]; }; struct poly_req { /* zero byte padding for AD/ciphertext, as needed */ u8 pad[POLY1305_BLOCK_SIZE]; /* tail data with AD/ciphertext lengths */ struct { __le64 assoclen; __le64 cryptlen; } tail; struct scatterlist src[1]; struct 
ahash_request req; /* must be last member */ }; struct chacha_req { u8 iv[CHACHA_IV_SIZE]; struct scatterlist src[1]; struct skcipher_request req; /* must be last member */ }; struct chachapoly_req_ctx { struct scatterlist src[2]; struct scatterlist dst[2]; /* the key we generate for Poly1305 using Chacha20 */ u8 key[POLY1305_KEY_SIZE]; /* calculated Poly1305 tag */ u8 tag[POLY1305_DIGEST_SIZE]; /* length of data to en/decrypt, without ICV */ unsigned int cryptlen; /* Actual AD, excluding IV */ unsigned int assoclen; /* request flags, with MAY_SLEEP cleared if needed */ u32 flags; union { struct poly_req poly; struct chacha_req chacha; } u; }; static inline void async_done_continue(struct aead_request *req, int err, int (*cont)(struct aead_request *)) { if (!err) { struct chachapoly_req_ctx *rctx = aead_request_ctx(req); rctx->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; err = cont(req); } if (err != -EINPROGRESS && err != -EBUSY) aead_request_complete(req, err); } static void chacha_iv(u8 *iv, struct aead_request *req, u32 icb) { struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); __le32 leicb = cpu_to_le32(icb); memcpy(iv, &leicb, sizeof(leicb)); memcpy(iv + sizeof(leicb), ctx->salt, ctx->saltlen); memcpy(iv + sizeof(leicb) + ctx->saltlen, req->iv, CHACHA_IV_SIZE - sizeof(leicb) - ctx->saltlen); } static int poly_verify_tag(struct aead_request *req) { struct chachapoly_req_ctx *rctx = aead_request_ctx(req); u8 tag[sizeof(rctx->tag)]; scatterwalk_map_and_copy(tag, req->src, req->assoclen + rctx->cryptlen, sizeof(tag), 0); if (crypto_memneq(tag, rctx->tag, sizeof(tag))) return -EBADMSG; return 0; } static int poly_copy_tag(struct aead_request *req) { struct chachapoly_req_ctx *rctx = aead_request_ctx(req); scatterwalk_map_and_copy(rctx->tag, req->dst, req->assoclen + rctx->cryptlen, sizeof(rctx->tag), 1); return 0; } static void chacha_decrypt_done(void *data, int err) { async_done_continue(data, err, poly_verify_tag); } static int chacha_decrypt(struct aead_request *req) { struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct chachapoly_req_ctx *rctx = aead_request_ctx(req); struct chacha_req *creq = &rctx->u.chacha; struct scatterlist *src, *dst; int err; if (rctx->cryptlen == 0) goto skip; chacha_iv(creq->iv, req, 1); src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen); dst = src; if (req->src != req->dst) dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen); skcipher_request_set_callback(&creq->req, rctx->flags, chacha_decrypt_done, req); skcipher_request_set_tfm(&creq->req, ctx->chacha); skcipher_request_set_crypt(&creq->req, src, dst, rctx->cryptlen, creq->iv); err = crypto_skcipher_decrypt(&creq->req); if (err) return err; skip: return poly_verify_tag(req); } static int poly_tail_continue(struct aead_request *req) { struct chachapoly_req_ctx *rctx = aead_request_ctx(req); if (rctx->cryptlen == req->cryptlen) /* encrypting */ return poly_copy_tag(req); return chacha_decrypt(req); } static void poly_tail_done(void *data, int err) { async_done_continue(data, err, poly_tail_continue); } static int poly_tail(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); struct chachapoly_req_ctx *rctx = aead_request_ctx(req); struct poly_req *preq = &rctx->u.poly; int err; preq->tail.assoclen = cpu_to_le64(rctx->assoclen); preq->tail.cryptlen = cpu_to_le64(rctx->cryptlen); sg_init_one(preq->src, &preq->tail, sizeof(preq->tail)); ahash_request_set_callback(&preq->req, 
rctx->flags, poly_tail_done, req); ahash_request_set_tfm(&preq->req, ctx->poly); ahash_request_set_crypt(&preq->req, preq->src, rctx->tag, sizeof(preq->tail)); err = crypto_ahash_finup(&preq->req); if (err) return err; return poly_tail_continue(req); } static void poly_cipherpad_done(void *data, int err) { async_done_continue(data, err, poly_tail); } static int poly_cipherpad(struct aead_request *req) { struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct chachapoly_req_ctx *rctx = aead_request_ctx(req); struct poly_req *preq = &rctx->u.poly; unsigned int padlen; int err; padlen = -rctx->cryptlen % POLY1305_BLOCK_SIZE; memset(preq->pad, 0, sizeof(preq->pad)); sg_init_one(preq->src, preq->pad, padlen); ahash_request_set_callback(&preq->req, rctx->flags, poly_cipherpad_done, req); ahash_request_set_tfm(&preq->req, ctx->poly); ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen); err = crypto_ahash_update(&preq->req); if (err) return err; return poly_tail(req); } static void poly_cipher_done(void *data, int err) { async_done_continue(data, err, poly_cipherpad); } static int poly_cipher(struct aead_request *req) { struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct chachapoly_req_ctx *rctx = aead_request_ctx(req); struct poly_req *preq = &rctx->u.poly; struct scatterlist *crypt = req->src; int err; if (rctx->cryptlen == req->cryptlen) /* encrypting */ crypt = req->dst; crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen); ahash_request_set_callback(&preq->req, rctx->flags, poly_cipher_done, req); ahash_request_set_tfm(&preq->req, ctx->poly); ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen); err = crypto_ahash_update(&preq->req); if (err) return err; return poly_cipherpad(req); } static void poly_adpad_done(void *data, int err) { async_done_continue(data, err, poly_cipher); } static int poly_adpad(struct aead_request *req) { struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct chachapoly_req_ctx *rctx = aead_request_ctx(req); struct poly_req *preq = &rctx->u.poly; unsigned int padlen; int err; padlen = -rctx->assoclen % POLY1305_BLOCK_SIZE; memset(preq->pad, 0, sizeof(preq->pad)); sg_init_one(preq->src, preq->pad, padlen); ahash_request_set_callback(&preq->req, rctx->flags, poly_adpad_done, req); ahash_request_set_tfm(&preq->req, ctx->poly); ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen); err = crypto_ahash_update(&preq->req); if (err) return err; return poly_cipher(req); } static void poly_ad_done(void *data, int err) { async_done_continue(data, err, poly_adpad); } static int poly_ad(struct aead_request *req) { struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct chachapoly_req_ctx *rctx = aead_request_ctx(req); struct poly_req *preq = &rctx->u.poly; int err; ahash_request_set_callback(&preq->req, rctx->flags, poly_ad_done, req); ahash_request_set_tfm(&preq->req, ctx->poly); ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen); err = crypto_ahash_update(&preq->req); if (err) return err; return poly_adpad(req); } static void poly_setkey_done(void *data, int err) { async_done_continue(data, err, poly_ad); } static int poly_setkey(struct aead_request *req) { struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct chachapoly_req_ctx *rctx = aead_request_ctx(req); struct poly_req *preq = &rctx->u.poly; int err; sg_init_one(preq->src, rctx->key, sizeof(rctx->key)); ahash_request_set_callback(&preq->req, rctx->flags, 
poly_setkey_done, req); ahash_request_set_tfm(&preq->req, ctx->poly); ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key)); err = crypto_ahash_update(&preq->req); if (err) return err; return poly_ad(req); } static void poly_init_done(void *data, int err) { async_done_continue(data, err, poly_setkey); } static int poly_init(struct aead_request *req) { struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct chachapoly_req_ctx *rctx = aead_request_ctx(req); struct poly_req *preq = &rctx->u.poly; int err; ahash_request_set_callback(&preq->req, rctx->flags, poly_init_done, req); ahash_request_set_tfm(&preq->req, ctx->poly); err = crypto_ahash_init(&preq->req); if (err) return err; return poly_setkey(req); } static void poly_genkey_done(void *data, int err) { async_done_continue(data, err, poly_init); } static int poly_genkey(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); struct chachapoly_req_ctx *rctx = aead_request_ctx(req); struct chacha_req *creq = &rctx->u.chacha; int err; rctx->assoclen = req->assoclen; if (crypto_aead_ivsize(tfm) == 8) { if (rctx->assoclen < 8) return -EINVAL; rctx->assoclen -= 8; } memset(rctx->key, 0, sizeof(rctx->key)); sg_init_one(creq->src, rctx->key, sizeof(rctx->key)); chacha_iv(creq->iv, req, 0); skcipher_request_set_callback(&creq->req, rctx->flags, poly_genkey_done, req); skcipher_request_set_tfm(&creq->req, ctx->chacha); skcipher_request_set_crypt(&creq->req, creq->src, creq->src, POLY1305_KEY_SIZE, creq->iv); err = crypto_skcipher_decrypt(&creq->req); if (err) return err; return poly_init(req); } static void chacha_encrypt_done(void *data, int err) { async_done_continue(data, err, poly_genkey); } static int chacha_encrypt(struct aead_request *req) { struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct chachapoly_req_ctx *rctx = aead_request_ctx(req); struct chacha_req *creq = &rctx->u.chacha; struct scatterlist *src, *dst; int err; if (req->cryptlen == 0) goto skip; chacha_iv(creq->iv, req, 1); src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen); dst = src; if (req->src != req->dst) dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen); skcipher_request_set_callback(&creq->req, rctx->flags, chacha_encrypt_done, req); skcipher_request_set_tfm(&creq->req, ctx->chacha); skcipher_request_set_crypt(&creq->req, src, dst, req->cryptlen, creq->iv); err = crypto_skcipher_encrypt(&creq->req); if (err) return err; skip: return poly_genkey(req); } static int chachapoly_encrypt(struct aead_request *req) { struct chachapoly_req_ctx *rctx = aead_request_ctx(req); rctx->cryptlen = req->cryptlen; rctx->flags = aead_request_flags(req); /* encrypt call chain: * - chacha_encrypt/done() * - poly_genkey/done() * - poly_init/done() * - poly_setkey/done() * - poly_ad/done() * - poly_adpad/done() * - poly_cipher/done() * - poly_cipherpad/done() * - poly_tail/done/continue() * - poly_copy_tag() */ return chacha_encrypt(req); } static int chachapoly_decrypt(struct aead_request *req) { struct chachapoly_req_ctx *rctx = aead_request_ctx(req); rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE; rctx->flags = aead_request_flags(req); /* decrypt call chain: * - poly_genkey/done() * - poly_init/done() * - poly_setkey/done() * - poly_ad/done() * - poly_adpad/done() * - poly_cipher/done() * - poly_cipherpad/done() * - poly_tail/done/continue() * - chacha_decrypt/done() * - poly_verify_tag() */ return poly_genkey(req); } static int 
chachapoly_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct chachapoly_ctx *ctx = crypto_aead_ctx(aead); if (keylen != ctx->saltlen + CHACHA_KEY_SIZE) return -EINVAL; keylen -= ctx->saltlen; memcpy(ctx->salt, key + keylen, ctx->saltlen); crypto_skcipher_clear_flags(ctx->chacha, CRYPTO_TFM_REQ_MASK); crypto_skcipher_set_flags(ctx->chacha, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); return crypto_skcipher_setkey(ctx->chacha, key, keylen); } static int chachapoly_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { if (authsize != POLY1305_DIGEST_SIZE) return -EINVAL; return 0; } static int chachapoly_init(struct crypto_aead *tfm) { struct aead_instance *inst = aead_alg_instance(tfm); struct chachapoly_instance_ctx *ictx = aead_instance_ctx(inst); struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_skcipher *chacha; struct crypto_ahash *poly; unsigned long align; poly = crypto_spawn_ahash(&ictx->poly); if (IS_ERR(poly)) return PTR_ERR(poly); chacha = crypto_spawn_skcipher(&ictx->chacha); if (IS_ERR(chacha)) { crypto_free_ahash(poly); return PTR_ERR(chacha); } ctx->chacha = chacha; ctx->poly = poly; ctx->saltlen = ictx->saltlen; align = crypto_aead_alignmask(tfm); align &= ~(crypto_tfm_ctx_alignment() - 1); crypto_aead_set_reqsize( tfm, align + offsetof(struct chachapoly_req_ctx, u) + max(offsetof(struct chacha_req, req) + sizeof(struct skcipher_request) + crypto_skcipher_reqsize(chacha), offsetof(struct poly_req, req) + sizeof(struct ahash_request) + crypto_ahash_reqsize(poly))); return 0; } static void chachapoly_exit(struct crypto_aead *tfm) { struct chachapoly_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_ahash(ctx->poly); crypto_free_skcipher(ctx->chacha); } static void chachapoly_free(struct aead_instance *inst) { struct chachapoly_instance_ctx *ctx = aead_instance_ctx(inst); crypto_drop_skcipher(&ctx->chacha); crypto_drop_ahash(&ctx->poly); kfree(inst); } static int chachapoly_create(struct crypto_template *tmpl, struct rtattr **tb, const char *name, unsigned int ivsize) { u32 mask; struct aead_instance *inst; struct chachapoly_instance_ctx *ctx; struct skcipher_alg_common *chacha; struct hash_alg_common *poly; int err; if (ivsize > CHACHAPOLY_IV_SIZE) return -EINVAL; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask); if (err) return err; inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) return -ENOMEM; ctx = aead_instance_ctx(inst); ctx->saltlen = CHACHAPOLY_IV_SIZE - ivsize; err = crypto_grab_skcipher(&ctx->chacha, aead_crypto_instance(inst), crypto_attr_alg_name(tb[1]), 0, mask); if (err) goto err_free_inst; chacha = crypto_spawn_skcipher_alg_common(&ctx->chacha); err = crypto_grab_ahash(&ctx->poly, aead_crypto_instance(inst), crypto_attr_alg_name(tb[2]), 0, mask); if (err) goto err_free_inst; poly = crypto_spawn_ahash_alg(&ctx->poly); err = -EINVAL; if (poly->digestsize != POLY1305_DIGEST_SIZE) goto err_free_inst; /* Need 16-byte IV size, including Initial Block Counter value */ if (chacha->ivsize != CHACHA_IV_SIZE) goto err_free_inst; /* Not a stream cipher? 
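 * ChaCha20 is used as a pure stream cipher here, so the underlying
 * skcipher must advertise a one-byte block size; anything larger would
 * indicate a block-cipher mode this template can't layer Poly1305 over.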
*/ if (chacha->base.cra_blocksize != 1) goto err_free_inst; err = -ENAMETOOLONG; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s,%s)", name, chacha->base.cra_name, poly->base.cra_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s,%s)", name, chacha->base.cra_driver_name, poly->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; inst->alg.base.cra_priority = (chacha->base.cra_priority + poly->base.cra_priority) / 2; inst->alg.base.cra_blocksize = 1; inst->alg.base.cra_alignmask = chacha->base.cra_alignmask; inst->alg.base.cra_ctxsize = sizeof(struct chachapoly_ctx) + ctx->saltlen; inst->alg.ivsize = ivsize; inst->alg.chunksize = chacha->chunksize; inst->alg.maxauthsize = POLY1305_DIGEST_SIZE; inst->alg.init = chachapoly_init; inst->alg.exit = chachapoly_exit; inst->alg.encrypt = chachapoly_encrypt; inst->alg.decrypt = chachapoly_decrypt; inst->alg.setkey = chachapoly_setkey; inst->alg.setauthsize = chachapoly_setauthsize; inst->free = chachapoly_free; err = aead_register_instance(tmpl, inst); if (err) { err_free_inst: chachapoly_free(inst); } return err; } static int rfc7539_create(struct crypto_template *tmpl, struct rtattr **tb) { return chachapoly_create(tmpl, tb, "rfc7539", 12); } static int rfc7539esp_create(struct crypto_template *tmpl, struct rtattr **tb) { return chachapoly_create(tmpl, tb, "rfc7539esp", 8); } static struct crypto_template rfc7539_tmpls[] = { { .name = "rfc7539", .create = rfc7539_create, .module = THIS_MODULE, }, { .name = "rfc7539esp", .create = rfc7539esp_create, .module = THIS_MODULE, }, }; static int __init chacha20poly1305_module_init(void) { return crypto_register_templates(rfc7539_tmpls, ARRAY_SIZE(rfc7539_tmpls)); } static void __exit chacha20poly1305_module_exit(void) { crypto_unregister_templates(rfc7539_tmpls, ARRAY_SIZE(rfc7539_tmpls)); } subsys_initcall(chacha20poly1305_module_init); module_exit(chacha20poly1305_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); MODULE_DESCRIPTION("ChaCha20-Poly1305 AEAD"); MODULE_ALIAS_CRYPTO("rfc7539"); MODULE_ALIAS_CRYPTO("rfc7539esp");
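/*
 * Illustrative note (not part of the original file): for the
 * "rfc7539esp" template, ivsize is 8, so saltlen = 12 - 8 = 4 and the
 * 16-byte ChaCha20 IV assembled by chacha_iv() is laid out as
 *
 *   [4-byte LE block counter][4-byte salt][8-byte request IV]
 *
 * For plain "rfc7539" (ivsize 12, saltlen 0), the 12-byte request IV
 * directly follows the block counter.
 */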
// SPDX-License-Identifier: GPL-2.0-only /* * AppArmor security module * * This file contains AppArmor auditing functions * * Copyright (C) 1998-2008 Novell/SUSE * Copyright 2009-2010 Canonical Ltd. */ #include <linux/audit.h> #include <linux/socket.h> #include "include/apparmor.h" #include "include/audit.h" #include "include/policy.h" #include "include/policy_ns.h" #include "include/secid.h" const char *const audit_mode_names[] = { "normal", "quiet_denied", "quiet", "noquiet", "all" }; static const char *const aa_audit_type[] = { "AUDIT", "ALLOWED", "DENIED", "HINT", "STATUS", "ERROR", "KILLED", "AUTO" }; static const char *const aa_class_names[] = { "none", "unknown", "file", "cap", "net", "rlimits", "domain", "mount", "unknown", "ptrace", "signal", "xmatch", "unknown", "unknown", "net", "unknown", "label", "posix_mqueue", "io_uring", "module", "lsm", "namespace", "io_uring", "unknown", "unknown", "unknown", "unknown", "unknown", "unknown", "unknown", "unknown", "X", "dbus", }; /* * Currently AppArmor auditing is fed straight into the audit framework. * * TODO: * netlink interface for complain mode * user auditing - send user audit messages to the netlink interface * system control of whether user audit messages go to system log */ /** * audit_pre() - core AppArmor function. * @ab: audit buffer to fill (NOT NULL) * @va: audit structure containing data to audit (NOT NULL) * * Record common AppArmor audit data from @va */ static void audit_pre(struct audit_buffer *ab, void *va) { struct apparmor_audit_data *ad = aad_of_va(va); if (aa_g_audit_header) { audit_log_format(ab, "apparmor=\"%s\"", aa_audit_type[ad->type]); } if (ad->op) audit_log_format(ab, " operation=\"%s\"", ad->op); if (ad->class) audit_log_format(ab, " class=\"%s\"", ad->class <= AA_CLASS_LAST ? 
aa_class_names[ad->class] : "unknown"); if (ad->info) { audit_log_format(ab, " info=\"%s\"", ad->info); if (ad->error) audit_log_format(ab, " error=%d", ad->error); } if (ad->subj_label) { struct aa_label *label = ad->subj_label; if (label_isprofile(label)) { struct aa_profile *profile = labels_profile(label); if (profile->ns != root_ns) { audit_log_format(ab, " namespace="); audit_log_untrustedstring(ab, profile->ns->base.hname); } audit_log_format(ab, " profile="); audit_log_untrustedstring(ab, profile->base.hname); } else { audit_log_format(ab, " label="); aa_label_xaudit(ab, root_ns, label, FLAG_VIEW_SUBNS, GFP_ATOMIC); } } if (ad->name) { audit_log_format(ab, " name="); audit_log_untrustedstring(ab, ad->name); } } /** * aa_audit_msg - Log a message to the audit subsystem * @type: audit type for the message * @ad: audit event structure (NOT NULL) * @cb: optional callback fn for type specific fields (MAYBE NULL) */ void aa_audit_msg(int type, struct apparmor_audit_data *ad, void (*cb) (struct audit_buffer *, void *)) { ad->type = type; common_lsm_audit(&ad->common, audit_pre, cb); } /** * aa_audit - Log a profile based audit event to the audit subsystem * @type: audit type for the message * @profile: profile to check against (NOT NULL) * @ad: audit event (NOT NULL) * @cb: optional callback fn for type specific fields (MAYBE NULL) * * Handle default message switching based off of audit mode flags * * Returns: error on failure */ int aa_audit(int type, struct aa_profile *profile, struct apparmor_audit_data *ad, void (*cb) (struct audit_buffer *, void *)) { AA_BUG(!profile); if (type == AUDIT_APPARMOR_AUTO) { if (likely(!ad->error)) { if (AUDIT_MODE(profile) != AUDIT_ALL) return 0; type = AUDIT_APPARMOR_AUDIT; } else if (COMPLAIN_MODE(profile)) type = AUDIT_APPARMOR_ALLOWED; else type = AUDIT_APPARMOR_DENIED; } if (AUDIT_MODE(profile) == AUDIT_QUIET || (type == AUDIT_APPARMOR_DENIED && AUDIT_MODE(profile) == AUDIT_QUIET_DENIED)) return ad->error; if (KILL_MODE(profile) && type == AUDIT_APPARMOR_DENIED) type = AUDIT_APPARMOR_KILL; ad->subj_label = &profile->label; aa_audit_msg(type, ad, cb); if (ad->type == AUDIT_APPARMOR_KILL) (void)send_sig_info(SIGKILL, NULL, ad->common.type == LSM_AUDIT_DATA_TASK && ad->common.u.tsk ? 
ad->common.u.tsk : current); if (ad->type == AUDIT_APPARMOR_ALLOWED) return complain_error(ad->error); return ad->error; } struct aa_audit_rule { struct aa_label *label; }; void aa_audit_rule_free(void *vrule) { struct aa_audit_rule *rule = vrule; if (rule) { if (!IS_ERR(rule->label)) aa_put_label(rule->label); kfree(rule); } } int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule, gfp_t gfp) { struct aa_audit_rule *rule; switch (field) { case AUDIT_SUBJ_ROLE: if (op != Audit_equal && op != Audit_not_equal) return -EINVAL; break; default: return -EINVAL; } rule = kzalloc(sizeof(struct aa_audit_rule), gfp); if (!rule) return -ENOMEM; /* Currently rules are treated as coming from the root ns */ rule->label = aa_label_parse(&root_ns->unconfined->label, rulestr, gfp, true, false); if (IS_ERR(rule->label)) { int err = PTR_ERR(rule->label); aa_audit_rule_free(rule); return err; } *vrule = rule; return 0; } int aa_audit_rule_known(struct audit_krule *rule) { int i; for (i = 0; i < rule->field_count; i++) { struct audit_field *f = &rule->fields[i]; switch (f->type) { case AUDIT_SUBJ_ROLE: return 1; } } return 0; } int aa_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule) { struct aa_audit_rule *rule = vrule; struct aa_label *label; int found = 0; label = aa_secid_to_label(sid); if (!label) return -ENOENT; if (aa_label_is_subset(label, rule->label)) found = 1; switch (field) { case AUDIT_SUBJ_ROLE: switch (op) { case Audit_equal: return found; case Audit_not_equal: return !found; } } return 0; }
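/*
 * Illustrative sketch (not part of the AppArmor sources): the
 * init/match/free cycle implemented by the callbacks above. The rule
 * string is hypothetical.
 */
static int example_rule_roundtrip(u32 sid)
{
	static char rulestr[] = "unconfined";
	void *vrule;
	int err;

	err = aa_audit_rule_init(AUDIT_SUBJ_ROLE, Audit_equal, rulestr,
				 &vrule, GFP_KERNEL);
	if (err)
		return err;
	/* Returns 1 when the secid's label is a subset of the rule label. */
	err = aa_audit_rule_match(sid, AUDIT_SUBJ_ROLE, Audit_equal, vrule);
	aa_audit_rule_free(vrule);
	return err;
}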
// SPDX-License-Identifier: GPL-2.0 /* * Functions related to generic block-layer helpers */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/scatterlist.h> #include "blk.h" static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector) { unsigned int discard_granularity = bdev_discard_granularity(bdev); sector_t granularity_aligned_sector; if (bdev_is_partition(bdev)) sector += bdev->bd_start_sect; granularity_aligned_sector = round_up(sector, discard_granularity >> SECTOR_SHIFT); /* * Make sure subsequent bios start aligned to the discard granularity if * it needs to be split. */ if (granularity_aligned_sector != sector) return granularity_aligned_sector - sector; /* * Align the bio size to the discard granularity to make splitting the bio * at discard granularity boundaries easier in the driver if needed. */ return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT; } struct bio *blk_alloc_discard_bio(struct block_device *bdev, sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask) { sector_t bio_sects = min(*nr_sects, bio_discard_limit(bdev, *sector)); struct bio *bio; if (!bio_sects) return NULL; bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask); if (!bio) return NULL; bio->bi_iter.bi_sector = *sector; bio->bi_iter.bi_size = bio_sects << SECTOR_SHIFT; *sector += bio_sects; *nr_sects -= bio_sects; /* * We can loop for a long time in here if someone does full device * discards (like mkfs). Be nice and allow us to schedule out to avoid * softlocking if preempt is disabled. 
*/ cond_resched(); return bio; } int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop) { struct bio *bio; while ((bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects, gfp_mask))) *biop = bio_chain_and_submit(*biop, bio); return 0; } EXPORT_SYMBOL(__blkdev_issue_discard); /** * blkdev_issue_discard - queue a discard * @bdev: blockdev to issue discard for * @sector: start sector * @nr_sects: number of sectors to discard * @gfp_mask: memory allocation flags (for bio_alloc) * * Description: * Issue a discard request for the sectors in question. */ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask) { struct bio *bio = NULL; struct blk_plug plug; int ret; blk_start_plug(&plug); ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio); if (!ret && bio) { ret = submit_bio_wait(bio); if (ret == -EOPNOTSUPP) ret = 0; bio_put(bio); } blk_finish_plug(&plug); return ret; } EXPORT_SYMBOL(blkdev_issue_discard); static sector_t bio_write_zeroes_limit(struct block_device *bdev) { sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; return min(bdev_write_zeroes_sectors(bdev), (UINT_MAX >> SECTOR_SHIFT) & ~bs_mask); } static void __blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, unsigned flags) { while (nr_sects) { unsigned int len = min_t(sector_t, nr_sects, bio_write_zeroes_limit(bdev)); struct bio *bio; if ((flags & BLKDEV_ZERO_KILLABLE) && fatal_signal_pending(current)) break; bio = bio_alloc(bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask); bio->bi_iter.bi_sector = sector; if (flags & BLKDEV_ZERO_NOUNMAP) bio->bi_opf |= REQ_NOUNMAP; bio->bi_iter.bi_size = len << SECTOR_SHIFT; *biop = bio_chain_and_submit(*biop, bio); nr_sects -= len; sector += len; cond_resched(); } } static int blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp, unsigned flags) { struct bio *bio = NULL; struct blk_plug plug; int ret = 0; blk_start_plug(&plug); __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp, &bio, flags); if (bio) { if ((flags & BLKDEV_ZERO_KILLABLE) && fatal_signal_pending(current)) { bio_await_chain(bio); blk_finish_plug(&plug); return -EINTR; } ret = submit_bio_wait(bio); bio_put(bio); } blk_finish_plug(&plug); /* * For some devices there is no non-destructive way to verify whether * WRITE ZEROES is actually supported. These will clear the capability * on an I/O error, in which case we'll turn any error into * "not supported" here. */ if (ret && !bdev_write_zeroes_sectors(bdev)) return -EOPNOTSUPP; return ret; } /* * Convert a number of 512B sectors to a number of pages. * The result is limited to a number of pages that can fit into a BIO. * Also make sure that the result is always at least 1 (page) for the cases * where nr_sects is lower than the number of sectors in a page.
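 *
 * For example (illustrative): with 4 KiB pages there are 8 sectors per
 * page, so nr_sects == 1 yields 1 page, nr_sects == 17 yields
 * DIV_ROUND_UP(17, 8) == 3 pages, and very large values are capped at
 * BIO_MAX_VECS pages.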
*/ static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects) { sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512); return min(pages, (sector_t)BIO_MAX_VECS); } static void __blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, unsigned int flags) { while (nr_sects) { unsigned int nr_vecs = __blkdev_sectors_to_bio_pages(nr_sects); struct bio *bio; bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask); bio->bi_iter.bi_sector = sector; if ((flags & BLKDEV_ZERO_KILLABLE) && fatal_signal_pending(current)) break; do { unsigned int len, added; len = min_t(sector_t, PAGE_SIZE, nr_sects << SECTOR_SHIFT); added = bio_add_page(bio, ZERO_PAGE(0), len, 0); if (added < len) break; nr_sects -= added >> SECTOR_SHIFT; sector += added >> SECTOR_SHIFT; } while (nr_sects); *biop = bio_chain_and_submit(*biop, bio); cond_resched(); } } static int blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp, unsigned flags) { struct bio *bio = NULL; struct blk_plug plug; int ret = 0; if (flags & BLKDEV_ZERO_NOFALLBACK) return -EOPNOTSUPP; blk_start_plug(&plug); __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp, &bio, flags); if (bio) { if ((flags & BLKDEV_ZERO_KILLABLE) && fatal_signal_pending(current)) { bio_await_chain(bio); blk_finish_plug(&plug); return -EINTR; } ret = submit_bio_wait(bio); bio_put(bio); } blk_finish_plug(&plug); return ret; } /** * __blkdev_issue_zeroout - generate number of zero filled write bios * @bdev: blockdev to issue * @sector: start sector * @nr_sects: number of sectors to write * @gfp_mask: memory allocation flags (for bio_alloc) * @biop: pointer to anchor bio * @flags: controls detailed behavior * * Description: * Zero-fill a block range, either using hardware offload or by explicitly * writing zeroes to the device. * * If a device is using logical block provisioning, the underlying space will * not be released if %flags contains BLKDEV_ZERO_NOUNMAP. * * If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return * -EOPNOTSUPP if no explicit hardware offload for zeroing is provided. */ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, unsigned flags) { if (bdev_read_only(bdev)) return -EPERM; if (bdev_write_zeroes_sectors(bdev)) { __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask, biop, flags); } else { if (flags & BLKDEV_ZERO_NOFALLBACK) return -EOPNOTSUPP; __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, biop, flags); } return 0; } EXPORT_SYMBOL(__blkdev_issue_zeroout); /** * blkdev_issue_zeroout - zero-fill a block range * @bdev: blockdev to write * @sector: start sector * @nr_sects: number of sectors to write * @gfp_mask: memory allocation flags (for bio_alloc) * @flags: controls detailed behavior * * Description: * Zero-fill a block range, either using hardware offload or by explicitly * writing zeroes to the device. See __blkdev_issue_zeroout() for the * valid values for %flags.
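 *
 * Example (illustrative, hypothetical values): zeroing 1 MiB starting at
 * sector 0 on a 512-byte-sector device, allowing the explicit-write
 * fallback:
 *
 *	err = blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL, 0);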
*/ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask, unsigned flags) { int ret; if ((sector | nr_sects) & ((bdev_logical_block_size(bdev) >> 9) - 1)) return -EINVAL; if (bdev_read_only(bdev)) return -EPERM; if (bdev_write_zeroes_sectors(bdev)) { ret = blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask, flags); if (ret != -EOPNOTSUPP) return ret; } return blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, flags); } EXPORT_SYMBOL(blkdev_issue_zeroout); int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp) { sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1; unsigned int max_sectors = bdev_max_secure_erase_sectors(bdev); struct bio *bio = NULL; struct blk_plug plug; int ret = 0; /* make sure that "len << SECTOR_SHIFT" doesn't overflow */ if (max_sectors > UINT_MAX >> SECTOR_SHIFT) max_sectors = UINT_MAX >> SECTOR_SHIFT; max_sectors &= ~bs_mask; if (max_sectors == 0) return -EOPNOTSUPP; if ((sector | nr_sects) & bs_mask) return -EINVAL; if (bdev_read_only(bdev)) return -EPERM; blk_start_plug(&plug); while (nr_sects) { unsigned int len = min_t(sector_t, nr_sects, max_sectors); bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp); bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_size = len << SECTOR_SHIFT; sector += len; nr_sects -= len; cond_resched(); } if (bio) { ret = submit_bio_wait(bio); bio_put(bio); } blk_finish_plug(&plug); return ret; } EXPORT_SYMBOL(blkdev_issue_secure_erase);
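/*
 * Illustrative sketch, not part of blk-lib.c: a hypothetical caller
 * ("example_zero_range") that first asks for a hardware-offloaded pass via
 * BLKDEV_ZERO_NOFALLBACK and only then lets the helper fall back to writing
 * zero pages. Everything it calls is defined above.
 */
static int example_zero_range(struct block_device *bdev, sector_t sector,
			      sector_t nr_sects)
{
	/* Offload only: -EOPNOTSUPP means no WRITE ZEROES support. */
	int ret = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				       BLKDEV_ZERO_NOFALLBACK);

	/* Fall back to the generic zero-page writer. */
	if (ret == -EOPNOTSUPP)
		ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
					   GFP_KERNEL, 0);
	return ret;
}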
// SPDX-License-Identifier: GPL-2.0-only /* * The Virtio 9p transport driver * * This is a block based transport driver based on the lguest block driver * code.
* * Copyright (C) 2007, 2008 Eric Van Hensbergen, IBM Corporation * * Based on virtio console driver * Copyright (C) 2006, 2007 Rusty Russell, IBM Corporation */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/in.h> #include <linux/module.h> #include <linux/net.h> #include <linux/ipv6.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/un.h> #include <linux/uaccess.h> #include <linux/inet.h> #include <linux/file.h> #include <linux/highmem.h> #include <linux/slab.h> #include <net/9p/9p.h> #include <linux/parser.h> #include <net/9p/client.h> #include <net/9p/transport.h> #include <linux/scatterlist.h> #include <linux/swap.h> #include <linux/virtio.h> #include <linux/virtio_9p.h> #include "trans_common.h" #define VIRTQUEUE_NUM 128 /* a single mutex to manage channel initialization and attachment */ static DEFINE_MUTEX(virtio_9p_lock); static DECLARE_WAIT_QUEUE_HEAD(vp_wq); static atomic_t vp_pinned = ATOMIC_INIT(0); /** * struct virtio_chan - per-instance transport information * @inuse: whether the channel is in use * @lock: protects multiple elements within this structure * @client: client instance * @vdev: virtio dev associated with this channel * @vq: virtio queue associated with this channel * @ring_bufs_avail: flag to indicate there is some space available in the ring buf * @vc_wq: wait queue for waiting for things to be added to the ring buf * @p9_max_pages: maximum number of pinned pages * @sg: scatter gather list which is used to pack a request (protected?) * @chan_list: linked list of channels * * We keep all per-channel information in a structure. * This structure is allocated within the device's dev->mem space. * A pointer to the structure will get put in the transport private. * */ struct virtio_chan { bool inuse; spinlock_t lock; struct p9_client *client; struct virtio_device *vdev; struct virtqueue *vq; int ring_bufs_avail; wait_queue_head_t *vc_wq; /* This is a global limit. Since we don't have a global structure, * we place it in each channel. */ unsigned long p9_max_pages; /* Scatterlist: can be too big for stack. */ struct scatterlist sg[VIRTQUEUE_NUM]; /** * @tag: name to identify a mount, null terminated */ char *tag; struct list_head chan_list; }; static struct list_head virtio_chan_list; /* How many bytes left in this page. */ static unsigned int rest_of_page(void *data) { return PAGE_SIZE - offset_in_page(data); } /** * p9_virtio_close - reclaim resources of a channel * @client: client instance * * This reclaims a channel by freeing its resources and * resetting its inuse flag. * */ static void p9_virtio_close(struct p9_client *client) { struct virtio_chan *chan = client->trans; mutex_lock(&virtio_9p_lock); if (chan) chan->inuse = false; mutex_unlock(&virtio_9p_lock); } /** * req_done - callback which signals activity from the server * @vq: virtio queue activity was received on * * This notifies us that the server has triggered some activity * on the virtio channel - most likely a response to a request we * sent. Figure out which requests now have responses and wake up * those threads. * * Bugs: could do with some additional sanity checking, but appears to work.
* */ static void req_done(struct virtqueue *vq) { struct virtio_chan *chan = vq->vdev->priv; unsigned int len; struct p9_req_t *req; bool need_wakeup = false; unsigned long flags; p9_debug(P9_DEBUG_TRANS, ": request done\n"); spin_lock_irqsave(&chan->lock, flags); while ((req = virtqueue_get_buf(chan->vq, &len)) != NULL) { if (!chan->ring_bufs_avail) { chan->ring_bufs_avail = 1; need_wakeup = true; } if (len) { req->rc.size = len; p9_client_cb(chan->client, req, REQ_STATUS_RCVD); } } spin_unlock_irqrestore(&chan->lock, flags); /* Wakeup if anyone waiting for VirtIO ring space. */ if (need_wakeup) wake_up(chan->vc_wq); } /** * pack_sg_list - pack a scatter gather list from a linear buffer * @sg: scatter/gather list to pack into * @start: which segment of the sg_list to start at * @limit: maximum segment to pack data to * @data: data to pack into scatter/gather list * @count: amount of data to pack into the scatter/gather list * * sg_lists have multiple segments of various sizes. This will pack * arbitrary data into an existing scatter gather list, segmenting the * data as necessary within constraints. * */ static int pack_sg_list(struct scatterlist *sg, int start, int limit, char *data, int count) { int s; int index = start; while (count) { s = rest_of_page(data); if (s > count) s = count; BUG_ON(index >= limit); /* Make sure we don't terminate early. */ sg_unmark_end(&sg[index]); sg_set_buf(&sg[index++], data, s); count -= s; data += s; } if (index-start) sg_mark_end(&sg[index - 1]); return index-start; } /* We don't currently allow canceling of virtio requests */ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req) { return 1; } /* Reply won't come, so drop req ref */ static int p9_virtio_cancelled(struct p9_client *client, struct p9_req_t *req) { p9_req_put(client, req); return 0; } /** * pack_sg_list_p - Just like pack_sg_list. Instead of taking a buffer, * this takes a list of pages. * @sg: scatter/gather list to pack into * @start: which segment of the sg_list to start at * @limit: maximum number of pages in sg list. * @pdata: a list of pages to add into sg. * @nr_pages: number of pages to pack into the scatter/gather list * @offs: amount of data in the beginning of first page _not_ to pack * @count: amount of data to pack into the scatter/gather list */ static int pack_sg_list_p(struct scatterlist *sg, int start, int limit, struct page **pdata, int nr_pages, size_t offs, int count) { int i = 0, s; int data_off = offs; int index = start; BUG_ON(nr_pages > (limit - start)); /* * if the first page doesn't start at * page boundary find the offset */ while (nr_pages) { s = PAGE_SIZE - data_off; if (s > count) s = count; BUG_ON(index >= limit); /* Make sure we don't terminate early. 
*/ sg_unmark_end(&sg[index]); sg_set_page(&sg[index++], pdata[i++], s, data_off); data_off = 0; count -= s; nr_pages--; } if (index-start) sg_mark_end(&sg[index - 1]); return index - start; } /** * p9_virtio_request - issue a request * @client: client instance issuing the request * @req: request to be issued * */ static int p9_virtio_request(struct p9_client *client, struct p9_req_t *req) { int err; int in, out, out_sgs, in_sgs; unsigned long flags; struct virtio_chan *chan = client->trans; struct scatterlist *sgs[2]; p9_debug(P9_DEBUG_TRANS, "9p debug: virtio request\n"); WRITE_ONCE(req->status, REQ_STATUS_SENT); req_retry: spin_lock_irqsave(&chan->lock, flags); out_sgs = in_sgs = 0; /* Handle out VirtIO ring buffers */ out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, req->tc.sdata, req->tc.size); if (out) sgs[out_sgs++] = chan->sg; in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, req->rc.sdata, req->rc.capacity); if (in) sgs[out_sgs + in_sgs++] = chan->sg + out; err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req, GFP_ATOMIC); if (err < 0) { if (err == -ENOSPC) { chan->ring_bufs_avail = 0; spin_unlock_irqrestore(&chan->lock, flags); err = wait_event_killable(*chan->vc_wq, chan->ring_bufs_avail); if (err == -ERESTARTSYS) return err; p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n"); goto req_retry; } else { spin_unlock_irqrestore(&chan->lock, flags); p9_debug(P9_DEBUG_TRANS, "virtio rpc add_sgs returned failure\n"); return -EIO; } } virtqueue_kick(chan->vq); spin_unlock_irqrestore(&chan->lock, flags); p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n"); return 0; } static int p9_get_mapped_pages(struct virtio_chan *chan, struct page ***pages, struct iov_iter *data, int count, size_t *offs, int *need_drop) { int nr_pages; int err; if (!iov_iter_count(data)) return 0; if (!iov_iter_is_kvec(data)) { int n; /* * We allow only p9_max_pages pinned. We wait for the * Other zc request to finish here */ if (atomic_read(&vp_pinned) >= chan->p9_max_pages) { err = wait_event_killable(vp_wq, (atomic_read(&vp_pinned) < chan->p9_max_pages)); if (err == -ERESTARTSYS) return err; } n = iov_iter_get_pages_alloc2(data, pages, count, offs); if (n < 0) return n; *need_drop = 1; nr_pages = DIV_ROUND_UP(n + *offs, PAGE_SIZE); atomic_add(nr_pages, &vp_pinned); return n; } else { /* kernel buffer, no need to pin pages */ int index; size_t len; void *p; /* we'd already checked that it's non-empty */ while (1) { len = iov_iter_single_seg_count(data); if (likely(len)) { p = data->kvec->iov_base + data->iov_offset; break; } iov_iter_advance(data, 0); } if (len > count) len = count; nr_pages = DIV_ROUND_UP((unsigned long)p + len, PAGE_SIZE) - (unsigned long)p / PAGE_SIZE; *pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_NOFS); if (!*pages) return -ENOMEM; *need_drop = 0; p -= (*offs = offset_in_page(p)); for (index = 0; index < nr_pages; index++) { if (is_vmalloc_addr(p)) (*pages)[index] = vmalloc_to_page(p); else (*pages)[index] = kmap_to_page(p); p += PAGE_SIZE; } iov_iter_advance(data, len); return len; } } static void handle_rerror(struct p9_req_t *req, int in_hdr_len, size_t offs, struct page **pages) { unsigned size, n; void *to = req->rc.sdata + in_hdr_len; // Fits entirely into the static data? Nothing to do. if (req->rc.size < in_hdr_len || !pages) return; // Really long error message? Tough, truncate the reply. Might get // rejected (we can't be arsed to adjust the size encoded in header, // or string size for that matter), but it wouldn't be anything valid // anyway. 
if (unlikely(req->rc.size > P9_ZC_HDR_SZ)) req->rc.size = P9_ZC_HDR_SZ; // data won't span more than two pages size = req->rc.size - in_hdr_len; n = PAGE_SIZE - offs; if (size > n) { memcpy_from_page(to, *pages++, offs, n); offs = 0; to += n; size -= n; } memcpy_from_page(to, *pages, offs, size); } /** * p9_virtio_zc_request - issue a zero copy request * @client: client instance issuing the request * @req: request to be issued * @uidata: user buffer that should be used for zero copy read * @uodata: user buffer that should be used for zero copy write * @inlen: read buffer size * @outlen: write buffer size * @in_hdr_len: read header size; this is the size of the response protocol data * */ static int p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req, struct iov_iter *uidata, struct iov_iter *uodata, int inlen, int outlen, int in_hdr_len) { int in, out, err, out_sgs, in_sgs; unsigned long flags; int in_nr_pages = 0, out_nr_pages = 0; struct page **in_pages = NULL, **out_pages = NULL; struct virtio_chan *chan = client->trans; struct scatterlist *sgs[4]; size_t offs = 0; int need_drop = 0; int kicked = 0; p9_debug(P9_DEBUG_TRANS, "virtio request\n"); if (uodata) { __le32 sz; int n = p9_get_mapped_pages(chan, &out_pages, uodata, outlen, &offs, &need_drop); if (n < 0) { err = n; goto err_out; } out_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE); if (n != outlen) { __le32 v = cpu_to_le32(n); memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4); outlen = n; } /* The size field of the message must include the length of the * header and the length of the data. We didn't actually know * the length of the data until this point so add it in now. */ sz = cpu_to_le32(req->tc.size + outlen); memcpy(&req->tc.sdata[0], &sz, sizeof(sz)); } else if (uidata) { int n = p9_get_mapped_pages(chan, &in_pages, uidata, inlen, &offs, &need_drop); if (n < 0) { err = n; goto err_out; } in_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE); if (n != inlen) { __le32 v = cpu_to_le32(n); memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4); inlen = n; } } WRITE_ONCE(req->status, REQ_STATUS_SENT); req_retry_pinned: spin_lock_irqsave(&chan->lock, flags); out_sgs = in_sgs = 0; /* out data */ out = pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, req->tc.sdata, req->tc.size); if (out) sgs[out_sgs++] = chan->sg; if (out_pages) { sgs[out_sgs++] = chan->sg + out; out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM, out_pages, out_nr_pages, offs, outlen); } /* * Take care of in data * For example, TREAD has 11. * 11 is the read/write header = PDU Header(7) + IO Size (4). * Arrange in such a way that server places header in the * allocated memory and payload onto the user buffer.
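 *
 * Illustratively (matching the code below): the first in sg entry covers
 * the in_hdr_len bytes of header placed in req->rc.sdata, while the
 * entries packed from in_pages receive the payload directly in the
 * caller's pages.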
*/ in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, req->rc.sdata, in_hdr_len); if (in) sgs[out_sgs + in_sgs++] = chan->sg + out; if (in_pages) { sgs[out_sgs + in_sgs++] = chan->sg + out + in; pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM, in_pages, in_nr_pages, offs, inlen); } BUG_ON(out_sgs + in_sgs > ARRAY_SIZE(sgs)); err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req, GFP_ATOMIC); if (err < 0) { if (err == -ENOSPC) { chan->ring_bufs_avail = 0; spin_unlock_irqrestore(&chan->lock, flags); err = wait_event_killable(*chan->vc_wq, chan->ring_bufs_avail); if (err == -ERESTARTSYS) goto err_out; p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n"); goto req_retry_pinned; } else { spin_unlock_irqrestore(&chan->lock, flags); p9_debug(P9_DEBUG_TRANS, "virtio rpc add_sgs returned failure\n"); err = -EIO; goto err_out; } } virtqueue_kick(chan->vq); spin_unlock_irqrestore(&chan->lock, flags); kicked = 1; p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n"); err = wait_event_killable(req->wq, READ_ONCE(req->status) >= REQ_STATUS_RCVD); // RERROR needs reply (== error string) in static data if (READ_ONCE(req->status) == REQ_STATUS_RCVD && unlikely(req->rc.sdata[4] == P9_RERROR)) handle_rerror(req, in_hdr_len, offs, in_pages); /* * Non kernel buffers are pinned, unpin them */ err_out: if (need_drop) { if (in_pages) { p9_release_pages(in_pages, in_nr_pages); atomic_sub(in_nr_pages, &vp_pinned); } if (out_pages) { p9_release_pages(out_pages, out_nr_pages); atomic_sub(out_nr_pages, &vp_pinned); } /* wakeup anybody waiting for slots to pin pages */ wake_up(&vp_wq); } kvfree(in_pages); kvfree(out_pages); if (!kicked) { /* reply won't come */ p9_req_put(client, req); } return err; } static ssize_t p9_mount_tag_show(struct device *dev, struct device_attribute *attr, char *buf) { struct virtio_chan *chan; struct virtio_device *vdev; int tag_len; vdev = dev_to_virtio(dev); chan = vdev->priv; tag_len = strlen(chan->tag); memcpy(buf, chan->tag, tag_len + 1); return tag_len + 1; } static DEVICE_ATTR(mount_tag, 0444, p9_mount_tag_show, NULL); /** * p9_virtio_probe - probe for existence of 9P virtio channels * @vdev: virtio device to probe * * This probes for existing virtio channels. * */ static int p9_virtio_probe(struct virtio_device *vdev) { __u16 tag_len; char *tag; int err; struct virtio_chan *chan; if (!vdev->config->get) { dev_err(&vdev->dev, "%s failure: config access disabled\n", __func__); return -EINVAL; } chan = kmalloc(sizeof(struct virtio_chan), GFP_KERNEL); if (!chan) { pr_err("Failed to allocate virtio 9P channel\n"); err = -ENOMEM; goto fail; } chan->vdev = vdev; /* We expect one virtqueue, for requests. 
*/ chan->vq = virtio_find_single_vq(vdev, req_done, "requests"); if (IS_ERR(chan->vq)) { err = PTR_ERR(chan->vq); goto out_free_chan; } chan->vq->vdev->priv = chan; spin_lock_init(&chan->lock); sg_init_table(chan->sg, VIRTQUEUE_NUM); chan->inuse = false; if (virtio_has_feature(vdev, VIRTIO_9P_MOUNT_TAG)) { virtio_cread(vdev, struct virtio_9p_config, tag_len, &tag_len); } else { err = -EINVAL; goto out_free_vq; } tag = kzalloc(tag_len + 1, GFP_KERNEL); if (!tag) { err = -ENOMEM; goto out_free_vq; } virtio_cread_bytes(vdev, offsetof(struct virtio_9p_config, tag), tag, tag_len); chan->tag = tag; err = sysfs_create_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); if (err) { goto out_free_tag; } chan->vc_wq = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL); if (!chan->vc_wq) { err = -ENOMEM; goto out_remove_file; } init_waitqueue_head(chan->vc_wq); chan->ring_bufs_avail = 1; /* Ceiling limit to avoid denial of service attacks */ chan->p9_max_pages = nr_free_buffer_pages()/4; virtio_device_ready(vdev); mutex_lock(&virtio_9p_lock); list_add_tail(&chan->chan_list, &virtio_chan_list); mutex_unlock(&virtio_9p_lock); /* Let udev rules use the new mount_tag attribute. */ kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE); return 0; out_remove_file: sysfs_remove_file(&vdev->dev.kobj, &dev_attr_mount_tag.attr); out_free_tag: kfree(tag); out_free_vq: vdev->config->del_vqs(vdev); out_free_chan: kfree(chan); fail: return err; } /** * p9_virtio_create - allocate a new virtio channel * @client: client instance invoking this transport * @devname: string identifying the channel to connect to (unused) * @args: args passed from sys_mount() for per-transport options (unused) * * This sets up a transport channel for 9p communication. Right now * we only match the first available channel, but eventually we could look up * alternate channels by matching devname versus a virtio_config entry. * We use a simple reference count mechanism to ensure that only a single * mount has a channel open at a time. * */ static int p9_virtio_create(struct p9_client *client, const char *devname, char *args) { struct virtio_chan *chan; int ret = -ENOENT; int found = 0; if (devname == NULL) return -EINVAL; mutex_lock(&virtio_9p_lock); list_for_each_entry(chan, &virtio_chan_list, chan_list) { if (!strcmp(devname, chan->tag)) { if (!chan->inuse) { chan->inuse = true; found = 1; break; } ret = -EBUSY; } } mutex_unlock(&virtio_9p_lock); if (!found) { pr_err("no channels available for device %s\n", devname); return ret; } client->trans = (void *)chan; client->status = Connected; chan->client = client; return 0; } /** * p9_virtio_remove - clean up resources associated with a virtio device * @vdev: virtio device to remove * */ static void p9_virtio_remove(struct virtio_device *vdev) { struct virtio_chan *chan = vdev->priv; unsigned long warning_time; mutex_lock(&virtio_9p_lock); /* Remove self from list so we don't get new users. */ list_del(&chan->chan_list); warning_time = jiffies; /* Wait for existing users to close. 
*/ while (chan->inuse) { mutex_unlock(&virtio_9p_lock); msleep(250); if (time_after(jiffies, warning_time + 10 * HZ)) { dev_emerg(&vdev->dev, "p9_virtio_remove: waiting for device in use.\n"); warning_time = jiffies; } mutex_lock(&virtio_9p_lock); } mutex_unlock(&virtio_9p_lock); virtio_reset_device(vdev); vdev->config->del_vqs(vdev); sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE); kfree(chan->tag); kfree(chan->vc_wq); kfree(chan); } static struct virtio_device_id id_table[] = { { VIRTIO_ID_9P, VIRTIO_DEV_ANY_ID }, { 0 }, }; static unsigned int features[] = { VIRTIO_9P_MOUNT_TAG, }; /* The standard "struct lguest_driver": */ static struct virtio_driver p9_virtio_drv = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, .id_table = id_table, .probe = p9_virtio_probe, .remove = p9_virtio_remove, }; static struct p9_trans_module p9_virtio_trans = { .name = "virtio", .create = p9_virtio_create, .close = p9_virtio_close, .request = p9_virtio_request, .zc_request = p9_virtio_zc_request, .cancel = p9_virtio_cancel, .cancelled = p9_virtio_cancelled, /* * We leave one entry for input and one entry for response * headers. We also skip one more entry to accommodate addresses * that are not at a page boundary, which can result in an extra * page in zero copy. */ .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3), .pooled_rbuffers = false, .def = 1, .owner = THIS_MODULE, }; /* The standard init function */ static int __init p9_virtio_init(void) { int rc; INIT_LIST_HEAD(&virtio_chan_list); v9fs_register_trans(&p9_virtio_trans); rc = register_virtio_driver(&p9_virtio_drv); if (rc) v9fs_unregister_trans(&p9_virtio_trans); return rc; } static void __exit p9_virtio_cleanup(void) { unregister_virtio_driver(&p9_virtio_drv); v9fs_unregister_trans(&p9_virtio_trans); } module_init(p9_virtio_init); module_exit(p9_virtio_cleanup); MODULE_ALIAS_9P("virtio"); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>"); MODULE_DESCRIPTION("Virtio 9p Transport"); MODULE_LICENSE("GPL");
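/*
 * Illustrative sketch, not part of trans_virtio.c: how pack_sg_list()
 * splits a linear buffer at page boundaries. The function name and the
 * buffer values are hypothetical; PAGE_SIZE == 4096 is assumed.
 */
static int example_pack(struct virtio_chan *chan, char *buf)
{
	/*
	 * With buf starting 512 bytes into a page, packing 8192 bytes
	 * produces segments of 3584, 4096 and 512 bytes, i.e. the
	 * return value (number of sg entries used) is 3.
	 */
	return pack_sg_list(chan->sg, 0, VIRTQUEUE_NUM, buf, 8192);
}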
// SPDX-License-Identifier: GPL-2.0 /* * fs/ext4/extents_status.c * * Written by Yongqiang Yang <xiaoqiangnk@gmail.com> * Modified by * Allison Henderson <achender@linux.vnet.ibm.com> * Hugh Dickins <hughd@google.com> * Zheng Liu <wenqing.lz@taobao.com> * * Ext4 extents status tree core functions. */ #include <linux/list_sort.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include "ext4.h" #include <trace/events/ext4.h> /* * According to previous discussion in the Ext4 Developer Workshop, we * will introduce a new structure called io tree to track all extent * status in order to solve some problems that we have met * (e.g. reservation space warning), and provide extent-level locking. * The delay extent tree was the first step to achieve this goal. It * was originally built by Yongqiang Yang. At that time its goal was * only to track delayed extents in memory to simplify the * implementation of fiemap and bigalloc, and to introduce lseek * SEEK_DATA/SEEK_HOLE support. That is why it is still called the * delay extent tree in the first commit. But to better reflect what * it does, it has been renamed to the extent status tree. * * Step1: * Currently the first step has been done. All delayed extents are * tracked in the tree. It maintains the delayed extent when a delayed * allocation is issued, and the delayed extent is written out or * invalidated. Therefore the implementations of fiemap and bigalloc * are simplified, and SEEK_DATA/SEEK_HOLE are introduced. * * The following comment describes the implementation of the extent * status tree and future works. * * Step2: * In this step all extent status are tracked by the extent status tree. * Thus, we can first try to look up a block mapping in this tree before * finding it in the extent tree. Hence, the single extent cache can be * removed because the extent status tree can do a better job. Extents in * the status tree are loaded on-demand. Therefore, the extent status tree * may not contain all of the extents in a file. Meanwhile we define a * shrinker to reclaim memory from the extent status tree because a * fragmented extent tree will make the status tree cost too much memory. * written/unwritten/hole extents in the tree will be reclaimed by this * shrinker when we are under high memory pressure. Delayed extents will * not be reclaimed because fiemap, bigalloc, and seek_data/hole need them. */ /* * Extent status tree implementation for ext4. * * * ========================================================================== * Extent status tree tracks all extent status. * * 1. Why do we need to implement an extent status tree? * * Without the extent status tree, ext4 identifies a delayed extent by * looking up the page cache; this has several deficiencies - complicated, * buggy, and inefficient code.
* * FIEMAP, SEEK_HOLE/DATA, bigalloc, and writeout all need to know if a * block or a range of blocks belongs to a delayed extent. * * Let us have a look at how they work without the extent status tree. * -- FIEMAP * FIEMAP looks up the page cache to identify delayed allocations from holes. * * -- SEEK_HOLE/DATA * SEEK_HOLE/DATA has the same problem as FIEMAP. * * -- bigalloc * bigalloc looks up the page cache to figure out if a block is * already under delayed allocation or not to determine whether * quota reserving is needed for the cluster. * * -- writeout * Writeout looks up the whole page cache to see if a buffer is * mapped; even if there are not very many delayed buffers, it is * time consuming. * * With the extent status tree implementation, FIEMAP, SEEK_HOLE/DATA, * bigalloc and writeout can figure out if a block or a range of * blocks is under delayed allocation (belongs to a delayed extent) or * not by searching the extent status tree. * * * ========================================================================== * 2. Ext4 extent status tree implementation * * -- extent * An extent is a range of blocks which are contiguous logically and * physically. Unlike an extent in the extent tree, this extent is an * in-memory struct; there is no corresponding on-disk data. There * is no limit on the length of an extent, so an extent can contain as * many blocks as are contiguous logically and physically. * * -- extent status tree * Every inode has an extent status tree and all allocated blocks * are added to the tree with different status. The extents in the * tree are ordered by logical block number. * * -- operations on an extent status tree * There are three important operations on a delayed extent tree: finding * the next extent, adding an extent (a range of blocks) and removing an * extent. * * -- race on an extent status tree * The extent status tree is protected by inode->i_es_lock. * * -- memory consumption * A fragmented extent tree will make the extent status tree cost too * much memory. Hence, we will reclaim written/unwritten/hole extents * from the tree under heavy memory pressure. * * * ========================================================================== * 3. Performance analysis * * -- overhead * 1. There is a cached extent for write access, so if writes are * not very random, adding space operations are in O(1) time. * * -- gain * 2. Code is much simpler, more readable, more maintainable and * more efficient. * * * ========================================================================== * 4.
TODO list * * -- Refactor delayed space reservation * * -- Extent-level locking */ static struct kmem_cache *ext4_es_cachep; static struct kmem_cache *ext4_pending_cachep; static int __es_insert_extent(struct inode *inode, struct extent_status *newes, struct extent_status *prealloc); static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t end, int *reserved, struct extent_status *prealloc); static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan); static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan, struct ext4_inode_info *locked_ei); static int __revise_pending(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len, struct pending_reservation **prealloc); int __init ext4_init_es(void) { ext4_es_cachep = KMEM_CACHE(extent_status, SLAB_RECLAIM_ACCOUNT); if (ext4_es_cachep == NULL) return -ENOMEM; return 0; } void ext4_exit_es(void) { kmem_cache_destroy(ext4_es_cachep); } void ext4_es_init_tree(struct ext4_es_tree *tree) { tree->root = RB_ROOT; tree->cache_es = NULL; } #ifdef ES_DEBUG__ static void ext4_es_print_tree(struct inode *inode) { struct ext4_es_tree *tree; struct rb_node *node; printk(KERN_DEBUG "status extents for inode %lu:", inode->i_ino); tree = &EXT4_I(inode)->i_es_tree; node = rb_first(&tree->root); while (node) { struct extent_status *es; es = rb_entry(node, struct extent_status, rb_node); printk(KERN_DEBUG " [%u/%u) %llu %x", es->es_lblk, es->es_len, ext4_es_pblock(es), ext4_es_status(es)); node = rb_next(node); } printk(KERN_DEBUG "\n"); } #else #define ext4_es_print_tree(inode) #endif static inline ext4_lblk_t ext4_es_end(struct extent_status *es) { BUG_ON(es->es_lblk + es->es_len < es->es_lblk); return es->es_lblk + es->es_len - 1; } /* * Search through the tree for a delayed extent with a given offset. If * it can't be found, try to find the next extent. */ static struct extent_status *__es_tree_search(struct rb_root *root, ext4_lblk_t lblk) { struct rb_node *node = root->rb_node; struct extent_status *es = NULL; while (node) { es = rb_entry(node, struct extent_status, rb_node); if (lblk < es->es_lblk) node = node->rb_left; else if (lblk > ext4_es_end(es)) node = node->rb_right; else return es; } if (es && lblk < es->es_lblk) return es; if (es && lblk > ext4_es_end(es)) { node = rb_next(&es->rb_node); return node ? rb_entry(node, struct extent_status, rb_node) : NULL; } return NULL; } /* * ext4_es_find_extent_range - find extent with specified status within block * range or next extent following block range in * extents status tree * * @inode - file containing the range * @matching_fn - pointer to function that matches extents with desired status * @lblk - logical block defining start of range * @end - logical block defining end of range * @es - extent found, if any * * Find the first extent within the block range specified by @lblk and @end * in the extents status tree that satisfies @matching_fn. If a match * is found, it's returned in @es. If not, and a matching extent is found * beyond the block range, it's returned in @es. If no match is found, an * extent is returned in @es whose es_lblk, es_len, and es_pblk components * are 0.
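 *
 * For example (illustrative): scanning [100, 199] for written extents
 * when the first extent at or after block 100 is a written extent
 * [150, 159] returns that extent in @es; if it is instead a hole
 * spanning [100, 299] and the next written extent starts at block 300,
 * the walk stops because 300 is past @end, and @es is zeroed.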
*/ static void __es_find_extent_range(struct inode *inode, int (*matching_fn)(struct extent_status *es), ext4_lblk_t lblk, ext4_lblk_t end, struct extent_status *es) { struct ext4_es_tree *tree = NULL; struct extent_status *es1 = NULL; struct rb_node *node; WARN_ON(es == NULL); WARN_ON(end < lblk); tree = &EXT4_I(inode)->i_es_tree; /* see if the extent has been cached */ es->es_lblk = es->es_len = es->es_pblk = 0; es1 = READ_ONCE(tree->cache_es); if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) { es_debug("%u cached by [%u/%u) %llu %x\n", lblk, es1->es_lblk, es1->es_len, ext4_es_pblock(es1), ext4_es_status(es1)); goto out; } es1 = __es_tree_search(&tree->root, lblk); out: if (es1 && !matching_fn(es1)) { while ((node = rb_next(&es1->rb_node)) != NULL) { es1 = rb_entry(node, struct extent_status, rb_node); if (es1->es_lblk > end) { es1 = NULL; break; } if (matching_fn(es1)) break; } } if (es1 && matching_fn(es1)) { WRITE_ONCE(tree->cache_es, es1); es->es_lblk = es1->es_lblk; es->es_len = es1->es_len; es->es_pblk = es1->es_pblk; } } /* * Locking for __es_find_extent_range() for external use */ void ext4_es_find_extent_range(struct inode *inode, int (*matching_fn)(struct extent_status *es), ext4_lblk_t lblk, ext4_lblk_t end, struct extent_status *es) { es->es_lblk = es->es_len = es->es_pblk = 0; if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) return; trace_ext4_es_find_extent_range_enter(inode, lblk); read_lock(&EXT4_I(inode)->i_es_lock); __es_find_extent_range(inode, matching_fn, lblk, end, es); read_unlock(&EXT4_I(inode)->i_es_lock); trace_ext4_es_find_extent_range_exit(inode, es); } /* * __es_scan_range - search block range for block with specified status * in extents status tree * * @inode - file containing the range * @matching_fn - pointer to function that matches extents with desired status * @lblk - logical block defining start of range * @end - logical block defining end of range * * Returns true if at least one block in the specified block range satisfies * the criterion specified by @matching_fn, and false if not. If at least * one extent has the specified status, then there is at least one block * in the cluster with that status. Should only be called by code that has * taken i_es_lock. */ static bool __es_scan_range(struct inode *inode, int (*matching_fn)(struct extent_status *es), ext4_lblk_t start, ext4_lblk_t end) { struct extent_status es; __es_find_extent_range(inode, matching_fn, start, end, &es); if (es.es_len == 0) return false; /* no matching extent in the tree */ else if (es.es_lblk <= start && start < es.es_lblk + es.es_len) return true; else if (start <= es.es_lblk && es.es_lblk <= end) return true; else return false; } /* * Locking for __es_scan_range() for external use */ bool ext4_es_scan_range(struct inode *inode, int (*matching_fn)(struct extent_status *es), ext4_lblk_t lblk, ext4_lblk_t end) { bool ret; if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) return false; read_lock(&EXT4_I(inode)->i_es_lock); ret = __es_scan_range(inode, matching_fn, lblk, end); read_unlock(&EXT4_I(inode)->i_es_lock); return ret; } /* * __es_scan_clu - search cluster for block with specified status in * extents status tree * * @inode - file containing the cluster * @matching_fn - pointer to function that matches extents with desired status * @lblk - logical block in cluster to be searched * * Returns true if at least one extent in the cluster containing @lblk * satisfies the criterion specified by @matching_fn, and false if not. 
If at * least one extent has the specified status, then there is at least one block * in the cluster with that status. Should only be called by code that has * taken i_es_lock. */ static bool __es_scan_clu(struct inode *inode, int (*matching_fn)(struct extent_status *es), ext4_lblk_t lblk) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); ext4_lblk_t lblk_start, lblk_end; lblk_start = EXT4_LBLK_CMASK(sbi, lblk); lblk_end = lblk_start + sbi->s_cluster_ratio - 1; return __es_scan_range(inode, matching_fn, lblk_start, lblk_end); } /* * Locking for __es_scan_clu() for external use */ bool ext4_es_scan_clu(struct inode *inode, int (*matching_fn)(struct extent_status *es), ext4_lblk_t lblk) { bool ret; if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) return false; read_lock(&EXT4_I(inode)->i_es_lock); ret = __es_scan_clu(inode, matching_fn, lblk); read_unlock(&EXT4_I(inode)->i_es_lock); return ret; } static void ext4_es_list_add(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); if (!list_empty(&ei->i_es_list)) return; spin_lock(&sbi->s_es_lock); if (list_empty(&ei->i_es_list)) { list_add_tail(&ei->i_es_list, &sbi->s_es_list); sbi->s_es_nr_inode++; } spin_unlock(&sbi->s_es_lock); } static void ext4_es_list_del(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); spin_lock(&sbi->s_es_lock); if (!list_empty(&ei->i_es_list)) { list_del_init(&ei->i_es_list); sbi->s_es_nr_inode--; WARN_ON_ONCE(sbi->s_es_nr_inode < 0); } spin_unlock(&sbi->s_es_lock); } static inline struct pending_reservation *__alloc_pending(bool nofail) { if (!nofail) return kmem_cache_alloc(ext4_pending_cachep, GFP_ATOMIC); return kmem_cache_zalloc(ext4_pending_cachep, GFP_KERNEL | __GFP_NOFAIL); } static inline void __free_pending(struct pending_reservation *pr) { kmem_cache_free(ext4_pending_cachep, pr); } /* * Returns true if we cannot fail to allocate memory for this extent_status * entry and cannot reclaim it until its status changes. */ static inline bool ext4_es_must_keep(struct extent_status *es) { /* fiemap, bigalloc, and seek_data/hole need to use it. */ if (ext4_es_is_delayed(es)) return true; return false; } static inline struct extent_status *__es_alloc_extent(bool nofail) { if (!nofail) return kmem_cache_alloc(ext4_es_cachep, GFP_ATOMIC); return kmem_cache_zalloc(ext4_es_cachep, GFP_KERNEL | __GFP_NOFAIL); } static void ext4_es_init_extent(struct inode *inode, struct extent_status *es, ext4_lblk_t lblk, ext4_lblk_t len, ext4_fsblk_t pblk) { es->es_lblk = lblk; es->es_len = len; es->es_pblk = pblk; /* We never try to reclaim a must kept extent, so we don't count it. */ if (!ext4_es_must_keep(es)) { if (!EXT4_I(inode)->i_es_shk_nr++) ext4_es_list_add(inode); percpu_counter_inc(&EXT4_SB(inode->i_sb)-> s_es_stats.es_stats_shk_cnt); } EXT4_I(inode)->i_es_all_nr++; percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt); } static inline void __es_free_extent(struct extent_status *es) { kmem_cache_free(ext4_es_cachep, es); } static void ext4_es_free_extent(struct inode *inode, struct extent_status *es) { EXT4_I(inode)->i_es_all_nr--; percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt); /* Decrease the shrink counter when we can reclaim the extent. 
*/ if (!ext4_es_must_keep(es)) { BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0); if (!--EXT4_I(inode)->i_es_shk_nr) ext4_es_list_del(inode); percpu_counter_dec(&EXT4_SB(inode->i_sb)-> s_es_stats.es_stats_shk_cnt); } __es_free_extent(es); } /* * Check whether or not two extents can be merged * Conditions: * - logical block number is contiguous * - physical block number is contiguous * - status is equal */ static int ext4_es_can_be_merged(struct extent_status *es1, struct extent_status *es2) { if (ext4_es_type(es1) != ext4_es_type(es2)) return 0; if (((__u64) es1->es_len) + es2->es_len > EXT_MAX_BLOCKS) { pr_warn("ES assertion failed when merging extents. " "The sum of lengths of es1 (%d) and es2 (%d) " "is bigger than the allowed file size (%d)\n", es1->es_len, es2->es_len, EXT_MAX_BLOCKS); WARN_ON(1); return 0; } if (((__u64) es1->es_lblk) + es1->es_len != es2->es_lblk) return 0; if ((ext4_es_is_written(es1) || ext4_es_is_unwritten(es1)) && (ext4_es_pblock(es1) + es1->es_len == ext4_es_pblock(es2))) return 1; if (ext4_es_is_hole(es1)) return 1; /* we need to check that the delayed extent is without unwritten status */ if (ext4_es_is_delayed(es1) && !ext4_es_is_unwritten(es1)) return 1; return 0; } static struct extent_status * ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es) { struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; struct extent_status *es1; struct rb_node *node; node = rb_prev(&es->rb_node); if (!node) return es; es1 = rb_entry(node, struct extent_status, rb_node); if (ext4_es_can_be_merged(es1, es)) { es1->es_len += es->es_len; if (ext4_es_is_referenced(es)) ext4_es_set_referenced(es1); rb_erase(&es->rb_node, &tree->root); ext4_es_free_extent(inode, es); es = es1; } return es; } static struct extent_status * ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es) { struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; struct extent_status *es1; struct rb_node *node; node = rb_next(&es->rb_node); if (!node) return es; es1 = rb_entry(node, struct extent_status, rb_node); if (ext4_es_can_be_merged(es, es1)) { es->es_len += es1->es_len; if (ext4_es_is_referenced(es1)) ext4_es_set_referenced(es); rb_erase(node, &tree->root); ext4_es_free_extent(inode, es1); } return es; } #ifdef ES_AGGRESSIVE_TEST #include "ext4_extents.h" /* Needed when ES_AGGRESSIVE_TEST is defined */ static void ext4_es_insert_extent_ext_check(struct inode *inode, struct extent_status *es) { struct ext4_ext_path *path = NULL; struct ext4_extent *ex; ext4_lblk_t ee_block; ext4_fsblk_t ee_start; unsigned short ee_len; int depth, ee_status, es_status; path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE); if (IS_ERR(path)) return; depth = ext_depth(inode); ex = path[depth].p_ext; if (ex) { ee_block = le32_to_cpu(ex->ee_block); ee_start = ext4_ext_pblock(ex); ee_len = ext4_ext_get_actual_len(ex); ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0; es_status = ext4_es_is_unwritten(es) ? 1 : 0; /* * Make sure ex and es do not overlap when we try to insert * a delayed/hole extent. */ if (!ext4_es_is_written(es) && !ext4_es_is_unwritten(es)) { if (in_range(es->es_lblk, ee_block, ee_len)) { pr_warn("ES insert assertion failed for " "inode: %lu we can find an extent " "at block [%d/%d/%llu/%c], but we " "want to add a delayed/hole extent " "[%d/%d/%llu/%x]\n", inode->i_ino, ee_block, ee_len, ee_start, ee_status ? 'u' : 'w', es->es_lblk, es->es_len, ext4_es_pblock(es), ext4_es_status(es)); } goto out; } /* * We don't check ee_block == es->es_lblk, etc.
because es * might be a part of a whole extent, or vice versa. */ if (es->es_lblk < ee_block || ext4_es_pblock(es) != ee_start + es->es_lblk - ee_block) { pr_warn("ES insert assertion failed for inode: %lu " "ex_status [%d/%d/%llu/%c] != " "es_status [%d/%d/%llu/%c]\n", inode->i_ino, ee_block, ee_len, ee_start, ee_status ? 'u' : 'w', es->es_lblk, es->es_len, ext4_es_pblock(es), es_status ? 'u' : 'w'); goto out; } if (ee_status ^ es_status) { pr_warn("ES insert assertion failed for inode: %lu " "ex_status [%d/%d/%llu/%c] != " "es_status [%d/%d/%llu/%c]\n", inode->i_ino, ee_block, ee_len, ee_start, ee_status ? 'u' : 'w', es->es_lblk, es->es_len, ext4_es_pblock(es), es_status ? 'u' : 'w'); } } else { /* * We can't find an extent on disk. So we need to make sure * that we don't want to add a written/unwritten extent. */ if (!ext4_es_is_delayed(es) && !ext4_es_is_hole(es)) { pr_warn("ES insert assertion failed for inode: %lu " "can't find an extent at block %d but we want " "to add a written/unwritten extent " "[%d/%d/%llu/%x]\n", inode->i_ino, es->es_lblk, es->es_lblk, es->es_len, ext4_es_pblock(es), ext4_es_status(es)); } } out: ext4_free_ext_path(path); } static void ext4_es_insert_extent_ind_check(struct inode *inode, struct extent_status *es) { struct ext4_map_blocks map; int retval; /* * Here we call ext4_ind_map_blocks to lookup a block mapping because * the 'Indirect' structure is defined in indirect.c. So we can't * access the direct/indirect tree from outside. It is too dirty to define * this function in the indirect.c file. */ map.m_lblk = es->es_lblk; map.m_len = es->es_len; retval = ext4_ind_map_blocks(NULL, inode, &map, 0); if (retval > 0) { if (ext4_es_is_delayed(es) || ext4_es_is_hole(es)) { /* * We want to add a delayed/hole extent but this * block has been allocated. */ pr_warn("ES insert assertion failed for inode: %lu " "We can find blocks but we want to add a " "delayed/hole extent [%d/%d/%llu/%x]\n", inode->i_ino, es->es_lblk, es->es_len, ext4_es_pblock(es), ext4_es_status(es)); return; } else if (ext4_es_is_written(es)) { if (retval != es->es_len) { pr_warn("ES insert assertion failed for " "inode: %lu retval %d != es_len %d\n", inode->i_ino, retval, es->es_len); return; } if (map.m_pblk != ext4_es_pblock(es)) { pr_warn("ES insert assertion failed for " "inode: %lu m_pblk %llu != " "es_pblk %llu\n", inode->i_ino, map.m_pblk, ext4_es_pblock(es)); return; } } else { /* * We don't need to check an unwritten extent because * an indirect-based file doesn't have it. */ BUG(); } } else if (retval == 0) { if (ext4_es_is_written(es)) { pr_warn("ES insert assertion failed for inode: %lu " "We can't find the block but we want to add " "a written extent [%d/%d/%llu/%x]\n", inode->i_ino, es->es_lblk, es->es_len, ext4_es_pblock(es), ext4_es_status(es)); return; } } } static inline void ext4_es_insert_extent_check(struct inode *inode, struct extent_status *es) { /* * We don't need to worry about the race condition because * the caller takes i_data_sem locking.
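* The BUG_ON below enforces that locking assumption at runtime.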
*/ BUG_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem)); if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) ext4_es_insert_extent_ext_check(inode, es); else ext4_es_insert_extent_ind_check(inode, es); } #else static inline void ext4_es_insert_extent_check(struct inode *inode, struct extent_status *es) { } #endif static int __es_insert_extent(struct inode *inode, struct extent_status *newes, struct extent_status *prealloc) { struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; struct rb_node **p = &tree->root.rb_node; struct rb_node *parent = NULL; struct extent_status *es; while (*p) { parent = *p; es = rb_entry(parent, struct extent_status, rb_node); if (newes->es_lblk < es->es_lblk) { if (ext4_es_can_be_merged(newes, es)) { /* * Here we can modify es_lblk directly * because it isn't overlapped. */ es->es_lblk = newes->es_lblk; es->es_len += newes->es_len; if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) ext4_es_store_pblock(es, newes->es_pblk); es = ext4_es_try_to_merge_left(inode, es); goto out; } p = &(*p)->rb_left; } else if (newes->es_lblk > ext4_es_end(es)) { if (ext4_es_can_be_merged(es, newes)) { es->es_len += newes->es_len; es = ext4_es_try_to_merge_right(inode, es); goto out; } p = &(*p)->rb_right; } else { BUG(); return -EINVAL; } } if (prealloc) es = prealloc; else es = __es_alloc_extent(false); if (!es) return -ENOMEM; ext4_es_init_extent(inode, es, newes->es_lblk, newes->es_len, newes->es_pblk); rb_link_node(&es->rb_node, parent, p); rb_insert_color(&es->rb_node, &tree->root); out: tree->cache_es = es; return 0; } /* * ext4_es_insert_extent() adds information to an inode's extent * status tree. */ void ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len, ext4_fsblk_t pblk, unsigned int status) { struct extent_status newes; ext4_lblk_t end = lblk + len - 1; int err1 = 0, err2 = 0, err3 = 0; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct extent_status *es1 = NULL; struct extent_status *es2 = NULL; struct pending_reservation *pr = NULL; bool revise_pending = false; if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) return; es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n", lblk, len, pblk, status, inode->i_ino); if (!len) return; BUG_ON(end < lblk); if ((status & EXTENT_STATUS_DELAYED) && (status & EXTENT_STATUS_WRITTEN)) { ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as " " delayed and written which can potentially " " cause data loss.", lblk, len); WARN_ON(1); } newes.es_lblk = lblk; newes.es_len = len; ext4_es_store_pblock_status(&newes, pblk, status); trace_ext4_es_insert_extent(inode, &newes); ext4_es_insert_extent_check(inode, &newes); revise_pending = sbi->s_cluster_ratio > 1 && test_opt(inode->i_sb, DELALLOC) && (status & (EXTENT_STATUS_WRITTEN | EXTENT_STATUS_UNWRITTEN)); retry: if (err1 && !es1) es1 = __es_alloc_extent(true); if ((err1 || err2) && !es2) es2 = __es_alloc_extent(true); if ((err1 || err2 || err3) && revise_pending && !pr) pr = __alloc_pending(true); write_lock(&EXT4_I(inode)->i_es_lock); err1 = __es_remove_extent(inode, lblk, end, NULL, es1); if (err1 != 0) goto error; /* Free preallocated extent if it didn't get used. */ if (es1) { if (!es1->es_len) __es_free_extent(es1); es1 = NULL; } err2 = __es_insert_extent(inode, &newes, es2); if (err2 == -ENOMEM && !ext4_es_must_keep(&newes)) err2 = 0; if (err2 != 0) goto error; /* Free preallocated extent if it didn't get used. 
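* Preallocated entries are zeroed on allocation, so a still-zero es_len * means the entry was never linked into the tree and is safe to free.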
*/ if (es2) { if (!es2->es_len) __es_free_extent(es2); es2 = NULL; } if (revise_pending) { err3 = __revise_pending(inode, lblk, len, &pr); if (err3 != 0) goto error; if (pr) { __free_pending(pr); pr = NULL; } } error: write_unlock(&EXT4_I(inode)->i_es_lock); if (err1 || err2 || err3) goto retry; ext4_es_print_tree(inode); return; } /* * ext4_es_cache_extent() inserts information into the extent status * tree if and only if there isn't information about the range in * question already. */ void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len, ext4_fsblk_t pblk, unsigned int status) { struct extent_status *es; struct extent_status newes; ext4_lblk_t end = lblk + len - 1; if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) return; newes.es_lblk = lblk; newes.es_len = len; ext4_es_store_pblock_status(&newes, pblk, status); trace_ext4_es_cache_extent(inode, &newes); if (!len) return; BUG_ON(end < lblk); write_lock(&EXT4_I(inode)->i_es_lock); es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk); if (!es || es->es_lblk > end) __es_insert_extent(inode, &newes, NULL); write_unlock(&EXT4_I(inode)->i_es_lock); } /* * ext4_es_lookup_extent() looks up an extent in the extent status tree. * * ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks. * * Return: 1 if found, 0 if not */ int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t *next_lblk, struct extent_status *es) { struct ext4_es_tree *tree; struct ext4_es_stats *stats; struct extent_status *es1 = NULL; struct rb_node *node; int found = 0; if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) return 0; trace_ext4_es_lookup_extent_enter(inode, lblk); es_debug("lookup extent in block %u\n", lblk); tree = &EXT4_I(inode)->i_es_tree; read_lock(&EXT4_I(inode)->i_es_lock); /* check the cached extent first */ es->es_lblk = es->es_len = es->es_pblk = 0; es1 = READ_ONCE(tree->cache_es); if (es1 && in_range(lblk, es1->es_lblk, es1->es_len)) { es_debug("%u cached by [%u/%u)\n", lblk, es1->es_lblk, es1->es_len); found = 1; goto out; } node = tree->root.rb_node; while (node) { es1 = rb_entry(node, struct extent_status, rb_node); if (lblk < es1->es_lblk) node = node->rb_left; else if (lblk > ext4_es_end(es1)) node = node->rb_right; else { found = 1; break; } } out: stats = &EXT4_SB(inode->i_sb)->s_es_stats; if (found) { BUG_ON(!es1); es->es_lblk = es1->es_lblk; es->es_len = es1->es_len; es->es_pblk = es1->es_pblk; if (!ext4_es_is_referenced(es1)) ext4_es_set_referenced(es1); percpu_counter_inc(&stats->es_stats_cache_hits); if (next_lblk) { node = rb_next(&es1->rb_node); if (node) { es1 = rb_entry(node, struct extent_status, rb_node); *next_lblk = es1->es_lblk; } else *next_lblk = 0; } } else { percpu_counter_inc(&stats->es_stats_cache_misses); } read_unlock(&EXT4_I(inode)->i_es_lock); trace_ext4_es_lookup_extent_exit(inode, es, found); return found; } struct rsvd_count { int ndelonly; bool first_do_lblk_found; ext4_lblk_t first_do_lblk; ext4_lblk_t last_do_lblk; struct extent_status *left_es; bool partial; ext4_lblk_t lclu; }; /* * init_rsvd - initialize reserved count data before removing block range * in file from extent status tree * * @inode - file containing range * @lblk - first block in range * @es - pointer to first extent in range * @rc - pointer to reserved count data * * Assumes es is not NULL */ static void init_rsvd(struct inode *inode, ext4_lblk_t lblk, struct extent_status *es, struct rsvd_count *rc) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct rb_node
*node; rc->ndelonly = 0; /* * for bigalloc, note the first delonly block in the range has not * been found, record the extent containing the block to the left of * the region to be removed, if any, and note that there's no partial * cluster to track */ if (sbi->s_cluster_ratio > 1) { rc->first_do_lblk_found = false; if (lblk > es->es_lblk) { rc->left_es = es; } else { node = rb_prev(&es->rb_node); rc->left_es = node ? rb_entry(node, struct extent_status, rb_node) : NULL; } rc->partial = false; } } /* * count_rsvd - count the clusters containing delayed and not unwritten * (delonly) blocks in a range within an extent and add to * the running tally in rsvd_count * * @inode - file containing extent * @lblk - first block in range * @len - length of range in blocks * @es - pointer to extent containing clusters to be counted * @rc - pointer to reserved count data * * Tracks partial clusters found at the beginning and end of extents so * they aren't overcounted when they span adjacent extents */ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len, struct extent_status *es, struct rsvd_count *rc) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); ext4_lblk_t i, end, nclu; if (!ext4_es_is_delonly(es)) return; WARN_ON(len <= 0); if (sbi->s_cluster_ratio == 1) { rc->ndelonly += (int) len; return; } /* bigalloc */ i = (lblk < es->es_lblk) ? es->es_lblk : lblk; end = lblk + (ext4_lblk_t) len - 1; end = (end > ext4_es_end(es)) ? ext4_es_end(es) : end; /* record the first block of the first delonly extent seen */ if (!rc->first_do_lblk_found) { rc->first_do_lblk = i; rc->first_do_lblk_found = true; } /* update the last lblk in the region seen so far */ rc->last_do_lblk = end; /* * if we're tracking a partial cluster and the current extent * doesn't start with it, count it and stop tracking */ if (rc->partial && (rc->lclu != EXT4_B2C(sbi, i))) { rc->ndelonly++; rc->partial = false; } /* * if the first cluster doesn't start on a cluster boundary but * ends on one, count it */ if (EXT4_LBLK_COFF(sbi, i) != 0) { if (end >= EXT4_LBLK_CFILL(sbi, i)) { rc->ndelonly++; rc->partial = false; i = EXT4_LBLK_CFILL(sbi, i) + 1; } } /* * if the current cluster starts on a cluster boundary, count the * number of whole delonly clusters in the extent */ if ((i + sbi->s_cluster_ratio - 1) <= end) { nclu = (end - i + 1) >> sbi->s_cluster_bits; rc->ndelonly += nclu; i += nclu << sbi->s_cluster_bits; } /* * start tracking a partial cluster if there's a partial at the end * of the current extent and we're not already tracking one */ if (!rc->partial && i <= end) { rc->partial = true; rc->lclu = EXT4_B2C(sbi, i); } } /* * __pr_tree_search - search for a pending cluster reservation * * @root - root of pending reservation tree * @lclu - logical cluster to search for * * Returns the pending reservation for the cluster identified by @lclu * if found. If not, returns a reservation for the next cluster if any, * and if not, returns NULL. */ static struct pending_reservation *__pr_tree_search(struct rb_root *root, ext4_lblk_t lclu) { struct rb_node *node = root->rb_node; struct pending_reservation *pr = NULL; while (node) { pr = rb_entry(node, struct pending_reservation, rb_node); if (lclu < pr->lclu) node = node->rb_left; else if (lclu > pr->lclu) node = node->rb_right; else return pr; } if (pr && lclu < pr->lclu) return pr; if (pr && lclu > pr->lclu) { node = rb_next(&pr->rb_node); return node ? 
rb_entry(node, struct pending_reservation, rb_node) : NULL; } return NULL; } /* * get_rsvd - calculates and returns the number of cluster reservations to be * released when removing a block range from the extent status tree * and releases any pending reservations within the range * * @inode - file containing block range * @end - last block in range * @right_es - pointer to extent containing next block beyond end or NULL * @rc - pointer to reserved count data * * The number of reservations to be released is equal to the number of * clusters containing delayed and not unwritten (delonly) blocks within * the range, minus the number of clusters still containing delonly blocks * at the ends of the range, and minus the number of pending reservations * within the range. */ static unsigned int get_rsvd(struct inode *inode, ext4_lblk_t end, struct extent_status *right_es, struct rsvd_count *rc) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct pending_reservation *pr; struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree; struct rb_node *node; ext4_lblk_t first_lclu, last_lclu; bool left_delonly, right_delonly, count_pending; struct extent_status *es; if (sbi->s_cluster_ratio > 1) { /* count any remaining partial cluster */ if (rc->partial) rc->ndelonly++; if (rc->ndelonly == 0) return 0; first_lclu = EXT4_B2C(sbi, rc->first_do_lblk); last_lclu = EXT4_B2C(sbi, rc->last_do_lblk); /* * decrease the delonly count by the number of clusters at the * ends of the range that still contain delonly blocks - * these clusters still need to be reserved */ left_delonly = right_delonly = false; es = rc->left_es; while (es && ext4_es_end(es) >= EXT4_LBLK_CMASK(sbi, rc->first_do_lblk)) { if (ext4_es_is_delonly(es)) { rc->ndelonly--; left_delonly = true; break; } node = rb_prev(&es->rb_node); if (!node) break; es = rb_entry(node, struct extent_status, rb_node); } if (right_es && (!left_delonly || first_lclu != last_lclu)) { if (end < ext4_es_end(right_es)) { es = right_es; } else { node = rb_next(&right_es->rb_node); es = node ? rb_entry(node, struct extent_status, rb_node) : NULL; } while (es && es->es_lblk <= EXT4_LBLK_CFILL(sbi, rc->last_do_lblk)) { if (ext4_es_is_delonly(es)) { rc->ndelonly--; right_delonly = true; break; } node = rb_next(&es->rb_node); if (!node) break; es = rb_entry(node, struct extent_status, rb_node); } } /* * Determine the block range that should be searched for * pending reservations, if any. Clusters on the ends of the * original removed range containing delonly blocks are * excluded. They've already been accounted for and it's not * possible to determine if an associated pending reservation * should be released with the information available in the * extents status tree. 
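* For example, if the first cluster of the range still has delonly blocks * to the left of the range, it keeps its reservation: it was already * uncounted above and is excluded from the pending-reservation search below.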
*/ if (first_lclu == last_lclu) { if (left_delonly | right_delonly) count_pending = false; else count_pending = true; } else { if (left_delonly) first_lclu++; if (right_delonly) last_lclu--; if (first_lclu <= last_lclu) count_pending = true; else count_pending = false; } /* * a pending reservation found between first_lclu and last_lclu * represents an allocated cluster that contained at least one * delonly block, so the delonly total must be reduced by one * for each pending reservation found and released */ if (count_pending) { pr = __pr_tree_search(&tree->root, first_lclu); while (pr && pr->lclu <= last_lclu) { rc->ndelonly--; node = rb_next(&pr->rb_node); rb_erase(&pr->rb_node, &tree->root); __free_pending(pr); if (!node) break; pr = rb_entry(node, struct pending_reservation, rb_node); } } } return rc->ndelonly; } /* * __es_remove_extent - removes block range from extent status tree * * @inode - file containing range * @lblk - first block in range * @end - last block in range * @reserved - number of cluster reservations released * @prealloc - pre-allocated es to avoid memory allocation failures * * If @reserved is not NULL and delayed allocation is enabled, counts * block/cluster reservations freed by removing range and if bigalloc * enabled cancels pending reservations as needed. Returns 0 on success, * error code on failure. */ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t end, int *reserved, struct extent_status *prealloc) { struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; struct rb_node *node; struct extent_status *es; struct extent_status orig_es; ext4_lblk_t len1, len2; ext4_fsblk_t block; int err = 0; bool count_reserved = true; struct rsvd_count rc; if (reserved == NULL || !test_opt(inode->i_sb, DELALLOC)) count_reserved = false; es = __es_tree_search(&tree->root, lblk); if (!es) goto out; if (es->es_lblk > end) goto out; /* Simply invalidate cache_es. */ tree->cache_es = NULL; if (count_reserved) init_rsvd(inode, lblk, es, &rc); orig_es.es_lblk = es->es_lblk; orig_es.es_len = es->es_len; orig_es.es_pblk = es->es_pblk; len1 = lblk > es->es_lblk ? lblk - es->es_lblk : 0; len2 = ext4_es_end(es) > end ? 
ext4_es_end(es) - end : 0; if (len1 > 0) es->es_len = len1; if (len2 > 0) { if (len1 > 0) { struct extent_status newes; newes.es_lblk = end + 1; newes.es_len = len2; block = 0x7FDEADBEEFULL; if (ext4_es_is_written(&orig_es) || ext4_es_is_unwritten(&orig_es)) block = ext4_es_pblock(&orig_es) + orig_es.es_len - len2; ext4_es_store_pblock_status(&newes, block, ext4_es_status(&orig_es)); err = __es_insert_extent(inode, &newes, prealloc); if (err) { if (!ext4_es_must_keep(&newes)) return 0; es->es_lblk = orig_es.es_lblk; es->es_len = orig_es.es_len; goto out; } } else { es->es_lblk = end + 1; es->es_len = len2; if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) { block = orig_es.es_pblk + orig_es.es_len - len2; ext4_es_store_pblock(es, block); } } if (count_reserved) count_rsvd(inode, orig_es.es_lblk + len1, orig_es.es_len - len1 - len2, &orig_es, &rc); goto out_get_reserved; } if (len1 > 0) { if (count_reserved) count_rsvd(inode, lblk, orig_es.es_len - len1, &orig_es, &rc); node = rb_next(&es->rb_node); if (node) es = rb_entry(node, struct extent_status, rb_node); else es = NULL; } while (es && ext4_es_end(es) <= end) { if (count_reserved) count_rsvd(inode, es->es_lblk, es->es_len, es, &rc); node = rb_next(&es->rb_node); rb_erase(&es->rb_node, &tree->root); ext4_es_free_extent(inode, es); if (!node) { es = NULL; break; } es = rb_entry(node, struct extent_status, rb_node); } if (es && es->es_lblk < end + 1) { ext4_lblk_t orig_len = es->es_len; len1 = ext4_es_end(es) - end; if (count_reserved) count_rsvd(inode, es->es_lblk, orig_len - len1, es, &rc); es->es_lblk = end + 1; es->es_len = len1; if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) { block = es->es_pblk + orig_len - len1; ext4_es_store_pblock(es, block); } } out_get_reserved: if (count_reserved) *reserved = get_rsvd(inode, end, es, &rc); out: return err; } /* * ext4_es_remove_extent - removes block range from extent status tree * * @inode - file containing range * @lblk - first block in range * @len - number of blocks to remove * * Reduces block/cluster reservation count and for bigalloc cancels pending * reservations as needed. */ void ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len) { ext4_lblk_t end; int err = 0; int reserved = 0; struct extent_status *es = NULL; if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) return; trace_ext4_es_remove_extent(inode, lblk, len); es_debug("remove [%u/%u) from extent status tree of inode %lu\n", lblk, len, inode->i_ino); if (!len) return; end = lblk + len - 1; BUG_ON(end < lblk); retry: if (err && !es) es = __es_alloc_extent(true); /* * ext4_clear_inode() depends on us taking i_es_lock unconditionally * so that we are sure __es_shrink() is done with the inode before it * is reclaimed. */ write_lock(&EXT4_I(inode)->i_es_lock); err = __es_remove_extent(inode, lblk, end, &reserved, es); /* Free preallocated extent if it didn't get used. 
*/ if (es) { if (!es->es_len) __es_free_extent(es); es = NULL; } write_unlock(&EXT4_I(inode)->i_es_lock); if (err) goto retry; ext4_es_print_tree(inode); ext4_da_release_space(inode, reserved); return; } static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan, struct ext4_inode_info *locked_ei) { struct ext4_inode_info *ei; struct ext4_es_stats *es_stats; ktime_t start_time; u64 scan_time; int nr_to_walk; int nr_shrunk = 0; int retried = 0, nr_skipped = 0; es_stats = &sbi->s_es_stats; start_time = ktime_get(); retry: spin_lock(&sbi->s_es_lock); nr_to_walk = sbi->s_es_nr_inode; while (nr_to_walk-- > 0) { if (list_empty(&sbi->s_es_list)) { spin_unlock(&sbi->s_es_lock); goto out; } ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info, i_es_list); /* Move the inode to the tail */ list_move_tail(&ei->i_es_list, &sbi->s_es_list); /* * Normally we try hard to avoid shrinking precached inodes, * but we will as a last resort. */ if (!retried && ext4_test_inode_state(&ei->vfs_inode, EXT4_STATE_EXT_PRECACHED)) { nr_skipped++; continue; } if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) { nr_skipped++; continue; } /* * Now we hold i_es_lock which protects us from inode reclaim * freeing inode under us */ spin_unlock(&sbi->s_es_lock); nr_shrunk += es_reclaim_extents(ei, &nr_to_scan); write_unlock(&ei->i_es_lock); if (nr_to_scan <= 0) goto out; spin_lock(&sbi->s_es_lock); } spin_unlock(&sbi->s_es_lock); /* * If we skipped any inodes, and we weren't able to make any * forward progress, try again to scan precached inodes. */ if ((nr_shrunk == 0) && nr_skipped && !retried) { retried++; goto retry; } if (locked_ei && nr_shrunk == 0) nr_shrunk = es_reclaim_extents(locked_ei, &nr_to_scan); out: scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time)); if (likely(es_stats->es_stats_scan_time)) es_stats->es_stats_scan_time = (scan_time + es_stats->es_stats_scan_time*3) / 4; else es_stats->es_stats_scan_time = scan_time; if (scan_time > es_stats->es_stats_max_scan_time) es_stats->es_stats_max_scan_time = scan_time; if (likely(es_stats->es_stats_shrunk)) es_stats->es_stats_shrunk = (nr_shrunk + es_stats->es_stats_shrunk*3) / 4; else es_stats->es_stats_shrunk = nr_shrunk; trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time, nr_skipped, retried); return nr_shrunk; } static unsigned long ext4_es_count(struct shrinker *shrink, struct shrink_control *sc) { unsigned long nr; struct ext4_sb_info *sbi; sbi = shrink->private_data; nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt); trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr); return nr; } static unsigned long ext4_es_scan(struct shrinker *shrink, struct shrink_control *sc) { struct ext4_sb_info *sbi = shrink->private_data; int nr_to_scan = sc->nr_to_scan; int ret, nr_shrunk; ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt); trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret); nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL); ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt); trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret); return nr_shrunk; } int ext4_seq_es_shrinker_info_show(struct seq_file *seq, void *v) { struct ext4_sb_info *sbi = EXT4_SB((struct super_block *) seq->private); struct ext4_es_stats *es_stats = &sbi->s_es_stats; struct ext4_inode_info *ei, *max = NULL; unsigned int inode_cnt = 0; if (v != SEQ_START_TOKEN) return 0; /* here we just find an inode that has the max nr. 
of objects */ spin_lock(&sbi->s_es_lock); list_for_each_entry(ei, &sbi->s_es_list, i_es_list) { inode_cnt++; if (max && max->i_es_all_nr < ei->i_es_all_nr) max = ei; else if (!max) max = ei; } spin_unlock(&sbi->s_es_lock); seq_printf(seq, "stats:\n %lld objects\n %lld reclaimable objects\n", percpu_counter_sum_positive(&es_stats->es_stats_all_cnt), percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt)); seq_printf(seq, " %lld/%lld cache hits/misses\n", percpu_counter_sum_positive(&es_stats->es_stats_cache_hits), percpu_counter_sum_positive(&es_stats->es_stats_cache_misses)); if (inode_cnt) seq_printf(seq, " %d inodes on list\n", inode_cnt); seq_printf(seq, "average:\n %llu us scan time\n", div_u64(es_stats->es_stats_scan_time, 1000)); seq_printf(seq, " %lu shrunk objects\n", es_stats->es_stats_shrunk); if (inode_cnt) seq_printf(seq, "maximum:\n %lu inode (%u objects, %u reclaimable)\n" " %llu us max scan time\n", max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_shk_nr, div_u64(es_stats->es_stats_max_scan_time, 1000)); return 0; } int ext4_es_register_shrinker(struct ext4_sb_info *sbi) { int err; /* Make sure we have enough bits for physical block number */ BUILD_BUG_ON(ES_SHIFT < 48); INIT_LIST_HEAD(&sbi->s_es_list); sbi->s_es_nr_inode = 0; spin_lock_init(&sbi->s_es_lock); sbi->s_es_stats.es_stats_shrunk = 0; err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_hits, 0, GFP_KERNEL); if (err) return err; err = percpu_counter_init(&sbi->s_es_stats.es_stats_cache_misses, 0, GFP_KERNEL); if (err) goto err1; sbi->s_es_stats.es_stats_scan_time = 0; sbi->s_es_stats.es_stats_max_scan_time = 0; err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL); if (err) goto err2; err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0, GFP_KERNEL); if (err) goto err3; sbi->s_es_shrinker = shrinker_alloc(0, "ext4-es:%s", sbi->s_sb->s_id); if (!sbi->s_es_shrinker) { err = -ENOMEM; goto err4; } sbi->s_es_shrinker->scan_objects = ext4_es_scan; sbi->s_es_shrinker->count_objects = ext4_es_count; sbi->s_es_shrinker->private_data = sbi; shrinker_register(sbi->s_es_shrinker); return 0; err4: percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt); err3: percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt); err2: percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses); err1: percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits); return err; } void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi) { percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_hits); percpu_counter_destroy(&sbi->s_es_stats.es_stats_cache_misses); percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt); percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt); shrinker_free(sbi->s_es_shrinker); } /* * Shrink extents in given inode from ei->i_es_shrink_lblk till end. Scan at * most *nr_to_scan extents, update *nr_to_scan accordingly. * * Return 0 if we hit end of tree / interval, 1 if we exhausted nr_to_scan. * Increment *nr_shrunk by the number of reclaimed extents. Also update * ei->i_es_shrink_lblk to where we should continue scanning. 
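* The caller uses this cursor to make the scan cyclic: if the end of the * tree is reached before *nr_to_scan is exhausted, es_reclaim_extents() * wraps around and rescans from block 0 up to the old start.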
*/ static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end, int *nr_to_scan, int *nr_shrunk) { struct inode *inode = &ei->vfs_inode; struct ext4_es_tree *tree = &ei->i_es_tree; struct extent_status *es; struct rb_node *node; es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk); if (!es) goto out_wrap; while (*nr_to_scan > 0) { if (es->es_lblk > end) { ei->i_es_shrink_lblk = end + 1; return 0; } (*nr_to_scan)--; node = rb_next(&es->rb_node); if (ext4_es_must_keep(es)) goto next; if (ext4_es_is_referenced(es)) { ext4_es_clear_referenced(es); goto next; } rb_erase(&es->rb_node, &tree->root); ext4_es_free_extent(inode, es); (*nr_shrunk)++; next: if (!node) goto out_wrap; es = rb_entry(node, struct extent_status, rb_node); } ei->i_es_shrink_lblk = es->es_lblk; return 1; out_wrap: ei->i_es_shrink_lblk = 0; return 0; } static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan) { struct inode *inode = &ei->vfs_inode; int nr_shrunk = 0; ext4_lblk_t start = ei->i_es_shrink_lblk; static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); if (ei->i_es_shk_nr == 0) return 0; if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) && __ratelimit(&_rs)) ext4_warning(inode->i_sb, "forced shrink of precached extents"); if (!es_do_reclaim_extents(ei, EXT_MAX_BLOCKS, nr_to_scan, &nr_shrunk) && start != 0) es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk); ei->i_es_tree.cache_es = NULL; return nr_shrunk; } /* * Called to support EXT4_IOC_CLEAR_ES_CACHE. We can only remove * discretionary entries from the extent status cache. (Some entries * must be present for proper operations.) */ void ext4_clear_inode_es(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); struct extent_status *es; struct ext4_es_tree *tree; struct rb_node *node; write_lock(&ei->i_es_lock); tree = &EXT4_I(inode)->i_es_tree; tree->cache_es = NULL; node = rb_first(&tree->root); while (node) { es = rb_entry(node, struct extent_status, rb_node); node = rb_next(node); if (!ext4_es_must_keep(es)) { rb_erase(&es->rb_node, &tree->root); ext4_es_free_extent(inode, es); } } ext4_clear_inode_state(inode, EXT4_STATE_EXT_PRECACHED); write_unlock(&ei->i_es_lock); } #ifdef ES_DEBUG__ static void ext4_print_pending_tree(struct inode *inode) { struct ext4_pending_tree *tree; struct rb_node *node; struct pending_reservation *pr; printk(KERN_DEBUG "pending reservations for inode %lu:", inode->i_ino); tree = &EXT4_I(inode)->i_pending_tree; node = rb_first(&tree->root); while (node) { pr = rb_entry(node, struct pending_reservation, rb_node); printk(KERN_DEBUG " %u", pr->lclu); node = rb_next(node); } printk(KERN_DEBUG "\n"); } #else #define ext4_print_pending_tree(inode) #endif int __init ext4_init_pending(void) { ext4_pending_cachep = KMEM_CACHE(pending_reservation, SLAB_RECLAIM_ACCOUNT); if (ext4_pending_cachep == NULL) return -ENOMEM; return 0; } void ext4_exit_pending(void) { kmem_cache_destroy(ext4_pending_cachep); } void ext4_init_pending_tree(struct ext4_pending_tree *tree) { tree->root = RB_ROOT; } /* * __get_pending - retrieve a pointer to a pending reservation * * @inode - file containing the pending cluster reservation * @lclu - logical cluster of interest * * Returns a pointer to a pending reservation if it's a member of * the set, and NULL if not. Must be called holding i_es_lock. 
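* Either lock mode is sufficient, since the lookup only reads the pending tree.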
*/ static struct pending_reservation *__get_pending(struct inode *inode, ext4_lblk_t lclu) { struct ext4_pending_tree *tree; struct rb_node *node; struct pending_reservation *pr = NULL; tree = &EXT4_I(inode)->i_pending_tree; node = (&tree->root)->rb_node; while (node) { pr = rb_entry(node, struct pending_reservation, rb_node); if (lclu < pr->lclu) node = node->rb_left; else if (lclu > pr->lclu) node = node->rb_right; else if (lclu == pr->lclu) return pr; } return NULL; } /* * __insert_pending - adds a pending cluster reservation to the set of * pending reservations * * @inode - file containing the cluster * @lblk - logical block in the cluster to be added * @prealloc - preallocated pending entry * * Returns 0 on successful insertion and -ENOMEM on failure. If the * pending reservation is already in the set, returns successfully. */ static int __insert_pending(struct inode *inode, ext4_lblk_t lblk, struct pending_reservation **prealloc) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_pending_tree *tree = &EXT4_I(inode)->i_pending_tree; struct rb_node **p = &tree->root.rb_node; struct rb_node *parent = NULL; struct pending_reservation *pr; ext4_lblk_t lclu; int ret = 0; lclu = EXT4_B2C(sbi, lblk); /* search to find parent for insertion */ while (*p) { parent = *p; pr = rb_entry(parent, struct pending_reservation, rb_node); if (lclu < pr->lclu) { p = &(*p)->rb_left; } else if (lclu > pr->lclu) { p = &(*p)->rb_right; } else { /* pending reservation already inserted */ goto out; } } if (likely(*prealloc == NULL)) { pr = __alloc_pending(false); if (!pr) { ret = -ENOMEM; goto out; } } else { pr = *prealloc; *prealloc = NULL; } pr->lclu = lclu; rb_link_node(&pr->rb_node, parent, p); rb_insert_color(&pr->rb_node, &tree->root); out: return ret; } /* * __remove_pending - removes a pending cluster reservation from the set * of pending reservations * * @inode - file containing the cluster * @lblk - logical block in the pending cluster reservation to be removed * * Returns successfully if pending reservation is not a member of the set. */ static void __remove_pending(struct inode *inode, ext4_lblk_t lblk) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct pending_reservation *pr; struct ext4_pending_tree *tree; pr = __get_pending(inode, EXT4_B2C(sbi, lblk)); if (pr != NULL) { tree = &EXT4_I(inode)->i_pending_tree; rb_erase(&pr->rb_node, &tree->root); __free_pending(pr); } } /* * ext4_remove_pending - removes a pending cluster reservation from the set * of pending reservations * * @inode - file containing the cluster * @lblk - logical block in the pending cluster reservation to be removed * * Locking for external use of __remove_pending. */ void ext4_remove_pending(struct inode *inode, ext4_lblk_t lblk) { struct ext4_inode_info *ei = EXT4_I(inode); write_lock(&ei->i_es_lock); __remove_pending(inode, lblk); write_unlock(&ei->i_es_lock); } /* * ext4_is_pending - determine whether a cluster has a pending reservation * on it * * @inode - file containing the cluster * @lblk - logical block in the cluster * * Returns true if there's a pending reservation for the cluster in the * set of pending reservations, and false if not. 
*/ bool ext4_is_pending(struct inode *inode, ext4_lblk_t lblk) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_inode_info *ei = EXT4_I(inode); bool ret; read_lock(&ei->i_es_lock); ret = (bool)(__get_pending(inode, EXT4_B2C(sbi, lblk)) != NULL); read_unlock(&ei->i_es_lock); return ret; } /* * ext4_es_insert_delayed_extent - adds some delayed blocks to the extents * status tree, adding a pending reservation * where needed * * @inode - file containing the newly added block * @lblk - start logical block to be added * @len - length of blocks to be added * @lclu_allocated/end_allocated - indicates whether a physical cluster has * been allocated for the logical cluster * that contains the start/end block. Note that * end_allocated should always be set to false * if the start and the end block are in the * same cluster */ void ext4_es_insert_delayed_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len, bool lclu_allocated, bool end_allocated) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct extent_status newes; ext4_lblk_t end = lblk + len - 1; int err1 = 0, err2 = 0, err3 = 0; struct extent_status *es1 = NULL; struct extent_status *es2 = NULL; struct pending_reservation *pr1 = NULL; struct pending_reservation *pr2 = NULL; if (EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) return; es_debug("add [%u/%u) delayed to extent status tree of inode %lu\n", lblk, len, inode->i_ino); if (!len) return; WARN_ON_ONCE((EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) && end_allocated); newes.es_lblk = lblk; newes.es_len = len; ext4_es_store_pblock_status(&newes, ~0, EXTENT_STATUS_DELAYED); trace_ext4_es_insert_delayed_extent(inode, &newes, lclu_allocated, end_allocated); ext4_es_insert_extent_check(inode, &newes); retry: if (err1 && !es1) es1 = __es_alloc_extent(true); if ((err1 || err2) && !es2) es2 = __es_alloc_extent(true); if (err1 || err2 || err3) { if (lclu_allocated && !pr1) pr1 = __alloc_pending(true); if (end_allocated && !pr2) pr2 = __alloc_pending(true); } write_lock(&EXT4_I(inode)->i_es_lock); err1 = __es_remove_extent(inode, lblk, end, NULL, es1); if (err1 != 0) goto error; /* Free preallocated extent if it didn't get used. */ if (es1) { if (!es1->es_len) __es_free_extent(es1); es1 = NULL; } err2 = __es_insert_extent(inode, &newes, es2); if (err2 != 0) goto error; /* Free preallocated extent if it didn't get used. */ if (es2) { if (!es2->es_len) __es_free_extent(es2); es2 = NULL; } if (lclu_allocated) { err3 = __insert_pending(inode, lblk, &pr1); if (err3 != 0) goto error; if (pr1) { __free_pending(pr1); pr1 = NULL; } } if (end_allocated) { err3 = __insert_pending(inode, end, &pr2); if (err3 != 0) goto error; if (pr2) { __free_pending(pr2); pr2 = NULL; } } error: write_unlock(&EXT4_I(inode)->i_es_lock); if (err1 || err2 || err3) goto retry; ext4_es_print_tree(inode); ext4_print_pending_tree(inode); return; } /* * __es_delayed_clu - count number of clusters containing blocks that * are delayed only * * @inode - file containing block range * @start - logical block defining start of range * @end - logical block defining end of range * * Returns the number of clusters containing only delayed (not delayed * and unwritten) blocks in the range specified by @start and @end. Any * cluster or part of a cluster within the range and containing a delayed * and not unwritten block within the range is counted as a whole cluster. 
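* For example, with a cluster ratio of 4, a delonly extent covering * blocks 2..9 spans logical clusters 0..2 and counts as three clusters.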
*/ static unsigned int __es_delayed_clu(struct inode *inode, ext4_lblk_t start, ext4_lblk_t end) { struct ext4_es_tree *tree = &EXT4_I(inode)->i_es_tree; struct extent_status *es; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct rb_node *node; ext4_lblk_t first_lclu, last_lclu; unsigned long long last_counted_lclu; unsigned int n = 0; /* guaranteed to be unequal to any ext4_lblk_t value */ last_counted_lclu = ~0ULL; es = __es_tree_search(&tree->root, start); while (es && (es->es_lblk <= end)) { if (ext4_es_is_delonly(es)) { if (es->es_lblk <= start) first_lclu = EXT4_B2C(sbi, start); else first_lclu = EXT4_B2C(sbi, es->es_lblk); if (ext4_es_end(es) >= end) last_lclu = EXT4_B2C(sbi, end); else last_lclu = EXT4_B2C(sbi, ext4_es_end(es)); if (first_lclu == last_counted_lclu) n += last_lclu - first_lclu; else n += last_lclu - first_lclu + 1; last_counted_lclu = last_lclu; } node = rb_next(&es->rb_node); if (!node) break; es = rb_entry(node, struct extent_status, rb_node); } return n; } /* * ext4_es_delayed_clu - count number of clusters containing blocks that * are delayed and not unwritten (delonly) * * @inode - file containing block range * @lblk - logical block defining start of range * @len - number of blocks in range * * Locking for external use of __es_delayed_clu(). */ unsigned int ext4_es_delayed_clu(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len) { struct ext4_inode_info *ei = EXT4_I(inode); ext4_lblk_t end; unsigned int n; if (len == 0) return 0; end = lblk + len - 1; WARN_ON(end < lblk); read_lock(&ei->i_es_lock); n = __es_delayed_clu(inode, lblk, end); read_unlock(&ei->i_es_lock); return n; } /* * __revise_pending - makes, cancels, or leaves unchanged pending cluster * reservations for a specified block range depending * upon the presence or absence of delayed blocks * outside the range within clusters at the ends of the * range * * @inode - file containing the range * @lblk - logical block defining the start of range * @len - length of range in blocks * @prealloc - preallocated pending entry * * Used after a newly allocated extent is added to the extents status tree. * Requires that the extents in the range have either written or unwritten * status. Must be called while holding i_es_lock. */ static int __revise_pending(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len, struct pending_reservation **prealloc) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); ext4_lblk_t end = lblk + len - 1; ext4_lblk_t first, last; bool f_del = false, l_del = false; int ret = 0; if (len == 0) return 0; /* * Two cases - block range within single cluster and block range * spanning two or more clusters. Note that a cluster belonging * to a range starting and/or ending on a cluster boundary is treated * as if it does not contain a delayed extent. The new range may * have allocated space for previously delayed blocks out to the * cluster boundary, requiring that any pre-existing pending * reservation be canceled. Because this code only looks at blocks * outside the range, it should revise pending reservations * correctly even if the extent represented by the range can't be * inserted in the extents status tree due to ENOSPC.
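* Concretely, for the single-cluster case below: if delonly blocks remain * in the cluster on either side of the range, a pending reservation is * (re)inserted for that cluster; otherwise any existing one is removed.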
*/ if (EXT4_B2C(sbi, lblk) == EXT4_B2C(sbi, end)) { first = EXT4_LBLK_CMASK(sbi, lblk); if (first != lblk) f_del = __es_scan_range(inode, &ext4_es_is_delonly, first, lblk - 1); if (f_del) { ret = __insert_pending(inode, first, prealloc); if (ret < 0) goto out; } else { last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1; if (last != end) l_del = __es_scan_range(inode, &ext4_es_is_delonly, end + 1, last); if (l_del) { ret = __insert_pending(inode, last, prealloc); if (ret < 0) goto out; } else __remove_pending(inode, last); } } else { first = EXT4_LBLK_CMASK(sbi, lblk); if (first != lblk) f_del = __es_scan_range(inode, &ext4_es_is_delonly, first, lblk - 1); if (f_del) { ret = __insert_pending(inode, first, prealloc); if (ret < 0) goto out; } else __remove_pending(inode, first); last = EXT4_LBLK_CMASK(sbi, end) + sbi->s_cluster_ratio - 1; if (last != end) l_del = __es_scan_range(inode, &ext4_es_is_delonly, end + 1, last); if (l_del) { ret = __insert_pending(inode, last, prealloc); if (ret < 0) goto out; } else __remove_pending(inode, last); } out: return ret; } |
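The bigalloc paths above lean heavily on block-to-cluster arithmetic (EXT4_B2C(), EXT4_LBLK_CMASK(), EXT4_LBLK_CFILL()). Below is a minimal userspace sketch of that arithmetic, with hypothetical names (b2c, cmask, cfill) standing in for the kernel macros and a hardcoded cluster ratio of 4; it only illustrates the rounding that count_rsvd() and __revise_pending() rely on.

#include <assert.h>
#include <stdint.h>

#define CLUSTER_BITS 2u                     /* 1 << 2 == 4 blocks per cluster */
#define CLUSTER_MASK ((1u << CLUSTER_BITS) - 1)

static uint32_t b2c(uint32_t blk)   { return blk >> CLUSTER_BITS; } /* like EXT4_B2C() */
static uint32_t cmask(uint32_t blk) { return blk & ~CLUSTER_MASK; } /* like EXT4_LBLK_CMASK(): first block of cluster */
static uint32_t cfill(uint32_t blk) { return blk | CLUSTER_MASK; }  /* like EXT4_LBLK_CFILL(): last block of cluster */

int main(void)
{
	/* blocks 2..9 span logical clusters 0..2 */
	assert(b2c(2) == 0 && b2c(9) == 2);
	/* block 9 lives in the cluster covering blocks 8..11 */
	assert(cmask(9) == 8 && cfill(9) == 11);
	/* a partial first cluster starting at block 2 ends at block 3 */
	assert(cfill(2) == 3);
	return 0;
}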
// SPDX-License-Identifier: GPL-2.0+ /* * 2002-10-15 Posix Clocks & timers * by George Anzinger george@mvista.com * Copyright (C)
2002 2003 by MontaVista Software. * * 2004-06-01 Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug. * Copyright (C) 2004 Boris Hu * * These are all the functions necessary to implement POSIX clocks & timers */ #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/mutex.h> #include <linux/sched/task.h> #include <linux/uaccess.h> #include <linux/list.h> #include <linux/init.h> #include <linux/compiler.h> #include <linux/hash.h> #include <linux/posix-clock.h> #include <linux/posix-timers.h> #include <linux/syscalls.h> #include <linux/wait.h> #include <linux/workqueue.h> #include <linux/export.h> #include <linux/hashtable.h> #include <linux/compat.h> #include <linux/nospec.h> #include <linux/time_namespace.h> #include "timekeeping.h" #include "posix-timers.h" static struct kmem_cache *posix_timers_cache; /* * Timers are managed in a hash table for lockless lookup. The hash key is * constructed from current::signal and the timer ID and the timer is * matched against current::signal and the timer ID when walking the hash * bucket list. * * This allows checkpoint/restore to reconstruct the exact timer IDs for * a process. */ static DEFINE_HASHTABLE(posix_timers_hashtable, 9); static DEFINE_SPINLOCK(hash_lock); static const struct k_clock * const posix_clocks[]; static const struct k_clock *clockid_to_kclock(const clockid_t id); static const struct k_clock clock_realtime, clock_monotonic; /* SIGEV_THREAD_ID cannot share a bit with the other SIGEV values. */ #if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \ ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD)) #error "SIGEV_THREAD_ID must not share bit with other SIGEV values!" #endif static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags); #define lock_timer(tid, flags) \ ({ struct k_itimer *__timr; \ __cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags)); \ __timr; \ }) static int hash(struct signal_struct *sig, unsigned int nr) { return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable)); } static struct k_itimer *__posix_timers_find(struct hlist_head *head, struct signal_struct *sig, timer_t id) { struct k_itimer *timer; hlist_for_each_entry_rcu(timer, head, t_hash, lockdep_is_held(&hash_lock)) { /* timer->it_signal can be set concurrently */ if ((READ_ONCE(timer->it_signal) == sig) && (timer->it_id == id)) return timer; } return NULL; } static struct k_itimer *posix_timer_by_id(timer_t id) { struct signal_struct *sig = current->signal; struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)]; return __posix_timers_find(head, sig, id); } static int posix_timer_add(struct k_itimer *timer) { struct signal_struct *sig = current->signal; struct hlist_head *head; unsigned int cnt, id; /* * FIXME: Replace this by a per signal struct xarray once there is * a plan to handle the resulting CRIU regression gracefully. */ for (cnt = 0; cnt <= INT_MAX; cnt++) { spin_lock(&hash_lock); id = sig->next_posix_timer_id; /* Write the next ID back. 
Clamp it to the positive space */ sig->next_posix_timer_id = (id + 1) & INT_MAX; head = &posix_timers_hashtable[hash(sig, id)]; if (!__posix_timers_find(head, sig, id)) { hlist_add_head_rcu(&timer->t_hash, head); spin_unlock(&hash_lock); return id; } spin_unlock(&hash_lock); } /* POSIX return code when no timer ID could be allocated */ return -EAGAIN; } static inline void unlock_timer(struct k_itimer *timr, unsigned long flags) { spin_unlock_irqrestore(&timr->it_lock, flags); } static int posix_get_realtime_timespec(clockid_t which_clock, struct timespec64 *tp) { ktime_get_real_ts64(tp); return 0; } static ktime_t posix_get_realtime_ktime(clockid_t which_clock) { return ktime_get_real(); } static int posix_clock_realtime_set(const clockid_t which_clock, const struct timespec64 *tp) { return do_sys_settimeofday64(tp, NULL); } static int posix_clock_realtime_adj(const clockid_t which_clock, struct __kernel_timex *t) { return do_adjtimex(t); } static int posix_get_monotonic_timespec(clockid_t which_clock, struct timespec64 *tp) { ktime_get_ts64(tp); timens_add_monotonic(tp); return 0; } static ktime_t posix_get_monotonic_ktime(clockid_t which_clock) { return ktime_get(); } static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp) { ktime_get_raw_ts64(tp); timens_add_monotonic(tp); return 0; } static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp) { ktime_get_coarse_real_ts64(tp); return 0; } static int posix_get_monotonic_coarse(clockid_t which_clock, struct timespec64 *tp) { ktime_get_coarse_ts64(tp); timens_add_monotonic(tp); return 0; } static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *tp) { *tp = ktime_to_timespec64(KTIME_LOW_RES); return 0; } static int posix_get_boottime_timespec(const clockid_t which_clock, struct timespec64 *tp) { ktime_get_boottime_ts64(tp); timens_add_boottime(tp); return 0; } static ktime_t posix_get_boottime_ktime(const clockid_t which_clock) { return ktime_get_boottime(); } static int posix_get_tai_timespec(clockid_t which_clock, struct timespec64 *tp) { ktime_get_clocktai_ts64(tp); return 0; } static ktime_t posix_get_tai_ktime(clockid_t which_clock) { return ktime_get_clocktai(); } static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp) { tp->tv_sec = 0; tp->tv_nsec = hrtimer_resolution; return 0; } static __init int init_posix_timers(void) { posix_timers_cache = kmem_cache_create("posix_timers_cache", sizeof(struct k_itimer), 0, SLAB_PANIC | SLAB_ACCOUNT, NULL); return 0; } __initcall(init_posix_timers); /* * The siginfo si_overrun field and the return value of timer_getoverrun(2) * are of type int. Clamp the overrun value to INT_MAX */ static inline int timer_overrun_to_int(struct k_itimer *timr, int baseval) { s64 sum = timr->it_overrun_last + (s64)baseval; return sum > (s64)INT_MAX ? INT_MAX : (int)sum; } static void common_hrtimer_rearm(struct k_itimer *timr) { struct hrtimer *timer = &timr->it.real.timer; timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(), timr->it_interval); hrtimer_restart(timer); } /* * This function is called from the signal delivery code if * info->si_sys_private is not zero, which indicates that the timer has to * be rearmed. Restart the timer and update info::si_overrun. 
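* The si_sys_private value acts as a sequence check against * it_requeue_pending below, so a stale signal from an earlier expiry * cannot rearm the timer a second time.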
*/ void posixtimer_rearm(struct kernel_siginfo *info) { struct k_itimer *timr; unsigned long flags; timr = lock_timer(info->si_tid, &flags); if (!timr) return; if (timr->it_interval && timr->it_requeue_pending == info->si_sys_private) { timr->kclock->timer_rearm(timr); timr->it_active = 1; timr->it_overrun_last = timr->it_overrun; timr->it_overrun = -1LL; ++timr->it_requeue_pending; info->si_overrun = timer_overrun_to_int(timr, info->si_overrun); } unlock_timer(timr, flags); } int posix_timer_event(struct k_itimer *timr, int si_private) { enum pid_type type; int ret; /* * FIXME: if ->sigq is queued we can race with * dequeue_signal()->posixtimer_rearm(). * * If dequeue_signal() sees the "right" value of * si_sys_private it calls posixtimer_rearm(). * We re-queue ->sigq and drop ->it_lock(). * posixtimer_rearm() locks the timer * and re-schedules it while ->sigq is pending. * Not really bad, but not what we want. */ timr->sigq->info.si_sys_private = si_private; type = !(timr->it_sigev_notify & SIGEV_THREAD_ID) ? PIDTYPE_TGID : PIDTYPE_PID; ret = send_sigqueue(timr->sigq, timr->it_pid, type); /* If we failed to send the signal the timer stops. */ return ret > 0; } /* * This function gets called when a POSIX.1b interval timer expires from * the HRTIMER interrupt (soft interrupt on RT kernels). * * Handles CLOCK_REALTIME, CLOCK_MONOTONIC, CLOCK_BOOTTIME and CLOCK_TAI * based timers. */ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer) { enum hrtimer_restart ret = HRTIMER_NORESTART; struct k_itimer *timr; unsigned long flags; int si_private = 0; timr = container_of(timer, struct k_itimer, it.real.timer); spin_lock_irqsave(&timr->it_lock, flags); timr->it_active = 0; if (timr->it_interval != 0) si_private = ++timr->it_requeue_pending; if (posix_timer_event(timr, si_private)) { /* * The signal was not queued due to SIG_IGN. As a * consequence the timer is not going to be rearmed from * the signal delivery path. But as a real signal handler * can be installed later the timer must be rearmed here. */ if (timr->it_interval != 0) { ktime_t now = hrtimer_cb_get_time(timer); /* * FIXME: What we really want is to stop this * timer completely and restart it in case the * SIG_IGN is removed. This is a non-trivial * change to the signal handling code. * * For now let timers with an interval less than a * jiffy expire every jiffy and recheck for a * valid signal handler. * * This avoids interrupt starvation in case of a * very small interval, which would expire the * timer immediately again. * * Moving now ahead of time by one jiffy tricks * hrtimer_forward() into expiring the timer later, * while it still maintains the overrun accuracy * for the price of a slight inconsistency in the * timer_gettime() case. This is at least better * than a timer storm. * * Only required when high resolution timers are * enabled as the periodic tick based timers are * automatically aligned to the next tick. 
*/ if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS)) { ktime_t kj = TICK_NSEC; if (timr->it_interval < kj) now = ktime_add(now, kj); } timr->it_overrun += hrtimer_forward(timer, now, timr->it_interval); ret = HRTIMER_RESTART; ++timr->it_requeue_pending; timr->it_active = 1; } } unlock_timer(timr, flags); return ret; } static struct pid *good_sigevent(sigevent_t * event) { struct pid *pid = task_tgid(current); struct task_struct *rtn; switch (event->sigev_notify) { case SIGEV_SIGNAL | SIGEV_THREAD_ID: pid = find_vpid(event->sigev_notify_thread_id); rtn = pid_task(pid, PIDTYPE_PID); if (!rtn || !same_thread_group(rtn, current)) return NULL; fallthrough; case SIGEV_SIGNAL: case SIGEV_THREAD: if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX) return NULL; fallthrough; case SIGEV_NONE: return pid; default: return NULL; } } static struct k_itimer * alloc_posix_timer(void) { struct k_itimer *tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL); if (!tmr) return tmr; if (unlikely(!(tmr->sigq = sigqueue_alloc()))) { kmem_cache_free(posix_timers_cache, tmr); return NULL; } clear_siginfo(&tmr->sigq->info); return tmr; } static void k_itimer_rcu_free(struct rcu_head *head) { struct k_itimer *tmr = container_of(head, struct k_itimer, rcu); kmem_cache_free(posix_timers_cache, tmr); } static void posix_timer_free(struct k_itimer *tmr) { put_pid(tmr->it_pid); sigqueue_free(tmr->sigq); call_rcu(&tmr->rcu, k_itimer_rcu_free); } static void posix_timer_unhash_and_free(struct k_itimer *tmr) { spin_lock(&hash_lock); hlist_del_rcu(&tmr->t_hash); spin_unlock(&hash_lock); posix_timer_free(tmr); } static int common_timer_create(struct k_itimer *new_timer) { hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0); return 0; } /* Create a POSIX.1b interval timer. */ static int do_timer_create(clockid_t which_clock, struct sigevent *event, timer_t __user *created_timer_id) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct k_itimer *new_timer; int error, new_timer_id; if (!kc) return -EINVAL; if (!kc->timer_create) return -EOPNOTSUPP; new_timer = alloc_posix_timer(); if (unlikely(!new_timer)) return -EAGAIN; spin_lock_init(&new_timer->it_lock); /* * Add the timer to the hash table. The timer is not yet valid * because new_timer::it_signal is still NULL. The timer id is also * not yet visible to user space. */ new_timer_id = posix_timer_add(new_timer); if (new_timer_id < 0) { posix_timer_free(new_timer); return new_timer_id; } new_timer->it_id = (timer_t) new_timer_id; new_timer->it_clock = which_clock; new_timer->kclock = kc; new_timer->it_overrun = -1LL; if (event) { rcu_read_lock(); new_timer->it_pid = get_pid(good_sigevent(event)); rcu_read_unlock(); if (!new_timer->it_pid) { error = -EINVAL; goto out; } new_timer->it_sigev_notify = event->sigev_notify; new_timer->sigq->info.si_signo = event->sigev_signo; new_timer->sigq->info.si_value = event->sigev_value; } else { new_timer->it_sigev_notify = SIGEV_SIGNAL; new_timer->sigq->info.si_signo = SIGALRM; memset(&new_timer->sigq->info.si_value, 0, sizeof(sigval_t)); new_timer->sigq->info.si_value.sival_int = new_timer->it_id; new_timer->it_pid = get_pid(task_tgid(current)); } new_timer->sigq->info.si_tid = new_timer->it_id; new_timer->sigq->info.si_code = SI_TIMER; if (copy_to_user(created_timer_id, &new_timer_id, sizeof (new_timer_id))) { error = -EFAULT; goto out; } /* * After successful copy out, the timer ID is visible to user space * now but not yet valid because new_timer::it_signal is still NULL. 
* * Complete the initialization with the clock specific create * callback. */ error = kc->timer_create(new_timer); if (error) goto out; spin_lock_irq(&current->sighand->siglock); /* This makes the timer valid in the hash table */ WRITE_ONCE(new_timer->it_signal, current->signal); list_add(&new_timer->list, &current->signal->posix_timers); spin_unlock_irq(&current->sighand->siglock); /* * After unlocking sighand::siglock @new_timer is subject to * concurrent removal and cannot be touched anymore */ return 0; out: posix_timer_unhash_and_free(new_timer); return error; } SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock, struct sigevent __user *, timer_event_spec, timer_t __user *, created_timer_id) { if (timer_event_spec) { sigevent_t event; if (copy_from_user(&event, timer_event_spec, sizeof (event))) return -EFAULT; return do_timer_create(which_clock, &event, created_timer_id); } return do_timer_create(which_clock, NULL, created_timer_id); } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock, struct compat_sigevent __user *, timer_event_spec, timer_t __user *, created_timer_id) { if (timer_event_spec) { sigevent_t event; if (get_compat_sigevent(&event, timer_event_spec)) return -EFAULT; return do_timer_create(which_clock, &event, created_timer_id); } return do_timer_create(which_clock, NULL, created_timer_id); } #endif static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags) { struct k_itimer *timr; /* * timer_t could be any type >= int and we want to make sure any * @timer_id outside positive int range fails lookup. */ if ((unsigned long long)timer_id > INT_MAX) return NULL; /* * The hash lookup and the timers are RCU protected. * * Timers are added to the hash in an invalid state where * timr::it_signal == NULL. timr::it_signal is only set after the * rest of the initialization succeeded. * * Timer destruction happens in steps: * 1) Set timr::it_signal to NULL with timr::it_lock held * 2) Release timr::it_lock * 3) Remove from the hash under hash_lock * 4) Call RCU for removal after the grace period * * Holding rcu_read_lock() across the lookup ensures that * the timer cannot be freed. * * The lookup validates locklessly that timr::it_signal == * current::it_signal and timr::it_id == @timer_id. timr::it_id * can't change, but timr::it_signal becomes NULL during * destruction. */ rcu_read_lock(); timr = posix_timer_by_id(timer_id); if (timr) { spin_lock_irqsave(&timr->it_lock, *flags); /* * Validate under timr::it_lock that timr::it_signal is * still valid. Pairs with #1 above. */ if (timr->it_signal == current->signal) { rcu_read_unlock(); return timr; } spin_unlock_irqrestore(&timr->it_lock, *flags); } rcu_read_unlock(); return NULL; } static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now) { struct hrtimer *timer = &timr->it.real.timer; return __hrtimer_expires_remaining_adjusted(timer, now); } static s64 common_hrtimer_forward(struct k_itimer *timr, ktime_t now) { struct hrtimer *timer = &timr->it.real.timer; return hrtimer_forward(timer, now, timr->it_interval); } /* * Get the time remaining on a POSIX.1b interval timer. * * Two issues to handle here: * * 1) The timer has a requeue pending. The return value must appear as * if the timer has been requeued right now. * * 2) The timer is a SIGEV_NONE timer. These timers are never enqueued * into the hrtimer queue and therefore never expired. Emulate expiry * here taking #1 into account. 
*/ void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting) { const struct k_clock *kc = timr->kclock; ktime_t now, remaining, iv; bool sig_none; sig_none = timr->it_sigev_notify == SIGEV_NONE; iv = timr->it_interval; /* interval timer ? */ if (iv) { cur_setting->it_interval = ktime_to_timespec64(iv); } else if (!timr->it_active) { /* * SIGEV_NONE oneshot timers are never queued and therefore * timr->it_active is always false. The check below * vs. remaining time will handle this case. * * For all other timers there is nothing to update here, so * return. */ if (!sig_none) return; } now = kc->clock_get_ktime(timr->it_clock); /* * If this is an interval timer and either has requeue pending or * is a SIGEV_NONE timer move the expiry time forward by intervals, * so expiry is > now. */ if (iv && (timr->it_requeue_pending & REQUEUE_PENDING || sig_none)) timr->it_overrun += kc->timer_forward(timr, now); remaining = kc->timer_remaining(timr, now); /* * As @now is retrieved before a possible timer_forward() and * cannot be reevaluated by the compiler @remaining is based on the * same @now value. Therefore @remaining is consistent vs. @now. * * Consequently all interval timers, i.e. @iv > 0, cannot have a * remaining time <= 0 because timer_forward() guarantees to move * them forward so that the next timer expiry is > @now. */ if (remaining <= 0) { /* * A single shot SIGEV_NONE timer must return 0, when it is * expired! Timers which have a real signal delivery mode * must return a remaining time greater than 0 because the * signal has not yet been delivered. */ if (!sig_none) cur_setting->it_value.tv_nsec = 1; } else { cur_setting->it_value = ktime_to_timespec64(remaining); } } static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting) { const struct k_clock *kc; struct k_itimer *timr; unsigned long flags; int ret = 0; timr = lock_timer(timer_id, &flags); if (!timr) return -EINVAL; memset(setting, 0, sizeof(*setting)); kc = timr->kclock; if (WARN_ON_ONCE(!kc || !kc->timer_get)) ret = -EINVAL; else kc->timer_get(timr, setting); unlock_timer(timr, flags); return ret; } /* Get the time remaining on a POSIX.1b interval timer. */ SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id, struct __kernel_itimerspec __user *, setting) { struct itimerspec64 cur_setting; int ret = do_timer_gettime(timer_id, &cur_setting); if (!ret) { if (put_itimerspec64(&cur_setting, setting)) ret = -EFAULT; } return ret; } #ifdef CONFIG_COMPAT_32BIT_TIME SYSCALL_DEFINE2(timer_gettime32, timer_t, timer_id, struct old_itimerspec32 __user *, setting) { struct itimerspec64 cur_setting; int ret = do_timer_gettime(timer_id, &cur_setting); if (!ret) { if (put_old_itimerspec32(&cur_setting, setting)) ret = -EFAULT; } return ret; } #endif /** * sys_timer_getoverrun - Get the number of overruns of a POSIX.1b interval timer * @timer_id: The timer ID which identifies the timer * * The "overrun count" of a timer is one plus the number of expiration * intervals which have elapsed between the first expiry, which queues the * signal and the actual signal delivery. On signal delivery the "overrun * count" is calculated and cached, so it can be returned directly here. * * As this is relative to the last queued signal the returned overrun count * is meaningless outside of the signal delivery path and even there it * does not accurately reflect the current state when user space evaluates * it. 
* * Returns: * -EINVAL @timer_id is invalid * 1..INT_MAX The number of overruns related to the last delivered signal */ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id) { struct k_itimer *timr; unsigned long flags; int overrun; timr = lock_timer(timer_id, &flags); if (!timr) return -EINVAL; overrun = timer_overrun_to_int(timr, 0); unlock_timer(timr, flags); return overrun; } static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires, bool absolute, bool sigev_none) { struct hrtimer *timer = &timr->it.real.timer; enum hrtimer_mode mode; mode = absolute ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL; /* * POSIX magic: Relative CLOCK_REALTIME timers are not affected by * clock modifications, so they become CLOCK_MONOTONIC based under the * hood. See hrtimer_init(). Update timr->kclock, so the generic * functions which use timr->kclock->clock_get_*() work. * * Note: it_clock stays unmodified, because the next timer_set() might * use ABSTIME, so it needs to switch back. */ if (timr->it_clock == CLOCK_REALTIME) timr->kclock = absolute ? &clock_realtime : &clock_monotonic; hrtimer_init(&timr->it.real.timer, timr->it_clock, mode); timr->it.real.timer.function = posix_timer_fn; if (!absolute) expires = ktime_add_safe(expires, timer->base->get_time()); hrtimer_set_expires(timer, expires); if (!sigev_none) hrtimer_start_expires(timer, HRTIMER_MODE_ABS); } static int common_hrtimer_try_to_cancel(struct k_itimer *timr) { return hrtimer_try_to_cancel(&timr->it.real.timer); } static void common_timer_wait_running(struct k_itimer *timer) { hrtimer_cancel_wait_running(&timer->it.real.timer); } /* * On PREEMPT_RT this prevents priority inversion and a potential livelock * against the ksoftirqd thread in case ksoftirqd gets preempted while * executing a hrtimer callback. * * See the comments in hrtimer_cancel_wait_running(). For PREEMPT_RT=n this * just results in a cpu_relax(). * * For POSIX CPU timers with CONFIG_POSIX_CPU_TIMERS_TASK_WORK=n this is * just a cpu_relax(). With CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y this * prevents spinning on an eventually scheduled out task and a livelock * when the task which tries to delete or disarm the timer has preempted * the task which runs the expiry in task work context. */ static struct k_itimer *timer_wait_running(struct k_itimer *timer, unsigned long *flags) { const struct k_clock *kc = READ_ONCE(timer->kclock); timer_t timer_id = READ_ONCE(timer->it_id); /* Prevent kfree(timer) after dropping the lock */ rcu_read_lock(); unlock_timer(timer, *flags); /* * kc->timer_wait_running() might drop the RCU lock. So @timer * cannot be touched anymore after the function returns! */ if (!WARN_ON_ONCE(!kc->timer_wait_running)) kc->timer_wait_running(timer); rcu_read_unlock(); /* Relock the timer. It might no longer be hashed. */ return lock_timer(timer_id, flags); } /* Set a POSIX.1b interval timer. */ int common_timer_set(struct k_itimer *timr, int flags, struct itimerspec64 *new_setting, struct itimerspec64 *old_setting) { const struct k_clock *kc = timr->kclock; bool sigev_none; ktime_t expires; if (old_setting) common_timer_get(timr, old_setting); /* Prevent rearming by clearing the interval */ timr->it_interval = 0; /* * Careful here. On SMP systems the timer expiry function could be * active and spinning on timr->it_lock. 
*/ if (kc->timer_try_to_cancel(timr) < 0) return TIMER_RETRY; timr->it_active = 0; timr->it_requeue_pending = (timr->it_requeue_pending + 2) & ~REQUEUE_PENDING; timr->it_overrun_last = 0; /* Switch off the timer when it_value is zero */ if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec) return 0; timr->it_interval = timespec64_to_ktime(new_setting->it_interval); expires = timespec64_to_ktime(new_setting->it_value); if (flags & TIMER_ABSTIME) expires = timens_ktime_to_host(timr->it_clock, expires); sigev_none = timr->it_sigev_notify == SIGEV_NONE; kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none); timr->it_active = !sigev_none; return 0; } static int do_timer_settime(timer_t timer_id, int tmr_flags, struct itimerspec64 *new_spec64, struct itimerspec64 *old_spec64) { const struct k_clock *kc; struct k_itimer *timr; unsigned long flags; int error = 0; if (!timespec64_valid(&new_spec64->it_interval) || !timespec64_valid(&new_spec64->it_value)) return -EINVAL; if (old_spec64) memset(old_spec64, 0, sizeof(*old_spec64)); timr = lock_timer(timer_id, &flags); retry: if (!timr) return -EINVAL; kc = timr->kclock; if (WARN_ON_ONCE(!kc || !kc->timer_set)) error = -EINVAL; else error = kc->timer_set(timr, tmr_flags, new_spec64, old_spec64); if (error == TIMER_RETRY) { // We already got the old time... old_spec64 = NULL; /* Unlocks and relocks the timer if it still exists */ timr = timer_wait_running(timr, &flags); goto retry; } unlock_timer(timr, flags); return error; } /* Set a POSIX.1b interval timer */ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags, const struct __kernel_itimerspec __user *, new_setting, struct __kernel_itimerspec __user *, old_setting) { struct itimerspec64 new_spec, old_spec, *rtn; int error = 0; if (!new_setting) return -EINVAL; if (get_itimerspec64(&new_spec, new_setting)) return -EFAULT; rtn = old_setting ? &old_spec : NULL; error = do_timer_settime(timer_id, flags, &new_spec, rtn); if (!error && old_setting) { if (put_itimerspec64(&old_spec, old_setting)) error = -EFAULT; } return error; } #ifdef CONFIG_COMPAT_32BIT_TIME SYSCALL_DEFINE4(timer_settime32, timer_t, timer_id, int, flags, struct old_itimerspec32 __user *, new, struct old_itimerspec32 __user *, old) { struct itimerspec64 new_spec, old_spec; struct itimerspec64 *rtn = old ? &old_spec : NULL; int error = 0; if (!new) return -EINVAL; if (get_old_itimerspec32(&new_spec, new)) return -EFAULT; error = do_timer_settime(timer_id, flags, &new_spec, rtn); if (!error && old) { if (put_old_itimerspec32(&old_spec, old)) error = -EFAULT; } return error; } #endif int common_timer_del(struct k_itimer *timer) { const struct k_clock *kc = timer->kclock; timer->it_interval = 0; if (kc->timer_try_to_cancel(timer) < 0) return TIMER_RETRY; timer->it_active = 0; return 0; } static inline int timer_delete_hook(struct k_itimer *timer) { const struct k_clock *kc = timer->kclock; if (WARN_ON_ONCE(!kc || !kc->timer_del)) return -EINVAL; return kc->timer_del(timer); } /* Delete a POSIX.1b interval timer. 
*/ SYSCALL_DEFINE1(timer_delete, timer_t, timer_id) { struct k_itimer *timer; unsigned long flags; timer = lock_timer(timer_id, &flags); retry_delete: if (!timer) return -EINVAL; if (unlikely(timer_delete_hook(timer) == TIMER_RETRY)) { /* Unlocks and relocks the timer if it still exists */ timer = timer_wait_running(timer, &flags); goto retry_delete; } spin_lock(&current->sighand->siglock); list_del(&timer->list); spin_unlock(&current->sighand->siglock); /* * A concurrent lookup could check timer::it_signal locklessly. It * will reevaluate with timer::it_lock held and observe the NULL. */ WRITE_ONCE(timer->it_signal, NULL); unlock_timer(timer, flags); posix_timer_unhash_and_free(timer); return 0; } /* * Delete a timer if it is armed, remove it from the hash and schedule it * for RCU freeing. */ static void itimer_delete(struct k_itimer *timer) { unsigned long flags; /* * irqsave is required to make timer_wait_running() work. */ spin_lock_irqsave(&timer->it_lock, flags); retry_delete: /* * Even if the timer is no longer accessible from other tasks * it still might be armed and queued in the underlying timer * mechanism. Worse, that timer mechanism might run the expiry * function concurrently. */ if (timer_delete_hook(timer) == TIMER_RETRY) { /* * The timer is being expired concurrently, prevent livelocks * and pointless spinning on RT. * * timer_wait_running() drops timer::it_lock, which opens * the possibility for another task to delete the timer. * * That's not possible here because this is invoked from * do_exit() only for the last thread of the thread group. * So no other task can access and delete that timer. */ if (WARN_ON_ONCE(timer_wait_running(timer, &flags) != timer)) return; goto retry_delete; } list_del(&timer->list); /* * Setting timer::it_signal to NULL is technically not required * here as nothing can access the timer anymore legitimately via * the hash table. Set it to NULL nevertheless so that all deletion * paths are consistent. */ WRITE_ONCE(timer->it_signal, NULL); spin_unlock_irqrestore(&timer->it_lock, flags); posix_timer_unhash_and_free(timer); } /* * Invoked from do_exit() when the last thread of a thread group exits. * At that point no other task can access the timers of the dying * task anymore. */ void exit_itimers(struct task_struct *tsk) { struct list_head timers; struct k_itimer *tmr; if (list_empty(&tsk->signal->posix_timers)) return; /* Protect against concurrent read via /proc/$PID/timers */ spin_lock_irq(&tsk->sighand->siglock); list_replace_init(&tsk->signal->posix_timers, &timers); spin_unlock_irq(&tsk->sighand->siglock); /* The timers are no longer accessible via tsk::signal */ while (!list_empty(&timers)) { tmr = list_first_entry(&timers, struct k_itimer, list); itimer_delete(tmr); } } SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock, const struct __kernel_timespec __user *, tp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 new_tp; if (!kc || !kc->clock_set) return -EINVAL; if (get_timespec64(&new_tp, tp)) return -EFAULT; /* * Permission checks have to be done inside the clock specific * setter callback. 
*/ return kc->clock_set(which_clock, &new_tp); } SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock, struct __kernel_timespec __user *, tp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 kernel_tp; int error; if (!kc) return -EINVAL; error = kc->clock_get_timespec(which_clock, &kernel_tp); if (!error && put_timespec64(&kernel_tp, tp)) error = -EFAULT; return error; } int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex * ktx) { const struct k_clock *kc = clockid_to_kclock(which_clock); if (!kc) return -EINVAL; if (!kc->clock_adj) return -EOPNOTSUPP; return kc->clock_adj(which_clock, ktx); } SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock, struct __kernel_timex __user *, utx) { struct __kernel_timex ktx; int err; if (copy_from_user(&ktx, utx, sizeof(ktx))) return -EFAULT; err = do_clock_adjtime(which_clock, &ktx); if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx))) return -EFAULT; return err; } /** * sys_clock_getres - Get the resolution of a clock * @which_clock: The clock to get the resolution for * @tp: Pointer to a user space timespec64 for storage * * POSIX defines: * * "The clock_getres() function shall return the resolution of any * clock. Clock resolutions are implementation-defined and cannot be set by * a process. If the argument res is not NULL, the resolution of the * specified clock shall be stored in the location pointed to by res. If * res is NULL, the clock resolution is not returned. If the time argument * of clock_settime() is not a multiple of res, then the value is truncated * to a multiple of res." * * Due to the various hardware constraints the real resolution can vary * wildly and even change during runtime when the underlying devices are * replaced. The kernel also can use hardware devices with different * resolutions for reading the time and for arming timers. * * The kernel therefore deviates from the POSIX spec in various aspects: * * 1) The resolution returned to user space * * For CLOCK_REALTIME, CLOCK_MONOTONIC, CLOCK_BOOTTIME, CLOCK_TAI, * CLOCK_REALTIME_ALARM, CLOCK_BOOTTIME_ALARM and CLOCK_MONOTONIC_RAW * the kernel differentiates only two cases: * * I) Low resolution mode: * * When high resolution timers are disabled at compile or runtime * the resolution returned is nanoseconds per tick, which represents * the precision at which timers expire. * * II) High resolution mode: * * When high resolution timers are enabled the resolution returned * is always one nanosecond independent of the actual resolution of * the underlying hardware devices. * * For CLOCK_*_ALARM the actual resolution depends on system * state. When the system is running the resolution is the same as the * resolution of the other clocks. During suspend the actual * resolution is the resolution of the underlying RTC device which * might be way less precise than the clockevent device used during * running state. * * For CLOCK_REALTIME_COARSE and CLOCK_MONOTONIC_COARSE the resolution * returned is always nanoseconds per tick. * * For CLOCK_PROCESS_CPUTIME and CLOCK_THREAD_CPUTIME the resolution * returned is always one nanosecond under the assumption that the * underlying scheduler clock has a better resolution than nanoseconds * per tick. * * For dynamic POSIX clocks (PTP devices) the resolution returned is * always one nanosecond. * * 2) Effect on sys_clock_settime() * * The kernel does not truncate the time which is handed in to * sys_clock_settime(). 
The kernel internal timekeeping is always using * nanosecond precision independent of the clocksource device which is * used to read the time from. The resolution of that device only * affects the precision of the time returned by sys_clock_gettime(). * * Returns: * 0 Success. @tp contains the resolution * -EINVAL @which_clock is not a valid clock ID * -EFAULT Copying the resolution to @tp faulted * -ENODEV Dynamic POSIX clock is not backed by a device * -EOPNOTSUPP Dynamic POSIX clock does not support getres() */ SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock, struct __kernel_timespec __user *, tp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 rtn_tp; int error; if (!kc) return -EINVAL; error = kc->clock_getres(which_clock, &rtn_tp); if (!error && tp && put_timespec64(&rtn_tp, tp)) error = -EFAULT; return error; } #ifdef CONFIG_COMPAT_32BIT_TIME SYSCALL_DEFINE2(clock_settime32, clockid_t, which_clock, struct old_timespec32 __user *, tp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 ts; if (!kc || !kc->clock_set) return -EINVAL; if (get_old_timespec32(&ts, tp)) return -EFAULT; return kc->clock_set(which_clock, &ts); } SYSCALL_DEFINE2(clock_gettime32, clockid_t, which_clock, struct old_timespec32 __user *, tp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 ts; int err; if (!kc) return -EINVAL; err = kc->clock_get_timespec(which_clock, &ts); if (!err && put_old_timespec32(&ts, tp)) err = -EFAULT; return err; } SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock, struct old_timex32 __user *, utp) { struct __kernel_timex ktx; int err; err = get_old_timex32(&ktx, utp); if (err) return err; err = do_clock_adjtime(which_clock, &ktx); if (err >= 0 && put_old_timex32(utp, &ktx)) return -EFAULT; return err; } SYSCALL_DEFINE2(clock_getres_time32, clockid_t, which_clock, struct old_timespec32 __user *, tp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 ts; int err; if (!kc) return -EINVAL; err = kc->clock_getres(which_clock, &ts); if (!err && tp && put_old_timespec32(&ts, tp)) return -EFAULT; return err; } #endif /* * sys_clock_nanosleep() for CLOCK_REALTIME and CLOCK_TAI */ static int common_nsleep(const clockid_t which_clock, int flags, const struct timespec64 *rqtp) { ktime_t texp = timespec64_to_ktime(*rqtp); return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL, which_clock); } /* * sys_clock_nanosleep() for CLOCK_MONOTONIC and CLOCK_BOOTTIME * * Absolute nanosleeps for these clocks are time-namespace adjusted. */ static int common_nsleep_timens(const clockid_t which_clock, int flags, const struct timespec64 *rqtp) { ktime_t texp = timespec64_to_ktime(*rqtp); if (flags & TIMER_ABSTIME) texp = timens_ktime_to_host(which_clock, texp); return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL, which_clock); } SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags, const struct __kernel_timespec __user *, rqtp, struct __kernel_timespec __user *, rmtp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 t; if (!kc) return -EINVAL; if (!kc->nsleep) return -EOPNOTSUPP; if (get_timespec64(&t, rqtp)) return -EFAULT; if (!timespec64_valid(&t)) return -EINVAL; if (flags & TIMER_ABSTIME) rmtp = NULL; current->restart_block.fn = do_no_restart_syscall; current->restart_block.nanosleep.type = rmtp ? 
TT_NATIVE : TT_NONE; current->restart_block.nanosleep.rmtp = rmtp; return kc->nsleep(which_clock, flags, &t); } #ifdef CONFIG_COMPAT_32BIT_TIME SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags, struct old_timespec32 __user *, rqtp, struct old_timespec32 __user *, rmtp) { const struct k_clock *kc = clockid_to_kclock(which_clock); struct timespec64 t; if (!kc) return -EINVAL; if (!kc->nsleep) return -EOPNOTSUPP; if (get_old_timespec32(&t, rqtp)) return -EFAULT; if (!timespec64_valid(&t)) return -EINVAL; if (flags & TIMER_ABSTIME) rmtp = NULL; current->restart_block.fn = do_no_restart_syscall; current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE; current->restart_block.nanosleep.compat_rmtp = rmtp; return kc->nsleep(which_clock, flags, &t); } #endif static const struct k_clock clock_realtime = { .clock_getres = posix_get_hrtimer_res, .clock_get_timespec = posix_get_realtime_timespec, .clock_get_ktime = posix_get_realtime_ktime, .clock_set = posix_clock_realtime_set, .clock_adj = posix_clock_realtime_adj, .nsleep = common_nsleep, .timer_create = common_timer_create, .timer_set = common_timer_set, .timer_get = common_timer_get, .timer_del = common_timer_del, .timer_rearm = common_hrtimer_rearm, .timer_forward = common_hrtimer_forward, .timer_remaining = common_hrtimer_remaining, .timer_try_to_cancel = common_hrtimer_try_to_cancel, .timer_wait_running = common_timer_wait_running, .timer_arm = common_hrtimer_arm, }; static const struct k_clock clock_monotonic = { .clock_getres = posix_get_hrtimer_res, .clock_get_timespec = posix_get_monotonic_timespec, .clock_get_ktime = posix_get_monotonic_ktime, .nsleep = common_nsleep_timens, .timer_create = common_timer_create, .timer_set = common_timer_set, .timer_get = common_timer_get, .timer_del = common_timer_del, .timer_rearm = common_hrtimer_rearm, .timer_forward = common_hrtimer_forward, .timer_remaining = common_hrtimer_remaining, .timer_try_to_cancel = common_hrtimer_try_to_cancel, .timer_wait_running = common_timer_wait_running, .timer_arm = common_hrtimer_arm, }; static const struct k_clock clock_monotonic_raw = { .clock_getres = posix_get_hrtimer_res, .clock_get_timespec = posix_get_monotonic_raw, }; static const struct k_clock clock_realtime_coarse = { .clock_getres = posix_get_coarse_res, .clock_get_timespec = posix_get_realtime_coarse, }; static const struct k_clock clock_monotonic_coarse = { .clock_getres = posix_get_coarse_res, .clock_get_timespec = posix_get_monotonic_coarse, }; static const struct k_clock clock_tai = { .clock_getres = posix_get_hrtimer_res, .clock_get_ktime = posix_get_tai_ktime, .clock_get_timespec = posix_get_tai_timespec, .nsleep = common_nsleep, .timer_create = common_timer_create, .timer_set = common_timer_set, .timer_get = common_timer_get, .timer_del = common_timer_del, .timer_rearm = common_hrtimer_rearm, .timer_forward = common_hrtimer_forward, .timer_remaining = common_hrtimer_remaining, .timer_try_to_cancel = common_hrtimer_try_to_cancel, .timer_wait_running = common_timer_wait_running, .timer_arm = common_hrtimer_arm, }; static const struct k_clock clock_boottime = { .clock_getres = posix_get_hrtimer_res, .clock_get_ktime = posix_get_boottime_ktime, .clock_get_timespec = posix_get_boottime_timespec, .nsleep = common_nsleep_timens, .timer_create = common_timer_create, .timer_set = common_timer_set, .timer_get = common_timer_get, .timer_del = common_timer_del, .timer_rearm = common_hrtimer_rearm, .timer_forward = common_hrtimer_forward, .timer_remaining = 
common_hrtimer_remaining, .timer_try_to_cancel = common_hrtimer_try_to_cancel, .timer_wait_running = common_timer_wait_running, .timer_arm = common_hrtimer_arm, }; static const struct k_clock * const posix_clocks[] = { [CLOCK_REALTIME] = &clock_realtime, [CLOCK_MONOTONIC] = &clock_monotonic, [CLOCK_PROCESS_CPUTIME_ID] = &clock_process, [CLOCK_THREAD_CPUTIME_ID] = &clock_thread, [CLOCK_MONOTONIC_RAW] = &clock_monotonic_raw, [CLOCK_REALTIME_COARSE] = &clock_realtime_coarse, [CLOCK_MONOTONIC_COARSE] = &clock_monotonic_coarse, [CLOCK_BOOTTIME] = &clock_boottime, [CLOCK_REALTIME_ALARM] = &alarm_clock, [CLOCK_BOOTTIME_ALARM] = &alarm_clock, [CLOCK_TAI] = &clock_tai, }; static const struct k_clock *clockid_to_kclock(const clockid_t id) { clockid_t idx = id; if (id < 0) { return (id & CLOCKFD_MASK) == CLOCKFD ? &clock_posix_dynamic : &clock_posix_cpu; } if (id >= ARRAY_SIZE(posix_clocks)) return NULL; return posix_clocks[array_index_nospec(idx, ARRAY_SIZE(posix_clocks))]; } |
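The file above is easiest to understand from the user-space side of the syscalls it implements. Below is a minimal user-space sketch (illustrative only, not part of the kernel source; the handler name and the timing values are arbitrary) that walks the timer_create -> timer_settime -> timer_getoverrun -> timer_delete path handled by do_timer_create(), common_timer_set() and friends:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

static volatile sig_atomic_t ticks;

static void on_alarm(int sig) { (void)sig; ticks++; }

int main(void)
{
	struct sigevent sev;
	struct itimerspec its = {
		.it_value    = { .tv_sec = 1 },	/* first expiry in 1s */
		.it_interval = { .tv_sec = 1 },	/* then rearm every 1s */
	};
	timer_t tid;

	signal(SIGALRM, on_alarm);

	memset(&sev, 0, sizeof(sev));
	sev.sigev_notify = SIGEV_SIGNAL;	/* validated by good_sigevent() */
	sev.sigev_signo  = SIGALRM;

	if (timer_create(CLOCK_MONOTONIC, &sev, &tid))	/* sys_timer_create */
		return 1;
	if (timer_settime(tid, 0, &its, NULL))		/* sys_timer_settime */
		return 1;

	while (ticks < 3)
		pause();

	/* Overrun count relative to the last delivered signal */
	printf("overruns: %d\n", timer_getoverrun(tid));
	return timer_delete(tid);			/* sys_timer_delete */
}

Passing a NULL sigevent instead of &sev exercises the default branch in do_timer_create(), which falls back to SIGEV_SIGNAL with SIGALRM and the timer ID as the signal value.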
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2017 Facebook */ #include <linux/slab.h> #include <linux/bpf.h> #include <linux/btf.h> #include "map_in_map.h" struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) { struct bpf_map *inner_map, *inner_map_meta; u32 inner_map_meta_size; struct fd f; int ret; f = fdget(inner_map_ufd); inner_map = __bpf_map_get(f); if (IS_ERR(inner_map)) return inner_map; /* Does not support >1 level map-in-map */ if (inner_map->inner_map_meta) { ret = -EINVAL; goto put; } if (!inner_map->ops->map_meta_equal) { ret = -ENOTSUPP; goto put; } inner_map_meta_size = sizeof(*inner_map_meta); /* In some cases the verifier needs to access beyond just the base map. */ if (inner_map->ops == &array_map_ops || inner_map->ops == &percpu_array_map_ops) inner_map_meta_size = sizeof(struct bpf_array); inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER); if (!inner_map_meta) { ret = -ENOMEM; goto put; } inner_map_meta->map_type = inner_map->map_type; inner_map_meta->key_size = inner_map->key_size; inner_map_meta->value_size = inner_map->value_size; inner_map_meta->map_flags = inner_map->map_flags; inner_map_meta->max_entries = inner_map->max_entries; inner_map_meta->record = btf_record_dup(inner_map->record); if (IS_ERR(inner_map_meta->record)) { /* btf_record_dup returns NULL or a valid pointer in case of * invalid/empty/valid, but ERR_PTR in case of errors. During * the equality check NULL and IS_ERR are equivalent. */ ret = PTR_ERR(inner_map_meta->record); goto free; } /* Note: We must use the same BTF, as we also used btf_record_dup above * which relies on BTF being the same for both maps, as some members like * record->fields.list_head have pointers like value_rec pointing into * inner_map->btf. */ if (inner_map->btf) { btf_get(inner_map->btf); inner_map_meta->btf = inner_map->btf; } /* Misc members not needed in bpf_map_meta_equal() check. 
*/ inner_map_meta->ops = inner_map->ops; if (inner_map->ops == &array_map_ops || inner_map->ops == &percpu_array_map_ops) { struct bpf_array *inner_array_meta = container_of(inner_map_meta, struct bpf_array, map); struct bpf_array *inner_array = container_of(inner_map, struct bpf_array, map); inner_array_meta->index_mask = inner_array->index_mask; inner_array_meta->elem_size = inner_array->elem_size; inner_map_meta->bypass_spec_v1 = inner_map->bypass_spec_v1; } fdput(f); return inner_map_meta; free: kfree(inner_map_meta); put: fdput(f); return ERR_PTR(ret); } void bpf_map_meta_free(struct bpf_map *map_meta) { bpf_map_free_record(map_meta); btf_put(map_meta->btf); kfree(map_meta); } bool bpf_map_meta_equal(const struct bpf_map *meta0, const struct bpf_map *meta1) { /* No need to compare ops because it is covered by map_type */ return meta0->map_type == meta1->map_type && meta0->key_size == meta1->key_size && meta0->value_size == meta1->value_size && meta0->map_flags == meta1->map_flags && btf_record_equal(meta0->record, meta1->record); } void *bpf_map_fd_get_ptr(struct bpf_map *map, struct file *map_file /* not used */, int ufd) { struct bpf_map *inner_map, *inner_map_meta; struct fd f; f = fdget(ufd); inner_map = __bpf_map_get(f); if (IS_ERR(inner_map)) return inner_map; inner_map_meta = map->inner_map_meta; if (inner_map_meta->ops->map_meta_equal(inner_map_meta, inner_map)) bpf_map_inc(inner_map); else inner_map = ERR_PTR(-EINVAL); fdput(f); return inner_map; } void bpf_map_fd_put_ptr(struct bpf_map *map, void *ptr, bool need_defer) { struct bpf_map *inner_map = ptr; /* Defer the freeing of inner map according to the sleepable attribute * of bpf program which owns the outer map, so unnecessary waiting for * RCU tasks trace grace period can be avoided. */ if (need_defer) { if (atomic64_read(&map->sleepable_refcnt)) WRITE_ONCE(inner_map->free_after_mult_rcu_gp, true); else WRITE_ONCE(inner_map->free_after_rcu_gp, true); } bpf_map_put(inner_map); } u32 bpf_map_fd_sys_lookup_elem(void *ptr) { return ((struct bpf_map *)ptr)->id; } |
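For context, bpf_map_meta_alloc() above is what runs when user space creates an outer map from an inner map template. The following hedged user-space sketch (assuming the libbpf v0.7+ bpf_map_create() API; map names and sizes are arbitrary) shows that path end to end:

#include <bpf/bpf.h>
#include <unistd.h>

int main(void)
{
	int inner_fd, outer_fd, key = 0;

	/* Inner map; its fd seeds the outer map's element type via
	 * bpf_map_meta_alloc(). */
	inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "inner",
				  sizeof(int), sizeof(long), 256, NULL);
	if (inner_fd < 0)
		return 1;

	LIBBPF_OPTS(bpf_map_create_opts, opts, .inner_map_fd = inner_fd);
	outer_fd = bpf_map_create(BPF_MAP_TYPE_HASH_OF_MAPS, "outer",
				  sizeof(int), sizeof(int), 16, &opts);
	if (outer_fd < 0)
		return 1;

	/* The value is the inner map's fd; the kernel resolves it through
	 * bpf_map_fd_get_ptr() and checks ops->map_meta_equal(). */
	if (bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_ANY))
		return 1;

	close(inner_fd);	/* the outer map holds its own reference now */
	return 0;
}

An update with a map whose type, key/value size, flags or BTF record differ from the template fails with -EINVAL, which is exactly the bpf_map_meta_equal() check above.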
// SPDX-License-Identifier: LGPL-2.1+ /* Copyright (C) 2022 Kent Overstreet */ #include <linux/bitmap.h> #include <linux/err.h> #include <linux/export.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string_helpers.h> #include "printbuf.h" static inline unsigned __printbuf_linelen(struct printbuf *buf, unsigned pos) { return pos - buf->last_newline; } static inline unsigned printbuf_linelen(struct printbuf *buf) { return __printbuf_linelen(buf, buf->pos); } /* * Returns spaces from start of line, if set, or 0 if unset: */ static inline unsigned cur_tabstop(struct printbuf *buf) { return buf->cur_tabstop < buf->nr_tabstops ? buf->_tabstops[buf->cur_tabstop] : 0; } int bch2_printbuf_make_room(struct printbuf *out, unsigned extra) { /* Reserved space for terminating nul: */ extra += 1; if (out->pos + extra <= out->size) return 0; if (!out->heap_allocated) { out->overflow = true; return 0; } unsigned new_size = roundup_pow_of_two(out->size + extra); /* Sanity check... */ if (new_size > PAGE_SIZE << MAX_PAGE_ORDER) { out->allocation_failure = true; out->overflow = true; return -ENOMEM; } /* * Note: output buffer must be freeable with kfree(), it's not required * that the user use printbuf_exit(). */ char *buf = krealloc(out->buf, new_size, !out->atomic ? 
GFP_KERNEL : GFP_NOWAIT); if (!buf) { out->allocation_failure = true; out->overflow = true; return -ENOMEM; } out->buf = buf; out->size = new_size; return 0; } static void printbuf_advance_pos(struct printbuf *out, unsigned len) { out->pos += min(len, printbuf_remaining(out)); } static void printbuf_insert_spaces(struct printbuf *out, unsigned pos, unsigned nr) { unsigned move = out->pos - pos; bch2_printbuf_make_room(out, nr); if (pos + nr < out->size) memmove(out->buf + pos + nr, out->buf + pos, min(move, out->size - 1 - pos - nr)); if (pos < out->size) memset(out->buf + pos, ' ', min(nr, out->size - pos)); printbuf_advance_pos(out, nr); printbuf_nul_terminate_reserved(out); } static void __printbuf_do_indent(struct printbuf *out, unsigned pos) { while (true) { int pad; unsigned len = out->pos - pos; char *p = out->buf + pos; char *n = memscan(p, '\n', len); if (cur_tabstop(out)) { n = min(n, (char *) memscan(p, '\r', len)); n = min(n, (char *) memscan(p, '\t', len)); } pos = n - out->buf; if (pos == out->pos) break; switch (*n) { case '\n': pos++; out->last_newline = pos; printbuf_insert_spaces(out, pos, out->indent); pos = min(pos + out->indent, out->pos); out->last_field = pos; out->cur_tabstop = 0; break; case '\r': memmove(n, n + 1, out->pos - pos); --out->pos; pad = (int) cur_tabstop(out) - (int) __printbuf_linelen(out, pos); if (pad > 0) { printbuf_insert_spaces(out, out->last_field, pad); pos += pad; } out->last_field = pos; out->cur_tabstop++; break; case '\t': pad = (int) cur_tabstop(out) - (int) __printbuf_linelen(out, pos) - 1; if (pad > 0) { *n = ' '; printbuf_insert_spaces(out, pos, pad - 1); pos += pad; } else { memmove(n, n + 1, out->pos - pos); --out->pos; } out->last_field = pos; out->cur_tabstop++; break; } } } static inline void printbuf_do_indent(struct printbuf *out, unsigned pos) { if (out->has_indent_or_tabstops && !out->suppress_indent_tabstop_handling) __printbuf_do_indent(out, pos); } void bch2_prt_vprintf(struct printbuf *out, const char *fmt, va_list args) { int len; do { va_list args2; va_copy(args2, args); len = vsnprintf(out->buf + out->pos, printbuf_remaining_size(out), fmt, args2); va_end(args2); } while (len > printbuf_remaining(out) && !bch2_printbuf_make_room(out, len)); unsigned indent_pos = out->pos; printbuf_advance_pos(out, len); printbuf_do_indent(out, indent_pos); } void bch2_prt_printf(struct printbuf *out, const char *fmt, ...) { va_list args; int len; do { va_start(args, fmt); len = vsnprintf(out->buf + out->pos, printbuf_remaining_size(out), fmt, args); va_end(args); } while (len > printbuf_remaining(out) && !bch2_printbuf_make_room(out, len)); unsigned indent_pos = out->pos; printbuf_advance_pos(out, len); printbuf_do_indent(out, indent_pos); } /** * bch2_printbuf_str() - returns printbuf's buf as a C string, guaranteed to be * null terminated * @buf: printbuf to terminate * Returns: Printbuf contents, as a nul terminated C string */ const char *bch2_printbuf_str(const struct printbuf *buf) { /* * If we've written to a printbuf then it's guaranteed to be a null * terminated string - but if we haven't, then we might not have * allocated a buffer at all: */ return buf->pos ? buf->buf : ""; } /** * bch2_printbuf_exit() - exit a printbuf, freeing memory it owns and poisoning it * against accidental use. 
* @buf: printbuf to exit */ void bch2_printbuf_exit(struct printbuf *buf) { if (buf->heap_allocated) { kfree(buf->buf); buf->buf = ERR_PTR(-EINTR); /* poison value */ } } void bch2_printbuf_tabstops_reset(struct printbuf *buf) { buf->nr_tabstops = 0; } void bch2_printbuf_tabstop_pop(struct printbuf *buf) { if (buf->nr_tabstops) --buf->nr_tabstops; } /* * bch2_printbuf_tabstop_push() - add a tabstop, n spaces from the previous tabstop * * @buf: printbuf to control * @spaces: number of spaces from the previous tabstop * * In the future this function may allocate memory if setting more than * PRINTBUF_INLINE_TABSTOPS or setting tabstops more than 255 spaces from start * of line. */ int bch2_printbuf_tabstop_push(struct printbuf *buf, unsigned spaces) { unsigned prev_tabstop = buf->nr_tabstops ? buf->_tabstops[buf->nr_tabstops - 1] : 0; if (WARN_ON(buf->nr_tabstops >= ARRAY_SIZE(buf->_tabstops))) return -EINVAL; buf->_tabstops[buf->nr_tabstops++] = prev_tabstop + spaces; buf->has_indent_or_tabstops = true; return 0; } /** * bch2_printbuf_indent_add() - add to the current indent level * * @buf: printbuf to control * @spaces: number of spaces to add to the current indent level * * Subsequent lines, and the current line if the output position is at the start * of the current line, will be indented by @spaces more spaces. */ void bch2_printbuf_indent_add(struct printbuf *buf, unsigned spaces) { if (WARN_ON_ONCE(buf->indent + spaces < buf->indent)) spaces = 0; buf->indent += spaces; prt_chars(buf, ' ', spaces); buf->has_indent_or_tabstops = true; } /** * bch2_printbuf_indent_sub() - subtract from the current indent level * * @buf: printbuf to control * @spaces: number of spaces to subtract from the current indent level * * Subsequent lines, and the current line if the output position is at the start * of the current line, will be indented by @spaces fewer spaces. */ void bch2_printbuf_indent_sub(struct printbuf *buf, unsigned spaces) { if (WARN_ON_ONCE(spaces > buf->indent)) spaces = buf->indent; if (buf->last_newline + buf->indent == buf->pos) { buf->pos -= spaces; printbuf_nul_terminate(buf); } buf->indent -= spaces; if (!buf->indent && !buf->nr_tabstops) buf->has_indent_or_tabstops = false; } void bch2_prt_newline(struct printbuf *buf) { bch2_printbuf_make_room(buf, 1 + buf->indent); __prt_char_reserved(buf, '\n'); buf->last_newline = buf->pos; __prt_chars_reserved(buf, ' ', buf->indent); printbuf_nul_terminate_reserved(buf); buf->last_field = buf->pos; buf->cur_tabstop = 0; } void bch2_printbuf_strip_trailing_newline(struct printbuf *out) { for (int p = out->pos - 1; p >= 0; --p) { if (out->buf[p] == '\n') { out->pos = p; break; } if (out->buf[p] != ' ') break; } printbuf_nul_terminate_reserved(out); } static void __prt_tab(struct printbuf *out) { int spaces = max_t(int, 0, cur_tabstop(out) - printbuf_linelen(out)); prt_chars(out, ' ', spaces); out->last_field = out->pos; out->cur_tabstop++; } /** * bch2_prt_tab() - Advance printbuf to the next tabstop * @out: printbuf to control * * Advance output to the next tabstop by printing spaces. 
*/ void bch2_prt_tab(struct printbuf *out) { if (WARN_ON(!cur_tabstop(out))) return; __prt_tab(out); } static void __prt_tab_rjust(struct printbuf *buf) { int pad = (int) cur_tabstop(buf) - (int) printbuf_linelen(buf); if (pad > 0) printbuf_insert_spaces(buf, buf->last_field, pad); buf->last_field = buf->pos; buf->cur_tabstop++; } /** * bch2_prt_tab_rjust - Advance printbuf to the next tabstop, right justifying * previous output * * @buf: printbuf to control * * Advance output to the next tabstop by inserting spaces immediately after the * previous tabstop, right justifying previously output text. */ void bch2_prt_tab_rjust(struct printbuf *buf) { if (WARN_ON(!cur_tabstop(buf))) return; __prt_tab_rjust(buf); } /** * bch2_prt_bytes_indented() - Print an array of chars, handling embedded control characters * * @out: output printbuf * @str: string to print * @count: number of bytes to print * * The following control characters are handled as follows: * \n: prt_newline newline that obeys current indent level * \t: prt_tab advance to next tabstop * \r: prt_tab_rjust advance to next tabstop, with right justification */ void bch2_prt_bytes_indented(struct printbuf *out, const char *str, unsigned count) { unsigned indent_pos = out->pos; prt_bytes(out, str, count); printbuf_do_indent(out, indent_pos); } /** * bch2_prt_human_readable_u64() - Print out a u64 in human readable units * @out: output printbuf * @v: integer to print * * Units of 2^10 (default) or 10^3 are controlled via @out->si_units */ void bch2_prt_human_readable_u64(struct printbuf *out, u64 v) { bch2_printbuf_make_room(out, 10); unsigned len = string_get_size(v, 1, !out->si_units, out->buf + out->pos, printbuf_remaining_size(out)); printbuf_advance_pos(out, len); } /** * bch2_prt_human_readable_s64() - Print out a s64 in human readable units * @out: output printbuf * @v: integer to print * * Units of 2^10 (default) or 10^3 are controlled via @out->si_units */ void bch2_prt_human_readable_s64(struct printbuf *out, s64 v) { if (v < 0) prt_char(out, '-'); bch2_prt_human_readable_u64(out, abs(v)); } /** * bch2_prt_units_u64() - Print out a u64 according to printbuf unit options * @out: output printbuf * @v: integer to print * * Units are either raw (default), or human readable units (controlled via * @buf->human_readable_units) */ void bch2_prt_units_u64(struct printbuf *out, u64 v) { if (out->human_readable_units) bch2_prt_human_readable_u64(out, v); else bch2_prt_printf(out, "%llu", v); } /** * bch2_prt_units_s64() - Print out a s64 according to printbuf unit options * @out: output printbuf * @v: integer to print * * Units are either raw (default), or human readable units (controlled via * @buf->human_readable_units) */ void bch2_prt_units_s64(struct printbuf *out, s64 v) { if (v < 0) prt_char(out, '-'); bch2_prt_units_u64(out, abs(v)); } void bch2_prt_string_option(struct printbuf *out, const char * const list[], size_t selected) { for (size_t i = 0; list[i]; i++) bch2_prt_printf(out, i == selected ? 
"[%s] " : "%s ", list[i]); } void bch2_prt_bitflags(struct printbuf *out, const char * const list[], u64 flags) { unsigned bit, nr = 0; bool first = true; while (list[nr]) nr++; while (flags && (bit = __ffs64(flags)) < nr) { if (!first) bch2_prt_printf(out, ","); first = false; bch2_prt_printf(out, "%s", list[bit]); flags ^= BIT_ULL(bit); } } void bch2_prt_bitflags_vector(struct printbuf *out, const char * const list[], unsigned long *v, unsigned nr) { bool first = true; unsigned i; for (i = 0; i < nr; i++) if (!list[i]) { nr = i - 1; break; } for_each_set_bit(i, v, nr) { if (!first) bch2_prt_printf(out, ","); first = false; bch2_prt_printf(out, "%s", list[i]); } } |
/* * IPv4 specific functions of netfilter core * * Rusty Russell (C) 2000 -- This code is GPL. * Patrick McHardy (C) 2006-2012 */ #include <linux/kernel.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/gfp.h> #include <linux/export.h> #include <net/route.h> #include <net/xfrm.h> #include <net/ip.h> #include <net/netfilter/nf_queue.h> /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned int addr_type) { const struct iphdr *iph = ip_hdr(skb); struct rtable *rt; struct flowi4 fl4 = {}; __be32 saddr = iph->saddr; __u8 flags; struct net_device *dev = skb_dst(skb)->dev; struct flow_keys flkeys; unsigned int hh_len; sk = sk_to_full_sk(sk); flags = sk ? inet_sk_flowi_flags(sk) : 0; if (addr_type == RTN_UNSPEC) addr_type = inet_addr_type_dev_table(net, dev, saddr); if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST) flags |= FLOWI_FLAG_ANYSRC; else saddr = 0; /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook. */ fl4.daddr = iph->daddr; fl4.saddr = saddr; fl4.flowi4_tos = RT_TOS(iph->tos); fl4.flowi4_oif = sk ? sk->sk_bound_dev_if : 0; fl4.flowi4_l3mdev = l3mdev_master_ifindex(dev); fl4.flowi4_mark = skb->mark; fl4.flowi4_flags = flags; fib4_rules_early_flow_dissect(net, skb, &fl4, &flkeys); rt = ip_route_output_key(net, &fl4); if (IS_ERR(rt)) return PTR_ERR(rt); /* Drop old route. */ skb_dst_drop(skb); skb_dst_set(skb, &rt->dst); if (skb_dst(skb)->error) return skb_dst(skb)->error; #ifdef CONFIG_XFRM if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && xfrm_decode_session(net, skb, flowi4_to_flowi(&fl4), AF_INET) == 0) { struct dst_entry *dst = skb_dst(skb); skb_dst_set(skb, NULL); dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), sk, 0); if (IS_ERR(dst)) return PTR_ERR(dst); skb_dst_set(skb, dst); } #endif /* Change in oif may mean change in hh_len. */ hh_len = skb_dst(skb)->dev->hard_header_len; if (skb_headroom(skb) < hh_len && pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)), 0, GFP_ATOMIC)) return -ENOMEM; return 0; } EXPORT_SYMBOL(ip_route_me_harder); int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict __always_unused) { struct rtable *rt = ip_route_output_key(net, &fl->u.ip4); if (IS_ERR(rt)) return PTR_ERR(rt); *dst = &rt->dst; return 0; } EXPORT_SYMBOL_GPL(nf_ip_route);
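As a usage illustration, here is a hypothetical LOCAL_OUT hook (not from this file; the hook function and the rewritten address are made up) that mangles the destination address and then revalidates the route with ip_route_me_harder(), the same pattern NAT and mangle targets follow after rewriting the header:

#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <net/checksum.h>

static unsigned int mangle_out_hook(void *priv, struct sk_buff *skb,
				    const struct nf_hook_state *state)
{
	struct iphdr *iph = ip_hdr(skb);
	__be32 new_daddr = htonl(0xC0A80001);	/* 192.168.0.1, example only */

	if (iph->daddr != new_daddr) {
		/* Keep the header checksum consistent with the rewrite */
		csum_replace4(&iph->check, iph->daddr, new_daddr);
		iph->daddr = new_daddr;

		/* The cached dst was looked up for the old daddr and may
		 * now be wrong; revalidate it. */
		if (ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC))
			return NF_DROP;
	}
	return NF_ACCEPT;
}

/* Registration via nf_register_net_hook() at NF_INET_LOCAL_OUT omitted. */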
// SPDX-License-Identifier: GPL-2.0 /* XSKMAP used for AF_XDP sockets * Copyright(c) 2018 Intel Corporation. */ #include <linux/bpf.h> #include <linux/filter.h> #include <net/xdp_sock.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/btf_ids.h> #include "xsk.h" static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map, struct xdp_sock __rcu **map_entry) { struct xsk_map_node *node; node = bpf_map_kzalloc(&map->map, sizeof(*node), GFP_ATOMIC | __GFP_NOWARN); if (!node) return ERR_PTR(-ENOMEM); bpf_map_inc(&map->map); atomic_inc(&map->count); node->map = map; node->map_entry = map_entry; return node; } static void xsk_map_node_free(struct xsk_map_node *node) { struct xsk_map *map = node->map; bpf_map_put(&node->map->map); kfree(node); atomic_dec(&map->count); } static void xsk_map_sock_add(struct xdp_sock *xs, struct xsk_map_node *node) { spin_lock_bh(&xs->map_list_lock); list_add_tail(&node->node, &xs->map_list); spin_unlock_bh(&xs->map_list_lock); } static void xsk_map_sock_delete(struct xdp_sock *xs, struct xdp_sock __rcu **map_entry) { struct xsk_map_node *n, *tmp; spin_lock_bh(&xs->map_list_lock); list_for_each_entry_safe(n, tmp, &xs->map_list, node) { if (map_entry == n->map_entry) { list_del(&n->node); xsk_map_node_free(n); } } spin_unlock_bh(&xs->map_list_lock); } static struct bpf_map *xsk_map_alloc(union bpf_attr *attr) { struct xsk_map *m; int numa_node; u64 size; if (attr->max_entries == 0 || attr->key_size != 4 || attr->value_size != 4 || attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)) return ERR_PTR(-EINVAL); numa_node = bpf_map_attr_numa_node(attr); size = struct_size(m, xsk_map, attr->max_entries); m = bpf_map_area_alloc(size, numa_node); if (!m) return ERR_PTR(-ENOMEM); bpf_map_init_from_attr(&m->map, attr); spin_lock_init(&m->lock); return &m->map; } static u64 xsk_map_mem_usage(const struct bpf_map *map) { struct xsk_map *m = container_of(map, struct xsk_map, map); return struct_size(m, xsk_map, map->max_entries) + (u64)atomic_read(&m->count) * sizeof(struct xsk_map_node); } static void xsk_map_free(struct bpf_map *map) { struct xsk_map *m = container_of(map, struct xsk_map, map); synchronize_net(); bpf_map_area_free(m); } static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key) { struct xsk_map *m = container_of(map, struct xsk_map, map); u32 index = key ? 
*(u32 *)key : U32_MAX; u32 *next = next_key; if (index >= m->map.max_entries) { *next = 0; return 0; } if (index == m->map.max_entries - 1) return -ENOENT; *next = index + 1; return 0; } static int xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) { const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2; struct bpf_insn *insn = insn_buf; *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5); *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(sizeof(struct xsk_sock *))); *insn++ = BPF_ALU64_IMM(BPF_ADD, mp, offsetof(struct xsk_map, xsk_map)); *insn++ = BPF_ALU64_REG(BPF_ADD, ret, mp); *insn++ = BPF_LDX_MEM(BPF_SIZEOF(struct xsk_sock *), ret, ret, 0); *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1); *insn++ = BPF_MOV64_IMM(ret, 0); return insn - insn_buf; } /* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or * by local_bh_disable() (from XDP calls inside NAPI). The * rcu_read_lock_bh_held() below makes lockdep accept both. */ static void *__xsk_map_lookup_elem(struct bpf_map *map, u32 key) { struct xsk_map *m = container_of(map, struct xsk_map, map); if (key >= map->max_entries) return NULL; return rcu_dereference_check(m->xsk_map[key], rcu_read_lock_bh_held()); } static void *xsk_map_lookup_elem(struct bpf_map *map, void *key) { return __xsk_map_lookup_elem(map, *(u32 *)key); } static void *xsk_map_lookup_elem_sys_only(struct bpf_map *map, void *key) { return ERR_PTR(-EOPNOTSUPP); } static long xsk_map_update_elem(struct bpf_map *map, void *key, void *value, u64 map_flags) { struct xsk_map *m = container_of(map, struct xsk_map, map); struct xdp_sock __rcu **map_entry; struct xdp_sock *xs, *old_xs; u32 i = *(u32 *)key, fd = *(u32 *)value; struct xsk_map_node *node; struct socket *sock; int err; if (unlikely(map_flags > BPF_EXIST)) return -EINVAL; if (unlikely(i >= m->map.max_entries)) return -E2BIG; sock = sockfd_lookup(fd, &err); if (!sock) return err; if (sock->sk->sk_family != PF_XDP) { sockfd_put(sock); return -EOPNOTSUPP; } xs = (struct xdp_sock *)sock->sk; map_entry = &m->xsk_map[i]; node = xsk_map_node_alloc(m, map_entry); if (IS_ERR(node)) { sockfd_put(sock); return PTR_ERR(node); } spin_lock_bh(&m->lock); old_xs = rcu_dereference_protected(*map_entry, lockdep_is_held(&m->lock)); if (old_xs == xs) { err = 0; goto out; } else if (old_xs && map_flags == BPF_NOEXIST) { err = -EEXIST; goto out; } else if (!old_xs && map_flags == BPF_EXIST) { err = -ENOENT; goto out; } xsk_map_sock_add(xs, node); rcu_assign_pointer(*map_entry, xs); if (old_xs) xsk_map_sock_delete(old_xs, map_entry); spin_unlock_bh(&m->lock); sockfd_put(sock); return 0; out: spin_unlock_bh(&m->lock); sockfd_put(sock); xsk_map_node_free(node); return err; } static long xsk_map_delete_elem(struct bpf_map *map, void *key) { struct xsk_map *m = container_of(map, struct xsk_map, map); struct xdp_sock __rcu **map_entry; struct xdp_sock *old_xs; int k = *(u32 *)key; if (k >= map->max_entries) return -EINVAL; spin_lock_bh(&m->lock); map_entry = &m->xsk_map[k]; old_xs = unrcu_pointer(xchg(map_entry, NULL)); if (old_xs) xsk_map_sock_delete(old_xs, map_entry); spin_unlock_bh(&m->lock); return 0; } static long xsk_map_redirect(struct bpf_map *map, u64 index, u64 flags) { return __bpf_xdp_redirect_map(map, index, flags, 0, __xsk_map_lookup_elem); } void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs, struct xdp_sock __rcu **map_entry) { spin_lock_bh(&map->lock); if (rcu_access_pointer(*map_entry) == xs) { rcu_assign_pointer(*map_entry, 
NULL);
		xsk_map_sock_delete(xs, map_entry);
	}
	spin_unlock_bh(&map->lock);
}

static bool xsk_map_meta_equal(const struct bpf_map *meta0,
			       const struct bpf_map *meta1)
{
	return meta0->max_entries == meta1->max_entries &&
	       bpf_map_meta_equal(meta0, meta1);
}

BTF_ID_LIST_SINGLE(xsk_map_btf_ids, struct, xsk_map)
const struct bpf_map_ops xsk_map_ops = {
	.map_meta_equal = xsk_map_meta_equal,
	.map_alloc = xsk_map_alloc,
	.map_free = xsk_map_free,
	.map_get_next_key = xsk_map_get_next_key,
	.map_lookup_elem = xsk_map_lookup_elem,
	.map_gen_lookup = xsk_map_gen_lookup,
	.map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only,
	.map_update_elem = xsk_map_update_elem,
	.map_delete_elem = xsk_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = xsk_map_mem_usage,
	.map_btf_id = &xsk_map_btf_ids[0],
	.map_redirect = xsk_map_redirect,
};
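An XSKMAP is driven from the BPF side: an XDP program looks up its own RX queue index in the map and redirects matching frames to the AF_XDP socket userspace has stored at that slot via xsk_map_update_elem() above. A minimal BPF-side sketch; the map name, program name, and the 64-entry size are illustrative, not part of xskmap.c:

/* Compiled with clang -target bpf; loaded with libbpf. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u32);
} xsks_map SEC(".maps");

SEC("xdp")
int xsk_redirect_prog(struct xdp_md *ctx)
{
	__u32 index = ctx->rx_queue_index;

	/* Redirect to the AF_XDP socket bound to this RX queue, if any;
	 * the third argument is the fallback action when the slot is empty. */
	if (bpf_map_lookup_elem(&xsks_map, &index))
		return bpf_redirect_map(&xsks_map, index, XDP_PASS);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";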
// SPDX-License-Identifier: GPL-2.0
/*
 * lib/minmax.c: windowed min/max tracker
 *
 * Kathleen Nichols' algorithm for tracking the minimum (or maximum)
 * value of a data stream over some fixed time interval. (E.g.,
 * the minimum RTT over the past five minutes.) It uses constant
 * space and constant time per update yet almost always delivers
 * the same minimum as an implementation that has to keep all the
 * data in the window.
 *
 * The algorithm keeps track of the best, 2nd best & 3rd best min
 * values, maintaining an invariant that the measurement time of
 * the n'th best >= n-1'th best. It also makes sure that the three
 * values are widely separated in the time window since that bounds
 * the worst case error when that data is monotonically increasing
 * over the window.
 *
 * Upon getting a new min, we can forget everything earlier because
 * it has no value - the new min is <= everything else in the window
 * by definition and it's the most recent. So we restart fresh on
 * every new min and overwrite the 2nd & 3rd choices. The same property
 * holds for 2nd & 3rd best.
 */
#include <linux/module.h>
#include <linux/win_minmax.h>

/* As time advances, update the 1st, 2nd, and 3rd choices. */
static u32 minmax_subwin_update(struct minmax *m, u32 win,
				const struct minmax_sample *val)
{
	u32 dt = val->t - m->s[0].t;

	if (unlikely(dt > win)) {
		/*
		 * Passed entire window without a new val so make 2nd
		 * choice the new val & 3rd choice the new 2nd choice.
		 * we may have to iterate this since our 2nd choice
		 * may also be outside the window (we checked on entry
		 * that the third choice was in the window).
		 */
		m->s[0] = m->s[1];
		m->s[1] = m->s[2];
		m->s[2] = *val;
		if (unlikely(val->t - m->s[0].t > win)) {
			m->s[0] = m->s[1];
			m->s[1] = m->s[2];
			m->s[2] = *val;
		}
	} else if (unlikely(m->s[1].t == m->s[0].t) && dt > win/4) {
		/*
		 * We've passed a quarter of the window without a new val
		 * so take a 2nd choice from the 2nd quarter of the window.
		 */
		m->s[2] = m->s[1] = *val;
	} else if (unlikely(m->s[2].t == m->s[1].t) && dt > win/2) {
		/*
		 * We've passed half the window without finding a new val
		 * so take a 3rd choice from the last half of the window
		 */
		m->s[2] = *val;
	}
	return m->s[0].v;
}

/* Check if new measurement updates the 1st, 2nd or 3rd choice max. */
u32 minmax_running_max(struct minmax *m, u32 win, u32 t, u32 meas)
{
	struct minmax_sample val = { .t = t, .v = meas };

	if (unlikely(val.v >= m->s[0].v) ||	  /* found new max? */
	    unlikely(val.t - m->s[2].t > win))	  /* nothing left in window? */
		return minmax_reset(m, t, meas);  /* forget earlier samples */

	if (unlikely(val.v >= m->s[1].v))
		m->s[2] = m->s[1] = val;
	else if (unlikely(val.v >= m->s[2].v))
		m->s[2] = val;

	return minmax_subwin_update(m, win, &val);
}
EXPORT_SYMBOL(minmax_running_max);

/* Check if new measurement updates the 1st, 2nd or 3rd choice min. */
u32 minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas)
{
	struct minmax_sample val = { .t = t, .v = meas };

	if (unlikely(val.v <= m->s[0].v) ||	  /* found new min? */
	    unlikely(val.t - m->s[2].t > win))	  /* nothing left in window? */
		return minmax_reset(m, t, meas);  /* forget earlier samples */

	if (unlikely(val.v <= m->s[1].v))
		m->s[2] = m->s[1] = val;
	else if (unlikely(val.v <= m->s[2].v))
		m->s[2] = val;

	return minmax_subwin_update(m, win, &val);
}
EXPORT_SYMBOL(minmax_running_min);
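A hedged usage sketch: callers embed a struct minmax and feed it timestamped samples. Here a hypothetical RTT filter (rtt_filter and its helpers are illustrative, in the style of TCP BBR's windowed min_rtt filter) tracks the minimum over the last 10 seconds:

#include <linux/win_minmax.h>
#include <linux/jiffies.h>

struct rtt_filter {
	struct minmax min_rtt;	/* windowed min of RTT samples, in usec */
};

static void rtt_filter_init(struct rtt_filter *f, u32 now, u32 first_rtt_us)
{
	/* Seed all three choices with the first sample. */
	minmax_reset(&f->min_rtt, now, first_rtt_us);
}

static u32 rtt_filter_update(struct rtt_filter *f, u32 rtt_us)
{
	u32 win = 10 * HZ;	/* 10 second window, measured in jiffies */

	/* Returns the current windowed minimum after folding in rtt_us. */
	return minmax_running_min(&f->min_rtt, win, jiffies, rtt_us);
}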
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
* Copyright IBM Corporation, 2013 * Copyright LG Electronics Inc., 2014 * Written by: * Marek Szyprowski <m.szyprowski@samsung.com> * Michal Nazarewicz <mina86@mina86.com> * Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> * Joonsoo Kim <iamjoonsoo.kim@lge.com> */ #define pr_fmt(fmt) "cma: " fmt #define CREATE_TRACE_POINTS #include <linux/memblock.h> #include <linux/err.h> #include <linux/mm.h> #include <linux/sizes.h> #include <linux/slab.h> #include <linux/log2.h> #include <linux/cma.h> #include <linux/highmem.h> #include <linux/io.h> #include <linux/kmemleak.h> #include <trace/events/cma.h> #include "internal.h" #include "cma.h" struct cma cma_areas[MAX_CMA_AREAS]; unsigned cma_area_count; static DEFINE_MUTEX(cma_mutex); phys_addr_t cma_get_base(const struct cma *cma) { return PFN_PHYS(cma->base_pfn); } unsigned long cma_get_size(const struct cma *cma) { return cma->count << PAGE_SHIFT; } const char *cma_get_name(const struct cma *cma) { return cma->name; } static unsigned long cma_bitmap_aligned_mask(const struct cma *cma, unsigned int align_order) { if (align_order <= cma->order_per_bit) return 0; return (1UL << (align_order - cma->order_per_bit)) - 1; } /* * Find the offset of the base PFN from the specified align_order. * The value returned is represented in order_per_bits. */ static unsigned long cma_bitmap_aligned_offset(const struct cma *cma, unsigned int align_order) { return (cma->base_pfn & ((1UL << align_order) - 1)) >> cma->order_per_bit; } static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma, unsigned long pages) { return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit; } static void cma_clear_bitmap(struct cma *cma, unsigned long pfn, unsigned long count) { unsigned long bitmap_no, bitmap_count; unsigned long flags; bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit; bitmap_count = cma_bitmap_pages_to_bits(cma, count); spin_lock_irqsave(&cma->lock, flags); bitmap_clear(cma->bitmap, bitmap_no, bitmap_count); spin_unlock_irqrestore(&cma->lock, flags); } static void __init cma_activate_area(struct cma *cma) { unsigned long base_pfn = cma->base_pfn, pfn; struct zone *zone; cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL); if (!cma->bitmap) goto out_error; /* * alloc_contig_range() requires the pfn range specified to be in the * same zone. Simplify by forcing the entire CMA resv range to be in the * same zone. */ WARN_ON_ONCE(!pfn_valid(base_pfn)); zone = page_zone(pfn_to_page(base_pfn)); for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) { WARN_ON_ONCE(!pfn_valid(pfn)); if (page_zone(pfn_to_page(pfn)) != zone) goto not_in_zone; } for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn += pageblock_nr_pages) init_cma_reserved_pageblock(pfn_to_page(pfn)); spin_lock_init(&cma->lock); #ifdef CONFIG_CMA_DEBUGFS INIT_HLIST_HEAD(&cma->mem_head); spin_lock_init(&cma->mem_head_lock); #endif return; not_in_zone: bitmap_free(cma->bitmap); out_error: /* Expose all pages to the buddy, they are useless for CMA. 
*/ if (!cma->reserve_pages_on_error) { for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++) free_reserved_page(pfn_to_page(pfn)); } totalcma_pages -= cma->count; cma->count = 0; pr_err("CMA area %s could not be activated\n", cma->name); return; } static int __init cma_init_reserved_areas(void) { int i; for (i = 0; i < cma_area_count; i++) cma_activate_area(&cma_areas[i]); return 0; } core_initcall(cma_init_reserved_areas); void __init cma_reserve_pages_on_error(struct cma *cma) { cma->reserve_pages_on_error = true; } /** * cma_init_reserved_mem() - create custom contiguous area from reserved memory * @base: Base address of the reserved area * @size: Size of the reserved area (in bytes), * @order_per_bit: Order of pages represented by one bit on bitmap. * @name: The name of the area. If this parameter is NULL, the name of * the area will be set to "cmaN", where N is a running counter of * used areas. * @res_cma: Pointer to store the created cma region. * * This function creates custom contiguous area from already reserved memory. */ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, unsigned int order_per_bit, const char *name, struct cma **res_cma) { struct cma *cma; /* Sanity checks */ if (cma_area_count == ARRAY_SIZE(cma_areas)) { pr_err("Not enough slots for CMA reserved regions!\n"); return -ENOSPC; } if (!size || !memblock_is_region_reserved(base, size)) return -EINVAL; /* ensure minimal alignment required by mm core */ if (!IS_ALIGNED(base | size, CMA_MIN_ALIGNMENT_BYTES)) return -EINVAL; /* * Each reserved area must be initialised later, when more kernel * subsystems (like slab allocator) are available. */ cma = &cma_areas[cma_area_count]; if (name) snprintf(cma->name, CMA_MAX_NAME, name); else snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count); cma->base_pfn = PFN_DOWN(base); cma->count = size >> PAGE_SHIFT; cma->order_per_bit = order_per_bit; *res_cma = cma; cma_area_count++; totalcma_pages += (size / PAGE_SIZE); return 0; } /** * cma_declare_contiguous_nid() - reserve custom contiguous area * @base: Base address of the reserved area optional, use 0 for any * @size: Size of the reserved area (in bytes), * @limit: End address of the reserved memory (optional, 0 for any). * @alignment: Alignment for the CMA area, should be power of 2 or zero * @order_per_bit: Order of pages represented by one bit on bitmap. * @fixed: hint about where to place the reserved area * @name: The name of the area. See function cma_init_reserved_mem() * @res_cma: Pointer to store the created cma region. * @nid: nid of the free area to find, %NUMA_NO_NODE for any node * * This function reserves memory from early allocator. It should be * called by arch specific code once the early allocator (memblock or bootmem) * has been activated and all other subsystems have already allocated/reserved * memory. This function allows to create custom reserved areas. * * If @fixed is true, reserve contiguous area at exactly @base. If false, * reserve in range from @base to @limit. */ int __init cma_declare_contiguous_nid(phys_addr_t base, phys_addr_t size, phys_addr_t limit, phys_addr_t alignment, unsigned int order_per_bit, bool fixed, const char *name, struct cma **res_cma, int nid) { phys_addr_t memblock_end = memblock_end_of_DRAM(); phys_addr_t highmem_start; int ret; /* * We can't use __pa(high_memory) directly, since high_memory * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly) * complain. Find the boundary by adding one to the last valid * address. 
*/ highmem_start = __pa(high_memory - 1) + 1; pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n", __func__, &size, &base, &limit, &alignment); if (cma_area_count == ARRAY_SIZE(cma_areas)) { pr_err("Not enough slots for CMA reserved regions!\n"); return -ENOSPC; } if (!size) return -EINVAL; if (alignment && !is_power_of_2(alignment)) return -EINVAL; if (!IS_ENABLED(CONFIG_NUMA)) nid = NUMA_NO_NODE; /* Sanitise input arguments. */ alignment = max_t(phys_addr_t, alignment, CMA_MIN_ALIGNMENT_BYTES); if (fixed && base & (alignment - 1)) { ret = -EINVAL; pr_err("Region at %pa must be aligned to %pa bytes\n", &base, &alignment); goto err; } base = ALIGN(base, alignment); size = ALIGN(size, alignment); limit &= ~(alignment - 1); if (!base) fixed = false; /* size should be aligned with order_per_bit */ if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit)) return -EINVAL; /* * If allocating at a fixed base the request region must not cross the * low/high memory boundary. */ if (fixed && base < highmem_start && base + size > highmem_start) { ret = -EINVAL; pr_err("Region at %pa defined on low/high memory boundary (%pa)\n", &base, &highmem_start); goto err; } /* * If the limit is unspecified or above the memblock end, its effective * value will be the memblock end. Set it explicitly to simplify further * checks. */ if (limit == 0 || limit > memblock_end) limit = memblock_end; if (base + size > limit) { ret = -EINVAL; pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n", &size, &base, &limit); goto err; } /* Reserve memory */ if (fixed) { if (memblock_is_region_reserved(base, size) || memblock_reserve(base, size) < 0) { ret = -EBUSY; goto err; } } else { phys_addr_t addr = 0; /* * If there is enough memory, try a bottom-up allocation first. * It will place the new cma area close to the start of the node * and guarantee that the compaction is moving pages out of the * cma area and not into it. * Avoid using first 4GB to not interfere with constrained zones * like DMA/DMA32. */ #ifdef CONFIG_PHYS_ADDR_T_64BIT if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) { memblock_set_bottom_up(true); addr = memblock_alloc_range_nid(size, alignment, SZ_4G, limit, nid, true); memblock_set_bottom_up(false); } #endif /* * All pages in the reserved area must come from the same zone. * If the requested region crosses the low/high memory boundary, * try allocating from high memory first and fall back to low * memory in case of failure. 
*/ if (!addr && base < highmem_start && limit > highmem_start) { addr = memblock_alloc_range_nid(size, alignment, highmem_start, limit, nid, true); limit = highmem_start; } if (!addr) { addr = memblock_alloc_range_nid(size, alignment, base, limit, nid, true); if (!addr) { ret = -ENOMEM; goto err; } } /* * kmemleak scans/reads tracked objects for pointers to other * objects but this address isn't mapped and accessible */ kmemleak_ignore_phys(addr); base = addr; } ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma); if (ret) goto free_mem; pr_info("Reserved %ld MiB at %pa on node %d\n", (unsigned long)size / SZ_1M, &base, nid); return 0; free_mem: memblock_phys_free(base, size); err: pr_err("Failed to reserve %ld MiB on node %d\n", (unsigned long)size / SZ_1M, nid); return ret; } static void cma_debug_show_areas(struct cma *cma) { unsigned long next_zero_bit, next_set_bit, nr_zero; unsigned long start = 0; unsigned long nr_part, nr_total = 0; unsigned long nbits = cma_bitmap_maxno(cma); spin_lock_irq(&cma->lock); pr_info("number of available pages: "); for (;;) { next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start); if (next_zero_bit >= nbits) break; next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit); nr_zero = next_set_bit - next_zero_bit; nr_part = nr_zero << cma->order_per_bit; pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part, next_zero_bit); nr_total += nr_part; start = next_zero_bit + nr_zero; } pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count); spin_unlock_irq(&cma->lock); } /** * cma_alloc() - allocate pages from contiguous area * @cma: Contiguous memory region for which the allocation is performed. * @count: Requested number of pages. * @align: Requested alignment of pages (in PAGE_SIZE order). * @no_warn: Avoid printing message about failed allocation * * This function allocates part of contiguous memory on specific * contiguous memory area. */ struct page *cma_alloc(struct cma *cma, unsigned long count, unsigned int align, bool no_warn) { unsigned long mask, offset; unsigned long pfn = -1; unsigned long start = 0; unsigned long bitmap_maxno, bitmap_no, bitmap_count; unsigned long i; struct page *page = NULL; int ret = -ENOMEM; const char *name = cma ? cma->name : NULL; trace_cma_alloc_start(name, count, align); if (!cma || !cma->count || !cma->bitmap) return page; pr_debug("%s(cma %p, name: %s, count %lu, align %d)\n", __func__, (void *)cma, cma->name, count, align); if (!count) return page; mask = cma_bitmap_aligned_mask(cma, align); offset = cma_bitmap_aligned_offset(cma, align); bitmap_maxno = cma_bitmap_maxno(cma); bitmap_count = cma_bitmap_pages_to_bits(cma, count); if (bitmap_count > bitmap_maxno) return page; for (;;) { spin_lock_irq(&cma->lock); bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap, bitmap_maxno, start, bitmap_count, mask, offset); if (bitmap_no >= bitmap_maxno) { spin_unlock_irq(&cma->lock); break; } bitmap_set(cma->bitmap, bitmap_no, bitmap_count); /* * It's safe to drop the lock here. We've marked this region for * our exclusive use. If the migration fails we will take the * lock again and unmark it. */ spin_unlock_irq(&cma->lock); pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit); mutex_lock(&cma_mutex); ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, GFP_KERNEL | (no_warn ? 
__GFP_NOWARN : 0)); mutex_unlock(&cma_mutex); if (ret == 0) { page = pfn_to_page(pfn); break; } cma_clear_bitmap(cma, pfn, count); if (ret != -EBUSY) break; pr_debug("%s(): memory range at pfn 0x%lx %p is busy, retrying\n", __func__, pfn, pfn_to_page(pfn)); trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn), count, align); /* try again with a bit different memory target */ start = bitmap_no + mask + 1; } /* * CMA can allocate multiple page blocks, which results in different * blocks being marked with different tags. Reset the tags to ignore * those page blocks. */ if (page) { for (i = 0; i < count; i++) page_kasan_tag_reset(nth_page(page, i)); } if (ret && !no_warn) { pr_err_ratelimited("%s: %s: alloc failed, req-size: %lu pages, ret: %d\n", __func__, cma->name, count, ret); cma_debug_show_areas(cma); } pr_debug("%s(): returned %p\n", __func__, page); trace_cma_alloc_finish(name, pfn, page, count, align, ret); if (page) { count_vm_event(CMA_ALLOC_SUCCESS); cma_sysfs_account_success_pages(cma, count); } else { count_vm_event(CMA_ALLOC_FAIL); cma_sysfs_account_fail_pages(cma, count); } return page; } bool cma_pages_valid(struct cma *cma, const struct page *pages, unsigned long count) { unsigned long pfn; if (!cma || !pages) return false; pfn = page_to_pfn(pages); if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count) { pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count); return false; } return true; } /** * cma_release() - release allocated pages * @cma: Contiguous memory region for which the allocation is performed. * @pages: Allocated pages. * @count: Number of allocated pages. * * This function releases memory allocated by cma_alloc(). * It returns false when provided pages do not belong to contiguous area and * true otherwise. */ bool cma_release(struct cma *cma, const struct page *pages, unsigned long count) { unsigned long pfn; if (!cma_pages_valid(cma, pages, count)) return false; pr_debug("%s(page %p, count %lu)\n", __func__, (void *)pages, count); pfn = page_to_pfn(pages); VM_BUG_ON(pfn + count > cma->base_pfn + cma->count); free_contig_range(pfn, count); cma_clear_bitmap(cma, pfn, count); cma_sysfs_account_release_pages(cma, count); trace_cma_release(cma->name, pfn, pages, count); return true; } int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data) { int i; for (i = 0; i < cma_area_count; i++) { int ret = it(&cma_areas[i], data); if (ret) return ret; } return 0; } |
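A hedged sketch of the consumer side: a platform carves out a named CMA area while the early (memblock) allocator is still active, and drivers later draw contiguous page runs from it with cma_alloc()/cma_release(). The names my_cma, my_cma_setup, my_buf_alloc, and the 16 MiB size are illustrative, not part of this file:

#include <linux/cma.h>
#include <linux/sizes.h>
#include <linux/numa.h>

static struct cma *my_cma;	/* illustrative area pointer */

/* Must run from early arch/platform setup, before memblock retires. */
static int __init my_cma_setup(void)
{
	/* 16 MiB anywhere in memory, default alignment, one bit per page */
	return cma_declare_contiguous_nid(0, SZ_16M, 0, 0, 0, false,
					  "my_cma", &my_cma, NUMA_NO_NODE);
}

static struct page *my_buf_alloc(unsigned long npages)
{
	/* order-0 alignment; false => warn on allocation failure */
	return cma_alloc(my_cma, npages, 0, false);
}

static void my_buf_free(struct page *pages, unsigned long npages)
{
	cma_release(my_cma, pages, npages);
}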
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_quota.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_qm.h"

STATIC void
xfs_fill_statvfs_from_dquot(
	struct kstatfs		*statp,
	struct xfs_dquot	*dqp)
{
	uint64_t		limit;

	limit = dqp->q_blk.softlimit ?
		dqp->q_blk.softlimit :
		dqp->q_blk.hardlimit;
	if (limit && statp->f_blocks > limit) {
		statp->f_blocks = limit;
		statp->f_bfree = statp->f_bavail =
			(statp->f_blocks > dqp->q_blk.reserved) ?
			 (statp->f_blocks - dqp->q_blk.reserved) : 0;
	}

	limit = dqp->q_ino.softlimit ?
		dqp->q_ino.softlimit :
		dqp->q_ino.hardlimit;
	if (limit && statp->f_files > limit) {
		statp->f_files = limit;
		statp->f_ffree =
			(statp->f_files > dqp->q_ino.reserved) ?
			 (statp->f_files - dqp->q_ino.reserved) : 0;
	}
}

/*
 * Directory tree accounting is implemented using project quotas, where
 * the project identifier is inherited from parent directories.
 * A statvfs (df, etc.) of a directory that is using project quota should
 * return a statvfs of the project, not the entire filesystem.
 * This makes such trees appear as if they are filesystems in themselves.
 */
void
xfs_qm_statvfs(
	struct xfs_inode	*ip,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;

	if (!xfs_qm_dqget(mp, ip->i_projid, XFS_DQTYPE_PROJ, false, &dqp)) {
		xfs_fill_statvfs_from_dquot(statp, dqp);
		xfs_qm_dqput(dqp);
	}
}

int
xfs_qm_newmount(
	xfs_mount_t	*mp,
	uint		*needquotamount,
	uint		*quotaflags)
{
	uint		quotaondisk;
	uint		uquotaondisk = 0, gquotaondisk = 0, pquotaondisk = 0;

	quotaondisk = xfs_has_quota(mp) &&
				(mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT);

	if (quotaondisk) {
		uquotaondisk = mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT;
		pquotaondisk = mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT;
		gquotaondisk = mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT;
	}

	/*
	 * If the device itself is read-only, we can't allow
	 * the user to change the state of quota on the mount -
	 * this would generate a transaction on the ro device,
	 * which would lead to an I/O error and shutdown
	 */
	if (((uquotaondisk && !XFS_IS_UQUOTA_ON(mp)) ||
	    (!uquotaondisk &&  XFS_IS_UQUOTA_ON(mp)) ||
	     (gquotaondisk && !XFS_IS_GQUOTA_ON(mp)) ||
	    (!gquotaondisk &&  XFS_IS_GQUOTA_ON(mp)) ||
	     (pquotaondisk && !XFS_IS_PQUOTA_ON(mp)) ||
	    (!pquotaondisk &&  XFS_IS_PQUOTA_ON(mp))) &&
	    xfs_dev_is_read_only(mp, "changing quota state")) {
		xfs_warn(mp, "please mount with%s%s%s%s.",
			(!quotaondisk ? "out quota" : ""),
			(uquotaondisk ? " usrquota" : ""),
			(gquotaondisk ? " grpquota" : ""),
			(pquotaondisk ? " prjquota" : ""));
		return -EPERM;
	}

	if (XFS_IS_QUOTA_ON(mp) || quotaondisk) {
		/*
		 * Call mount_quotas at this point only if we won't have to do
		 * a quotacheck.
		 */
		if (quotaondisk && !XFS_QM_NEED_QUOTACHECK(mp)) {
			/*
			 * If an error occurred, qm_mount_quotas code
			 * has already disabled quotas. So, just finish
			 * mounting, and get on with the boring life
			 * without disk quotas.
			 */
			xfs_qm_mount_quotas(mp);
		} else {
			/*
			 * Clear the quota flags, but remember them. This
			 * is so that the quota code doesn't get invoked
			 * before we're ready. This can happen when an
			 * inode goes inactive and wants to free blocks,
			 * or via xfs_log_mount_finish.
			 */
			*needquotamount = true;
			*quotaflags = mp->m_qflags;
			mp->m_qflags = 0;
		}
	}

	return 0;
}
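For context, a hedged sketch of how a statfs implementation can use xfs_qm_statvfs(): only directories flagged to inherit a project id, on mounts where project quota is both accounted and enforced, get their counters clamped to the project limits. This is loosely modeled on the xfs statfs path; myfs_statfs_example is hypothetical and elides the superblock-counter fill-in:

static int
myfs_statfs_example(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	struct xfs_mount	*mp = ip->i_mount;

	/* ... fill statp from superblock counters first ... */

	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD)) ==
			     (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD)))
		xfs_qm_statvfs(ip, statp);
	return 0;
}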
// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/sctp.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_sctp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kiran Kumar Immidi");
MODULE_DESCRIPTION("Xtables: SCTP protocol packet match");
MODULE_ALIAS("ipt_sctp");
MODULE_ALIAS("ip6t_sctp");

#define SCCHECK(cond, option, flag, invflag) (!((flag) & (option)) \
					      || (!!((invflag) & (option)) ^ (cond)))

static bool
match_flags(const struct xt_sctp_flag_info *flag_info,
	    const int flag_count,
	    u_int8_t chunktype,
	    u_int8_t chunkflags)
{
	int i;

	for (i = 0; i < flag_count; i++)
		if (flag_info[i].chunktype == chunktype)
			return (chunkflags & flag_info[i].flag_mask) ==
				flag_info[i].flag;

	return true;
}

static inline bool
match_packet(const struct sk_buff *skb,
	     unsigned int offset,
	     const struct xt_sctp_info *info,
	     bool *hotdrop)
{
	u_int32_t chunkmapcopy[256 / sizeof (u_int32_t)];
	const struct sctp_chunkhdr *sch;
	struct sctp_chunkhdr _sch;
	int chunk_match_type = info->chunk_match_type;
	const struct xt_sctp_flag_info *flag_info = info->flag_info;
	int flag_count = info->flag_count;

#ifdef DEBUG
	int i = 0;
#endif

	if (chunk_match_type == SCTP_CHUNK_MATCH_ALL)
		SCTP_CHUNKMAP_COPY(chunkmapcopy, info->chunkmap);

	do {
		sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch);
		if (sch == NULL || sch->length == 0) {
			pr_debug("Dropping invalid SCTP packet.\n");
			*hotdrop = true;
			return false;
		}
#ifdef DEBUG
		pr_debug("Chunk num: %d\toffset: %d\ttype: %d\tlength: %d"
			 "\tflags: %x\n",
			 ++i, offset, sch->type, htons(sch->length),
			 sch->flags);
#endif
		offset += SCTP_PAD4(ntohs(sch->length));

		pr_debug("skb->len: %d\toffset: %d\n", skb->len, offset);

		if (SCTP_CHUNKMAP_IS_SET(info->chunkmap, sch->type)) {
			switch (chunk_match_type) {
			case SCTP_CHUNK_MATCH_ANY:
				if (match_flags(flag_info, flag_count,
						sch->type, sch->flags)) {
					return true;
				}
				break;

			case SCTP_CHUNK_MATCH_ALL:
				if (match_flags(flag_info, flag_count,
						sch->type, sch->flags))
					SCTP_CHUNKMAP_CLEAR(chunkmapcopy, sch->type);
				break;

			case SCTP_CHUNK_MATCH_ONLY:
				if (!match_flags(flag_info, flag_count,
						 sch->type, sch->flags))
					return false;
				break;
			}
		} else {
			switch (chunk_match_type) {
			case SCTP_CHUNK_MATCH_ONLY:
				return false;
			}
		}
	} while (offset < skb->len);

	switch (chunk_match_type) {
	case SCTP_CHUNK_MATCH_ALL:
		return SCTP_CHUNKMAP_IS_CLEAR(chunkmapcopy);
	case SCTP_CHUNK_MATCH_ANY:
		return false;
	case SCTP_CHUNK_MATCH_ONLY:
		return true;
	}

	/* This will never be reached, but required to stop compiler whine */
	return false;
}

static bool
sctp_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_sctp_info *info = par->matchinfo;
	const struct sctphdr *sh;
	struct sctphdr _sh;

	if (par->fragoff != 0) {
		pr_debug("Dropping non-first fragment.. FIXME\n");
		return false;
	}

	sh = skb_header_pointer(skb, par->thoff, sizeof(_sh), &_sh);
	if (sh == NULL) {
		pr_debug("Dropping evil TCP offset=0 tinygram.\n");
		par->hotdrop = true;
		return false;
	}
	pr_debug("spt: %d\tdpt: %d\n", ntohs(sh->source), ntohs(sh->dest));

	return  SCCHECK(ntohs(sh->source) >= info->spts[0]
			&& ntohs(sh->source) <= info->spts[1],
			XT_SCTP_SRC_PORTS, info->flags, info->invflags) &&
		SCCHECK(ntohs(sh->dest) >= info->dpts[0]
			&& ntohs(sh->dest) <= info->dpts[1],
			XT_SCTP_DEST_PORTS, info->flags, info->invflags) &&
		SCCHECK(match_packet(skb, par->thoff + sizeof(_sh),
				     info, &par->hotdrop),
			XT_SCTP_CHUNK_TYPES, info->flags, info->invflags);
}

static int sctp_mt_check(const struct xt_mtchk_param *par)
{
	const struct xt_sctp_info *info = par->matchinfo;

	if (info->flag_count > ARRAY_SIZE(info->flag_info))
		return -EINVAL;
	if (info->flags & ~XT_SCTP_VALID_FLAGS)
		return -EINVAL;
	if (info->invflags & ~XT_SCTP_VALID_FLAGS)
		return -EINVAL;
	if (info->invflags & ~info->flags)
		return -EINVAL;
	if (!(info->flags & XT_SCTP_CHUNK_TYPES))
		return 0;
	if (info->chunk_match_type & (SCTP_CHUNK_MATCH_ALL |
				      SCTP_CHUNK_MATCH_ANY |
				      SCTP_CHUNK_MATCH_ONLY))
		return 0;
	return -EINVAL;
}

static struct xt_match sctp_mt_reg[] __read_mostly = {
	{
		.name		= "sctp",
		.family		= NFPROTO_IPV4,
		.checkentry	= sctp_mt_check,
		.match		= sctp_mt,
		.matchsize	= sizeof(struct xt_sctp_info),
		.proto		= IPPROTO_SCTP,
		.me		= THIS_MODULE
	},
	{
		.name		= "sctp",
		.family		= NFPROTO_IPV6,
		.checkentry	= sctp_mt_check,
		.match		= sctp_mt,
		.matchsize	= sizeof(struct xt_sctp_info),
		.proto		= IPPROTO_SCTP,
		.me		= THIS_MODULE
	},
};

static int __init sctp_mt_init(void)
{
	return xt_register_matches(sctp_mt_reg, ARRAY_SIZE(sctp_mt_reg));
}

static void __exit sctp_mt_exit(void)
{
	xt_unregister_matches(sctp_mt_reg, ARRAY_SIZE(sctp_mt_reg));
}

module_init(sctp_mt_init);
module_exit(sctp_mt_exit);
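For reference, this match is driven from userspace by the iptables sctp extension (libxt_sctp), which fills in struct xt_sctp_info. A rule such as iptables -A INPUT -p sctp --dport 2905 -j ACCEPT exercises the port-range checks in sctp_mt(), while something like iptables -A INPUT -p sctp --chunk-types any DATA,INIT -j DROP exercises the chunk-map walk in match_packet(); the exact option syntax is documented in iptables-extensions(8), so treat these example rules as illustrative.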
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_IOMAP_H
#define LINUX_IOMAP_H 1

#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/blk_types.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/mm_types.h>
#include <linux/blkdev.h>

struct address_space;
struct fiemap_extent_info;
struct inode;
struct iomap_iter;
struct iomap_dio;
struct iomap_writepage_ctx;
struct iov_iter;
struct kiocb;
struct page;
struct vm_area_struct;
struct vm_fault;

/*
 * Types of block ranges for iomap mappings:
 */
#define IOMAP_HOLE	0	/* no blocks allocated, need allocation */
#define IOMAP_DELALLOC	1	/* delayed allocation blocks */
#define IOMAP_MAPPED	2	/* blocks allocated at @addr */
#define IOMAP_UNWRITTEN	3	/* blocks allocated at @addr in unwritten state */
#define IOMAP_INLINE	4	/* data inline in the inode */

/*
 * Flags reported by the file system from iomap_begin:
 *
 * IOMAP_F_NEW indicates that the blocks have been newly allocated and need
 * zeroing for areas that no data is copied to.
 *
 * IOMAP_F_DIRTY indicates the inode has uncommitted metadata needed to access
 * written data and requires fdatasync to commit them to persistent storage.
 * This needs to take into account metadata changes that *may* be made at IO
 * completion, such as file size updates from direct IO.
 *
 * IOMAP_F_SHARED indicates that the blocks are shared, and will need to be
 * unshared as part of a write.
 *
 * IOMAP_F_MERGED indicates that the iomap contains the merge of multiple block
 * mappings.
 *
 * IOMAP_F_BUFFER_HEAD indicates that the file system requires the use of
 * buffer heads for this mapping.
 *
 * IOMAP_F_XATTR indicates that the iomap is for an extended attribute extent
 * rather than a file data extent.
*/ #define IOMAP_F_NEW (1U << 0) #define IOMAP_F_DIRTY (1U << 1) #define IOMAP_F_SHARED (1U << 2) #define IOMAP_F_MERGED (1U << 3) #ifdef CONFIG_BUFFER_HEAD #define IOMAP_F_BUFFER_HEAD (1U << 4) #else #define IOMAP_F_BUFFER_HEAD 0 #endif /* CONFIG_BUFFER_HEAD */ #define IOMAP_F_XATTR (1U << 5) /* * Flags set by the core iomap code during operations: * * IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size * has changed as the result of this write operation. * * IOMAP_F_STALE indicates that the iomap is not valid any longer and the file * range it covers needs to be remapped by the high level before the operation * can proceed. */ #define IOMAP_F_SIZE_CHANGED (1U << 8) #define IOMAP_F_STALE (1U << 9) /* * Flags from 0x1000 up are for file system specific usage: */ #define IOMAP_F_PRIVATE (1U << 12) /* * Magic value for addr: */ #define IOMAP_NULL_ADDR -1ULL /* addr is not valid */ struct iomap_folio_ops; struct iomap { u64 addr; /* disk offset of mapping, bytes */ loff_t offset; /* file offset of mapping, bytes */ u64 length; /* length of mapping, bytes */ u16 type; /* type of mapping */ u16 flags; /* flags for mapping */ struct block_device *bdev; /* block device for I/O */ struct dax_device *dax_dev; /* dax_dev for dax operations */ void *inline_data; void *private; /* filesystem private */ const struct iomap_folio_ops *folio_ops; u64 validity_cookie; /* used with .iomap_valid() */ }; static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos) { return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT; } /* * Returns the inline data pointer for logical offset @pos. */ static inline void *iomap_inline_data(const struct iomap *iomap, loff_t pos) { return iomap->inline_data + pos - iomap->offset; } /* * Check if the mapping's length is within the valid range for inline data. * This is used to guard against accessing data beyond the page inline_data * points at. */ static inline bool iomap_inline_data_valid(const struct iomap *iomap) { return iomap->length <= PAGE_SIZE - offset_in_page(iomap->inline_data); } /* * When a filesystem sets folio_ops in an iomap mapping it returns, get_folio * and put_folio will be called for each folio written to. This only applies * to buffered writes as unbuffered writes will not typically have folios * associated with them. * * When get_folio succeeds, put_folio will always be called to do any * cleanup work necessary. put_folio is responsible for unlocking and putting * @folio. */ struct iomap_folio_ops { struct folio *(*get_folio)(struct iomap_iter *iter, loff_t pos, unsigned len); void (*put_folio)(struct inode *inode, loff_t pos, unsigned copied, struct folio *folio); /* * Check that the cached iomap still maps correctly to the filesystem's * internal extent map. FS internal extent maps can change while iomap * is iterating a cached iomap, so this hook allows iomap to detect that * the iomap needs to be refreshed during a long running write * operation. * * The filesystem can store internal state (e.g. a sequence number) in * iomap->validity_cookie when the iomap is first mapped to be able to * detect changes between mapping time and whenever .iomap_valid() is * called. * * This is called with the folio over the specified file position held * locked by the iomap code. */ bool (*iomap_valid)(struct inode *inode, const struct iomap *iomap); }; /* * Flags for iomap_begin / iomap_end. No flag implies a read. 
 */
#define IOMAP_WRITE		(1 << 0) /* writing, must allocate blocks */
#define IOMAP_ZERO		(1 << 1) /* zeroing operation, may skip holes */
#define IOMAP_REPORT		(1 << 2) /* report extent status, e.g. FIEMAP */
#define IOMAP_FAULT		(1 << 3) /* mapping for page fault */
#define IOMAP_DIRECT		(1 << 4) /* direct I/O */
#define IOMAP_NOWAIT		(1 << 5) /* do not block */
#define IOMAP_OVERWRITE_ONLY	(1 << 6) /* only pure overwrites allowed */
#define IOMAP_UNSHARE		(1 << 7) /* unshare_file_range */
#ifdef CONFIG_FS_DAX
#define IOMAP_DAX		(1 << 8) /* DAX mapping */
#else
#define IOMAP_DAX		0
#endif /* CONFIG_FS_DAX */

struct iomap_ops {
	/*
	 * Return the existing mapping at pos, or reserve space starting at
	 * pos for up to length, as long as we can do it as a single mapping.
	 * The actual length is returned in iomap->length.
	 */
	int (*iomap_begin)(struct inode *inode, loff_t pos, loff_t length,
			unsigned flags, struct iomap *iomap,
			struct iomap *srcmap);

	/*
	 * Commit and/or unreserve space previously allocated using
	 * iomap_begin. Written indicates the length of the successful
	 * write operation which needs to be committed, while the rest
	 * needs to be unreserved. Written might be zero if no data was
	 * written.
	 */
	int (*iomap_end)(struct inode *inode, loff_t pos, loff_t length,
			ssize_t written, unsigned flags, struct iomap *iomap);
};

/**
 * struct iomap_iter - Iterate through a range of a file
 * @inode: Set at the start of the iteration and should not change.
 * @pos: The current file position we are operating on. It is updated by
 *	calls to iomap_iter(). Treat as read-only in the body.
 * @len: The remaining length of the file segment we're operating on.
 *	It is updated at the same time as @pos.
 * @processed: The number of bytes processed by the body in the most recent
 *	iteration, or a negative errno. 0 causes the iteration to stop.
 * @flags: Zero or more of the iomap_begin flags above.
 * @iomap: Map describing the I/O iteration
 * @srcmap: Source map for COW operations
 */
struct iomap_iter {
	struct inode *inode;
	loff_t pos;
	u64 len;
	s64 processed;
	unsigned flags;
	struct iomap iomap;
	struct iomap srcmap;
	void *private;
};

int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops);

/**
 * iomap_length - length of the current iomap iteration
 * @iter: iteration structure
 *
 * Returns the length that the operation applies to for the current iteration.
 */
static inline u64 iomap_length(const struct iomap_iter *iter)
{
	u64 end = iter->iomap.offset + iter->iomap.length;

	if (iter->srcmap.type != IOMAP_HOLE)
		end = min(end, iter->srcmap.offset + iter->srcmap.length);
	return min(iter->len, end - iter->pos);
}

/**
 * iomap_iter_srcmap - return the source map for the current iomap iteration
 * @i: iteration structure
 *
 * Write operations on file systems with reflink support might require a
 * source and a destination map. This function returns the source map
 * for a given operation, which may or may not be identical to the destination
 * map in &i->iomap.
*/ static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i) { if (i->srcmap.type != IOMAP_HOLE) return &i->srcmap; return &i->iomap; } ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, const struct iomap_ops *ops); int iomap_file_buffered_write_punch_delalloc(struct inode *inode, struct iomap *iomap, loff_t pos, loff_t length, ssize_t written, int (*punch)(struct inode *inode, loff_t pos, loff_t length)); int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops); void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops); bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count); struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len); bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags); void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len); bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio); int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len, const struct iomap_ops *ops); int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, const struct iomap_ops *ops); int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, const struct iomap_ops *ops); vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops); int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len, const struct iomap_ops *ops); loff_t iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops); loff_t iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops); sector_t iomap_bmap(struct address_space *mapping, sector_t bno, const struct iomap_ops *ops); /* * Structure for writeback I/O completions. */ struct iomap_ioend { struct list_head io_list; /* next ioend in chain */ u16 io_type; u16 io_flags; /* IOMAP_F_* */ struct inode *io_inode; /* file being written to */ size_t io_size; /* size of the extent */ loff_t io_offset; /* offset in the file */ sector_t io_sector; /* start sector of ioend */ struct bio io_bio; /* MUST BE LAST! */ }; static inline struct iomap_ioend *iomap_ioend_from_bio(struct bio *bio) { return container_of(bio, struct iomap_ioend, io_bio); } struct iomap_writeback_ops { /* * Required, maps the blocks so that writeback can be performed on * the range starting at offset. * * Can return arbitrarily large regions, but we need to call into it at * least once per folio to allow the file systems to synchronize with * the write path that could be invalidating mappings. * * An existing mapping from a previous call to this method can be reused * by the file system if it is still valid. */ int (*map_blocks)(struct iomap_writepage_ctx *wpc, struct inode *inode, loff_t offset, unsigned len); /* * Optional, allows the file systems to perform actions just before * submitting the bio and/or override the bio end_io handler for complex * operations like copy on write extent manipulation or unwritten extent * conversions. */ int (*prepare_ioend)(struct iomap_ioend *ioend, int status); /* * Optional, allows the file system to discard state on a page where * we failed to submit any I/O. 
*/ void (*discard_folio)(struct folio *folio, loff_t pos); }; struct iomap_writepage_ctx { struct iomap iomap; struct iomap_ioend *ioend; const struct iomap_writeback_ops *ops; u32 nr_folios; /* folios added to the ioend */ }; void iomap_finish_ioends(struct iomap_ioend *ioend, int error); void iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends); void iomap_sort_ioends(struct list_head *ioend_list); int iomap_writepages(struct address_space *mapping, struct writeback_control *wbc, struct iomap_writepage_ctx *wpc, const struct iomap_writeback_ops *ops); /* * Flags for direct I/O ->end_io: */ #define IOMAP_DIO_UNWRITTEN (1 << 0) /* covers unwritten extent(s) */ #define IOMAP_DIO_COW (1 << 1) /* covers COW extent(s) */ struct iomap_dio_ops { int (*end_io)(struct kiocb *iocb, ssize_t size, int error, unsigned flags); void (*submit_io)(const struct iomap_iter *iter, struct bio *bio, loff_t file_offset); /* * Filesystems wishing to attach private information to a direct io bio * must provide a ->submit_io method that attaches the additional * information to the bio and changes the ->bi_end_io callback to a * custom function. This function should, at a minimum, perform any * relevant post-processing of the bio and end with a call to * iomap_dio_bio_end_io. */ struct bio_set *bio_set; }; /* * Wait for the I/O to complete in iomap_dio_rw even if the kiocb is not * synchronous. */ #define IOMAP_DIO_FORCE_WAIT (1 << 0) /* * Do not allocate blocks or zero partial blocks, but instead fall back to * the caller by returning -EAGAIN. Used to optimize direct I/O writes that * are not aligned to the file system block size. */ #define IOMAP_DIO_OVERWRITE_ONLY (1 << 1) /* * When a page fault occurs, return a partial synchronous result and allow * the caller to retry the rest of the operation after dealing with the page * fault. */ #define IOMAP_DIO_PARTIAL (1 << 2) ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops, const struct iomap_dio_ops *dops, unsigned int dio_flags, void *private, size_t done_before); struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops, const struct iomap_dio_ops *dops, unsigned int dio_flags, void *private, size_t done_before); ssize_t iomap_dio_complete(struct iomap_dio *dio); void iomap_dio_bio_end_io(struct bio *bio); #ifdef CONFIG_SWAP struct file; struct swap_info_struct; int iomap_swapfile_activate(struct swap_info_struct *sis, struct file *swap_file, sector_t *pagespan, const struct iomap_ops *ops); #else # define iomap_swapfile_activate(sis, swapfile, pagespan, ops) (-EIO) #endif /* CONFIG_SWAP */ #endif /* LINUX_IOMAP_H */ |
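To show how the pieces fit together, here is a hedged sketch of a filesystem wiring up struct iomap_ops. The "myfs" name and its 1:1 file-to-disk layout are hypothetical; a real iomap_begin would consult the filesystem's extent map and report holes, delalloc, or unwritten ranges accordingly:

#include <linux/fs.h>
#include <linux/iomap.h>

/* Minimal iomap_begin: pretend every file maps linearly onto the device
 * starting at byte 0, so file offset == disk offset. */
static int myfs_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
			    unsigned flags, struct iomap *iomap,
			    struct iomap *srcmap)
{
	iomap->addr = pos;		/* hypothetical 1:1 layout */
	iomap->offset = pos;
	iomap->length = length;
	iomap->type = IOMAP_MAPPED;
	iomap->flags = 0;
	iomap->bdev = inode->i_sb->s_bdev;
	return 0;
}

static const struct iomap_ops myfs_iomap_ops = {
	.iomap_begin	= myfs_iomap_begin,
	/* .iomap_end is optional when there is nothing to commit or unreserve */
};

With such a table in place, the read paths reduce to calls like iomap_read_folio(folio, &myfs_iomap_ops) and iomap_readahead(rac, &myfs_iomap_ops) from the filesystem's address_space operations.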
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
*/ #include "xfs.h" #include "xfs_fs.h" #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_mount.h" #include "xfs_inode.h" #include "xfs_trans.h" #include "xfs_inode_item.h" #include "xfs_btree.h" #include "xfs_bmap_btree.h" #include "xfs_bmap.h" #include "xfs_error.h" #include "xfs_trace.h" #include "xfs_da_format.h" #include "xfs_da_btree.h" #include "xfs_dir2_priv.h" #include "xfs_attr_leaf.h" #include "xfs_types.h" #include "xfs_errortag.h" #include "xfs_health.h" #include "xfs_symlink_remote.h" struct kmem_cache *xfs_ifork_cache; void xfs_init_local_fork( struct xfs_inode *ip, int whichfork, const void *data, int64_t size) { struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); int mem_size = size; bool zero_terminate; /* * If we are using the local fork to store a symlink body we need to * zero-terminate it so that we can pass it back to the VFS directly. * Overallocate the in-memory fork by one for that and add a zero * to terminate it below. */ zero_terminate = S_ISLNK(VFS_I(ip)->i_mode); if (zero_terminate) mem_size++; if (size) { char *new_data = kmalloc(mem_size, GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL); memcpy(new_data, data, size); if (zero_terminate) new_data[size] = '\0'; ifp->if_data = new_data; } else { ifp->if_data = NULL; } ifp->if_bytes = size; } /* * The file is in-lined in the on-disk inode. */ STATIC int xfs_iformat_local( struct xfs_inode *ip, struct xfs_dinode *dip, int whichfork, int size) { /* * If the size is unreasonable, then something * is wrong and we just bail out rather than crash in * kmalloc() or memcpy() below. */ if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) { xfs_warn(ip->i_mount, "corrupt inode %llu (bad size %d for local fork, size = %zd).", (unsigned long long) ip->i_ino, size, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)); xfs_inode_verifier_error(ip, -EFSCORRUPTED, "xfs_iformat_local", dip, sizeof(*dip), __this_address); xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE); return -EFSCORRUPTED; } xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size); return 0; } /* * The file consists of a set of extents all of which fit into the on-disk * inode. */ STATIC int xfs_iformat_extents( struct xfs_inode *ip, struct xfs_dinode *dip, int whichfork) { struct xfs_mount *mp = ip->i_mount; struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); int state = xfs_bmap_fork_to_state(whichfork); xfs_extnum_t nex = xfs_dfork_nextents(dip, whichfork); int size = nex * sizeof(xfs_bmbt_rec_t); struct xfs_iext_cursor icur; struct xfs_bmbt_rec *dp; struct xfs_bmbt_irec new; int i; /* * If the number of extents is unreasonable, then something is wrong and * we just bail out rather than crash in kmalloc() or memcpy() below. 
*/ if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, mp, whichfork))) { xfs_warn(ip->i_mount, "corrupt inode %llu ((a)extents = %llu).", ip->i_ino, nex); xfs_inode_verifier_error(ip, -EFSCORRUPTED, "xfs_iformat_extents(1)", dip, sizeof(*dip), __this_address); xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE); return -EFSCORRUPTED; } ifp->if_bytes = 0; ifp->if_data = NULL; ifp->if_height = 0; if (size) { dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork); xfs_iext_first(ifp, &icur); for (i = 0; i < nex; i++, dp++) { xfs_failaddr_t fa; xfs_bmbt_disk_get_all(dp, &new); fa = xfs_bmap_validate_extent(ip, whichfork, &new); if (fa) { xfs_inode_verifier_error(ip, -EFSCORRUPTED, "xfs_iformat_extents(2)", dp, sizeof(*dp), fa); xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE); return xfs_bmap_complain_bad_rec(ip, whichfork, fa, &new); } xfs_iext_insert(ip, &icur, &new, state); trace_xfs_read_extent(ip, &icur, state, _THIS_IP_); xfs_iext_next(ifp, &icur); } } return 0; } /* * The file has too many extents to fit into * the inode, so they are in B-tree format. * Allocate a buffer for the root of the B-tree * and copy the root into it. The i_extents * field will remain NULL until all of the * extents are read in (when they are needed). */ STATIC int xfs_iformat_btree( struct xfs_inode *ip, struct xfs_dinode *dip, int whichfork) { struct xfs_mount *mp = ip->i_mount; xfs_bmdr_block_t *dfp; struct xfs_ifork *ifp; /* REFERENCED */ int nrecs; int size; int level; ifp = xfs_ifork_ptr(ip, whichfork); dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork); size = XFS_BMAP_BROOT_SPACE(mp, dfp); nrecs = be16_to_cpu(dfp->bb_numrecs); level = be16_to_cpu(dfp->bb_level); /* * blow out if -- fork has less extents than can fit in * fork (fork shouldn't be a btree format), root btree * block has more records than can fit into the fork, * or the number of extents is greater than the number of * blocks. */ if (unlikely(ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork) || nrecs == 0 || XFS_BMDR_SPACE_CALC(nrecs) > XFS_DFORK_SIZE(dip, mp, whichfork) || ifp->if_nextents > ip->i_nblocks) || level == 0 || level > XFS_BM_MAXLEVELS(mp, whichfork)) { xfs_warn(mp, "corrupt inode %llu (btree).", (unsigned long long) ip->i_ino); xfs_inode_verifier_error(ip, -EFSCORRUPTED, "xfs_iformat_btree", dfp, size, __this_address); xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE); return -EFSCORRUPTED; } ifp->if_broot_bytes = size; ifp->if_broot = kmalloc(size, GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL); ASSERT(ifp->if_broot != NULL); /* * Copy and convert from the on-disk structure * to the in-memory structure. */ xfs_bmdr_to_bmbt(ip, dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork), ifp->if_broot, size); ifp->if_bytes = 0; ifp->if_data = NULL; ifp->if_height = 0; return 0; } int xfs_iformat_data_fork( struct xfs_inode *ip, struct xfs_dinode *dip) { struct inode *inode = VFS_I(ip); int error; /* * Initialize the extent count early, as the per-format routines may * depend on it. Use release semantics to set needextents /after/ we * set the format. This ensures that we can use acquire semantics on * needextents in xfs_need_iread_extents() and be guaranteed to see a * valid format value after that load. */ ip->i_df.if_format = dip->di_format; ip->i_df.if_nextents = xfs_dfork_data_extents(dip); smp_store_release(&ip->i_df.if_needextents, ip->i_df.if_format == XFS_DINODE_FMT_BTREE ? 
1 : 0); switch (inode->i_mode & S_IFMT) { case S_IFIFO: case S_IFCHR: case S_IFBLK: case S_IFSOCK: ip->i_disk_size = 0; inode->i_rdev = xfs_to_linux_dev_t(xfs_dinode_get_rdev(dip)); return 0; case S_IFREG: case S_IFLNK: case S_IFDIR: switch (ip->i_df.if_format) { case XFS_DINODE_FMT_LOCAL: error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, be64_to_cpu(dip->di_size)); if (!error) error = xfs_ifork_verify_local_data(ip); return error; case XFS_DINODE_FMT_EXTENTS: return xfs_iformat_extents(ip, dip, XFS_DATA_FORK); case XFS_DINODE_FMT_BTREE: return xfs_iformat_btree(ip, dip, XFS_DATA_FORK); default: xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip, sizeof(*dip), __this_address); xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE); return -EFSCORRUPTED; } break; default: xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip, sizeof(*dip), __this_address); xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE); return -EFSCORRUPTED; } } static uint16_t xfs_dfork_attr_shortform_size( struct xfs_dinode *dip) { struct xfs_attr_sf_hdr *sf = XFS_DFORK_APTR(dip); return be16_to_cpu(sf->totsize); } void xfs_ifork_init_attr( struct xfs_inode *ip, enum xfs_dinode_fmt format, xfs_extnum_t nextents) { /* * Initialize the extent count early, as the per-format routines may * depend on it. Use release semantics to set needextents /after/ we * set the format. This ensures that we can use acquire semantics on * needextents in xfs_need_iread_extents() and be guaranteed to see a * valid format value after that load. */ ip->i_af.if_format = format; ip->i_af.if_nextents = nextents; smp_store_release(&ip->i_af.if_needextents, ip->i_af.if_format == XFS_DINODE_FMT_BTREE ? 1 : 0); } void xfs_ifork_zap_attr( struct xfs_inode *ip) { xfs_idestroy_fork(&ip->i_af); memset(&ip->i_af, 0, sizeof(struct xfs_ifork)); ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS; } int xfs_iformat_attr_fork( struct xfs_inode *ip, struct xfs_dinode *dip) { xfs_extnum_t naextents = xfs_dfork_attr_extents(dip); int error = 0; /* * Initialize the extent count early, as the per-format routines may * depend on it. */ xfs_ifork_init_attr(ip, dip->di_aformat, naextents); switch (ip->i_af.if_format) { case XFS_DINODE_FMT_LOCAL: error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, xfs_dfork_attr_shortform_size(dip)); if (!error) error = xfs_ifork_verify_local_attr(ip); break; case XFS_DINODE_FMT_EXTENTS: error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK); break; case XFS_DINODE_FMT_BTREE: error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK); break; default: xfs_inode_verifier_error(ip, error, __func__, dip, sizeof(*dip), __this_address); xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE); error = -EFSCORRUPTED; break; } if (error) xfs_ifork_zap_attr(ip); return error; } /* * Reallocate the space for if_broot based on the number of records * being added or deleted as indicated in rec_diff. Move the records * and pointers in if_broot to fit the new size. When shrinking this * will eliminate holes between the records and pointers created by * the caller. When growing this will create holes to be filled in * by the caller. * * The caller must not request to add more records than would fit in * the on-disk inode root. If the if_broot is currently NULL, then * if we are adding records, one will be allocated. The caller must also * not request that the number of records go below zero, although * it can go to zero. * * ip -- the inode whose if_broot area is changing * ext_diff -- the change in the number of records, positive or negative, * requested for the if_broot array. 
*/ void xfs_iroot_realloc( xfs_inode_t *ip, int rec_diff, int whichfork) { struct xfs_mount *mp = ip->i_mount; int cur_max; struct xfs_ifork *ifp; struct xfs_btree_block *new_broot; int new_max; size_t new_size; char *np; char *op; /* * Handle the degenerate case quietly. */ if (rec_diff == 0) { return; } ifp = xfs_ifork_ptr(ip, whichfork); if (rec_diff > 0) { /* * If there wasn't any memory allocated before, just * allocate it now and get out. */ if (ifp->if_broot_bytes == 0) { new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, rec_diff); ifp->if_broot = kmalloc(new_size, GFP_KERNEL | __GFP_NOFAIL); ifp->if_broot_bytes = (int)new_size; return; } /* * If there is already an existing if_broot, then we need * to realloc() it and shift the pointers to their new * location. The records don't change location because * they are kept butted up against the btree block header. */ cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0); new_max = cur_max + rec_diff; new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max); ifp->if_broot = krealloc(ifp->if_broot, new_size, GFP_KERNEL | __GFP_NOFAIL); op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, ifp->if_broot_bytes); np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, (int)new_size); ifp->if_broot_bytes = (int)new_size; ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <= xfs_inode_fork_size(ip, whichfork)); memmove(np, op, cur_max * (uint)sizeof(xfs_fsblock_t)); return; } /* * rec_diff is less than 0. In this case, we are shrinking the * if_broot buffer. It must already exist. If we go to zero * records, just get rid of the root and clear the status bit. */ ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0)); cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0); new_max = cur_max + rec_diff; ASSERT(new_max >= 0); if (new_max > 0) new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max); else new_size = 0; if (new_size > 0) { new_broot = kmalloc(new_size, GFP_KERNEL | __GFP_NOFAIL); /* * First copy over the btree block header. */ memcpy(new_broot, ifp->if_broot, XFS_BMBT_BLOCK_LEN(ip->i_mount)); } else { new_broot = NULL; } /* * Only copy the records and pointers if there are any. */ if (new_max > 0) { /* * First copy the records. */ op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1); np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1); memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t)); /* * Then copy the pointers. */ op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1, ifp->if_broot_bytes); np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1, (int)new_size); memcpy(np, op, new_max * (uint)sizeof(xfs_fsblock_t)); } kfree(ifp->if_broot); ifp->if_broot = new_broot; ifp->if_broot_bytes = (int)new_size; if (ifp->if_broot) ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <= xfs_inode_fork_size(ip, whichfork)); return; } /* * This is called when the amount of space needed for if_data * is increased or decreased. The change in size is indicated by * the number of bytes that need to be added or deleted in the * byte_diff parameter. * * If the amount of space needed has decreased below the size of the * inline buffer, then switch to using the inline buffer. Otherwise, * use krealloc() or kmalloc() to adjust the size of the buffer * to what is needed. * * ip -- the inode whose if_data area is changing * byte_diff -- the change in the number of bytes, positive or negative, * requested for the if_data array. 
*/ void * xfs_idata_realloc( struct xfs_inode *ip, int64_t byte_diff, int whichfork) { struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); int64_t new_size = ifp->if_bytes + byte_diff; ASSERT(new_size >= 0); ASSERT(new_size <= xfs_inode_fork_size(ip, whichfork)); if (byte_diff) { ifp->if_data = krealloc(ifp->if_data, new_size, GFP_KERNEL | __GFP_NOFAIL); if (new_size == 0) ifp->if_data = NULL; ifp->if_bytes = new_size; } return ifp->if_data; } /* Free all memory and reset a fork back to its initial state. */ void xfs_idestroy_fork( struct xfs_ifork *ifp) { if (ifp->if_broot != NULL) { kfree(ifp->if_broot); ifp->if_broot = NULL; } switch (ifp->if_format) { case XFS_DINODE_FMT_LOCAL: kfree(ifp->if_data); ifp->if_data = NULL; break; case XFS_DINODE_FMT_EXTENTS: case XFS_DINODE_FMT_BTREE: if (ifp->if_height) xfs_iext_destroy(ifp); break; } } /* * Convert in-core extents to on-disk form * * In the case of the data fork, the in-core and on-disk fork sizes can be * different due to delayed allocation extents. We only copy on-disk extents * here, so callers must always use the physical fork size to determine the * size of the buffer passed to this routine. We will return the size actually * used. */ int xfs_iextents_copy( struct xfs_inode *ip, struct xfs_bmbt_rec *dp, int whichfork) { int state = xfs_bmap_fork_to_state(whichfork); struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); struct xfs_iext_cursor icur; struct xfs_bmbt_irec rec; int64_t copied = 0; xfs_assert_ilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED); ASSERT(ifp->if_bytes > 0); for_each_xfs_iext(ifp, &icur, &rec) { if (isnullstartblock(rec.br_startblock)) continue; ASSERT(xfs_bmap_validate_extent(ip, whichfork, &rec) == NULL); xfs_bmbt_disk_set_all(dp, &rec); trace_xfs_write_extent(ip, &icur, state, _RET_IP_); copied += sizeof(struct xfs_bmbt_rec); dp++; } ASSERT(copied > 0); ASSERT(copied <= ifp->if_bytes); return copied; } /* * Each of the following cases stores data into the same region * of the on-disk inode, so only one of them can be valid at * any given time. While it is possible to have conflicting formats * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is * in EXTENTS format, this can only happen when the fork has * changed formats after being modified but before being flushed. * In these cases, the format always takes precedence, because the * format indicates the current state of the fork. */ void xfs_iflush_fork( struct xfs_inode *ip, struct xfs_dinode *dip, struct xfs_inode_log_item *iip, int whichfork) { char *cp; struct xfs_ifork *ifp; xfs_mount_t *mp; static const short brootflag[2] = { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT }; static const short dataflag[2] = { XFS_ILOG_DDATA, XFS_ILOG_ADATA }; static const short extflag[2] = { XFS_ILOG_DEXT, XFS_ILOG_AEXT }; if (!iip) return; ifp = xfs_ifork_ptr(ip, whichfork); /* * This can happen if we gave up in iformat in an error path, * for the attribute fork. 
*/ if (!ifp) { ASSERT(whichfork == XFS_ATTR_FORK); return; } cp = XFS_DFORK_PTR(dip, whichfork); mp = ip->i_mount; switch (ifp->if_format) { case XFS_DINODE_FMT_LOCAL: if ((iip->ili_fields & dataflag[whichfork]) && (ifp->if_bytes > 0)) { ASSERT(ifp->if_data != NULL); ASSERT(ifp->if_bytes <= xfs_inode_fork_size(ip, whichfork)); memcpy(cp, ifp->if_data, ifp->if_bytes); } break; case XFS_DINODE_FMT_EXTENTS: if ((iip->ili_fields & extflag[whichfork]) && (ifp->if_bytes > 0)) { ASSERT(ifp->if_nextents > 0); (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp, whichfork); } break; case XFS_DINODE_FMT_BTREE: if ((iip->ili_fields & brootflag[whichfork]) && (ifp->if_broot_bytes > 0)) { ASSERT(ifp->if_broot != NULL); ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <= xfs_inode_fork_size(ip, whichfork)); xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes, (xfs_bmdr_block_t *)cp, XFS_DFORK_SIZE(dip, mp, whichfork)); } break; case XFS_DINODE_FMT_DEV: if (iip->ili_fields & XFS_ILOG_DEV) { ASSERT(whichfork == XFS_DATA_FORK); xfs_dinode_put_rdev(dip, linux_to_xfs_dev_t(VFS_I(ip)->i_rdev)); } break; default: ASSERT(0); break; } } /* Convert bmap state flags to an inode fork. */ struct xfs_ifork * xfs_iext_state_to_fork( struct xfs_inode *ip, int state) { if (state & BMAP_COWFORK) return ip->i_cowfp; else if (state & BMAP_ATTRFORK) return &ip->i_af; return &ip->i_df; } /* * Initialize an inode's copy-on-write fork. */ void xfs_ifork_init_cow( struct xfs_inode *ip) { if (ip->i_cowfp) return; ip->i_cowfp = kmem_cache_zalloc(xfs_ifork_cache, GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL); ip->i_cowfp->if_format = XFS_DINODE_FMT_EXTENTS; } /* Verify the inline contents of the data fork of an inode. */ int xfs_ifork_verify_local_data( struct xfs_inode *ip) { xfs_failaddr_t fa = NULL; switch (VFS_I(ip)->i_mode & S_IFMT) { case S_IFDIR: { struct xfs_mount *mp = ip->i_mount; struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK); struct xfs_dir2_sf_hdr *sfp = ifp->if_data; fa = xfs_dir2_sf_verify(mp, sfp, ifp->if_bytes); break; } case S_IFLNK: { struct xfs_ifork *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK); fa = xfs_symlink_shortform_verify(ifp->if_data, ifp->if_bytes); break; } default: break; } if (fa) { xfs_inode_verifier_error(ip, -EFSCORRUPTED, "data fork", ip->i_df.if_data, ip->i_df.if_bytes, fa); return -EFSCORRUPTED; } return 0; } /* Verify the inline contents of the attr fork of an inode. */ int xfs_ifork_verify_local_attr( struct xfs_inode *ip) { struct xfs_ifork *ifp = &ip->i_af; xfs_failaddr_t fa; if (!xfs_inode_has_attr_fork(ip)) { fa = __this_address; } else { struct xfs_ifork *ifp = &ip->i_af; ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL); fa = xfs_attr_shortform_verify(ifp->if_data, ifp->if_bytes); } if (fa) { xfs_inode_verifier_error(ip, -EFSCORRUPTED, "attr fork", ifp->if_data, ifp->if_bytes, fa); return -EFSCORRUPTED; } return 0; } /* * Check if the inode fork supports adding nr_to_add more extents. * * If it doesn't but we can upgrade it to large extent counters, do the upgrade. * If we can't upgrade or are already using big counters but still can't fit the * additional extents, return -EFBIG. 
 */
int
xfs_iext_count_extend(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	uint			nr_to_add)
{
	struct xfs_mount	*mp = ip->i_mount;
	bool			has_large =
		xfs_inode_has_large_extent_counts(ip);
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	uint64_t		nr_exts;

	ASSERT(nr_to_add <= XFS_MAX_EXTCNT_UPGRADE_NR);

	if (whichfork == XFS_COW_FORK)
		return 0;

	/* no point in upgrading if if_nextents overflows */
	nr_exts = ifp->if_nextents + nr_to_add;
	if (nr_exts < ifp->if_nextents)
		return -EFBIG;

	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_REDUCE_MAX_IEXTENTS) &&
	    nr_exts > 10)
		return -EFBIG;

	if (nr_exts > xfs_iext_max_nextents(has_large, whichfork)) {
		if (has_large || !xfs_has_large_extent_counts(mp))
			return -EFBIG;
		ip->i_diflags2 |= XFS_DIFLAG2_NREXT64;
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	}

	return 0;
}

/* Decide if a file mapping is on the realtime device or not. */
bool
xfs_ifork_is_realtime(
	struct xfs_inode	*ip,
	int			whichfork)
{
	return XFS_IS_REALTIME_INODE(ip) && whichfork != XFS_ATTR_FORK;
}
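/*
 * Illustrative userspace sketch, not part of the XFS source above: it
 * mimics the xfs_init_local_fork() convention of overallocating a
 * symlink body by one byte and NUL-terminating it, so the buffer can be
 * handed directly to string-based consumers. demo_init_local_fork() is
 * a hypothetical name.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *demo_init_local_fork(const void *data, size_t size,
				  bool zero_terminate)
{
	/* Symlink bodies get one extra byte for the terminating NUL. */
	size_t mem_size = size + (zero_terminate ? 1 : 0);
	char *new_data;

	if (!size)
		return NULL;	/* an empty fork keeps a NULL if_data */
	new_data = malloc(mem_size);
	if (!new_data)
		return NULL;
	memcpy(new_data, data, size);
	if (zero_terminate)
		new_data[size] = '\0';
	return new_data;
}

int main(void)
{
	const char target[4] = { '/', 't', 'm', 'p' };	/* no NUL on disk */
	char *body = demo_init_local_fork(target, sizeof(target), true);

	if (body) {
		printf("symlink target: \"%s\"\n", body);	/* NUL-terminated */
		free(body);
	}
	return 0;
}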
// SPDX-License-Identifier: GPL-2.0 #include "bcachefs.h" #include "backpointers.h" #include "bkey_methods.h" #include "btree_cache.h" #include "btree_types.h" #include "alloc_background.h" #include "dirent.h" #include "disk_accounting.h" #include "ec.h" #include "error.h" #include "extents.h" #include "inode.h" #include "io_misc.h" #include "lru.h" #include "quota.h" #include "reflink.h" #include "snapshot.h" #include "subvolume.h" #include "xattr.h" const char * const bch2_bkey_types[] = { #define x(name, nr) #name, BCH_BKEY_TYPES() #undef x NULL }; static int deleted_key_validate(struct bch_fs *c, struct bkey_s_c k, enum bch_validate_flags flags) { return 0; } #define bch2_bkey_ops_deleted ((struct bkey_ops) { \ .key_validate = deleted_key_validate, \ }) #define bch2_bkey_ops_whiteout ((struct bkey_ops) { \ .key_validate = deleted_key_validate, \ }) static int empty_val_key_validate(struct bch_fs *c, struct bkey_s_c k, enum bch_validate_flags flags) { int ret = 0; bkey_fsck_err_on(bkey_val_bytes(k.k), c, bkey_val_size_nonzero, "incorrect value size (%zu != 0)", bkey_val_bytes(k.k)); fsck_err: return ret; } #define bch2_bkey_ops_error ((struct bkey_ops) { \ .key_validate = empty_val_key_validate, \ }) static int key_type_cookie_validate(struct bch_fs *c, struct bkey_s_c k, enum bch_validate_flags flags) { return 0; } static void key_type_cookie_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k) { struct bkey_s_c_cookie ck = bkey_s_c_to_cookie(k); prt_printf(out, "%llu", le64_to_cpu(ck.v->cookie)); } #define bch2_bkey_ops_cookie ((struct bkey_ops) { \ .key_validate = key_type_cookie_validate, \ .val_to_text = key_type_cookie_to_text, \ .min_val_size = 8, \ }) #define
bch2_bkey_ops_hash_whiteout ((struct bkey_ops) {\ .key_validate = empty_val_key_validate, \ }) static int key_type_inline_data_validate(struct bch_fs *c, struct bkey_s_c k, enum bch_validate_flags flags) { return 0; } static void key_type_inline_data_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k) { struct bkey_s_c_inline_data d = bkey_s_c_to_inline_data(k); unsigned datalen = bkey_inline_data_bytes(k.k); prt_printf(out, "datalen %u: %*phN", datalen, min(datalen, 32U), d.v->data); } #define bch2_bkey_ops_inline_data ((struct bkey_ops) { \ .key_validate = key_type_inline_data_validate, \ .val_to_text = key_type_inline_data_to_text, \ }) static bool key_type_set_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r) { bch2_key_resize(l.k, l.k->size + r.k->size); return true; } #define bch2_bkey_ops_set ((struct bkey_ops) { \ .key_validate = empty_val_key_validate, \ .key_merge = key_type_set_merge, \ }) const struct bkey_ops bch2_bkey_ops[] = { #define x(name, nr) [KEY_TYPE_##name] = bch2_bkey_ops_##name, BCH_BKEY_TYPES() #undef x }; const struct bkey_ops bch2_bkey_null_ops = { }; int bch2_bkey_val_validate(struct bch_fs *c, struct bkey_s_c k, enum bch_validate_flags flags) { if (test_bit(BCH_FS_no_invalid_checks, &c->flags)) return 0; const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type); int ret = 0; bkey_fsck_err_on(bkey_val_bytes(k.k) < ops->min_val_size, c, bkey_val_size_too_small, "bad val size (%zu < %u)", bkey_val_bytes(k.k), ops->min_val_size); if (!ops->key_validate) return 0; ret = ops->key_validate(c, k, flags); fsck_err: return ret; } static u64 bch2_key_types_allowed[] = { [BKEY_TYPE_btree] = BIT_ULL(KEY_TYPE_deleted)| BIT_ULL(KEY_TYPE_btree_ptr)| BIT_ULL(KEY_TYPE_btree_ptr_v2), #define x(name, nr, flags, keys) [BKEY_TYPE_##name] = BIT_ULL(KEY_TYPE_deleted)|keys, BCH_BTREE_IDS() #undef x }; const char *bch2_btree_node_type_str(enum btree_node_type type) { return type == BKEY_TYPE_btree ? "internal btree node" : bch2_btree_id_str(type - 1); } int __bch2_bkey_validate(struct bch_fs *c, struct bkey_s_c k, enum btree_node_type type, enum bch_validate_flags flags) { if (test_bit(BCH_FS_no_invalid_checks, &c->flags)) return 0; int ret = 0; bkey_fsck_err_on(k.k->u64s < BKEY_U64s, c, bkey_u64s_too_small, "u64s too small (%u < %zu)", k.k->u64s, BKEY_U64s); if (type >= BKEY_TYPE_NR) return 0; bkey_fsck_err_on(k.k->type < KEY_TYPE_MAX && (type == BKEY_TYPE_btree || (flags & BCH_VALIDATE_commit)) && !(bch2_key_types_allowed[type] & BIT_ULL(k.k->type)), c, bkey_invalid_type_for_btree, "invalid key type for btree %s (%s)", bch2_btree_node_type_str(type), k.k->type < KEY_TYPE_MAX ? 
bch2_bkey_types[k.k->type] : "(unknown)"); if (btree_node_type_is_extents(type) && !bkey_whiteout(k.k)) { bkey_fsck_err_on(k.k->size == 0, c, bkey_extent_size_zero, "size == 0"); bkey_fsck_err_on(k.k->size > k.k->p.offset, c, bkey_extent_size_greater_than_offset, "size greater than offset (%u > %llu)", k.k->size, k.k->p.offset); } else { bkey_fsck_err_on(k.k->size, c, bkey_size_nonzero, "size != 0"); } if (type != BKEY_TYPE_btree) { enum btree_id btree = type - 1; if (btree_type_has_snapshots(btree)) { bkey_fsck_err_on(!k.k->p.snapshot, c, bkey_snapshot_zero, "snapshot == 0"); } else if (!btree_type_has_snapshot_field(btree)) { bkey_fsck_err_on(k.k->p.snapshot, c, bkey_snapshot_nonzero, "nonzero snapshot"); } else { /* * btree uses snapshot field but it's not required to be * nonzero */ } bkey_fsck_err_on(bkey_eq(k.k->p, POS_MAX), c, bkey_at_pos_max, "key at POS_MAX"); } fsck_err: return ret; } int bch2_bkey_validate(struct bch_fs *c, struct bkey_s_c k, enum btree_node_type type, enum bch_validate_flags flags) { return __bch2_bkey_validate(c, k, type, flags) ?: bch2_bkey_val_validate(c, k, flags); } int bch2_bkey_in_btree_node(struct bch_fs *c, struct btree *b, struct bkey_s_c k, enum bch_validate_flags flags) { int ret = 0; bkey_fsck_err_on(bpos_lt(k.k->p, b->data->min_key), c, bkey_before_start_of_btree_node, "key before start of btree node"); bkey_fsck_err_on(bpos_gt(k.k->p, b->data->max_key), c, bkey_after_end_of_btree_node, "key past end of btree node"); fsck_err: return ret; } void bch2_bpos_to_text(struct printbuf *out, struct bpos pos) { if (bpos_eq(pos, POS_MIN)) prt_printf(out, "POS_MIN"); else if (bpos_eq(pos, POS_MAX)) prt_printf(out, "POS_MAX"); else if (bpos_eq(pos, SPOS_MAX)) prt_printf(out, "SPOS_MAX"); else { if (pos.inode == U64_MAX) prt_printf(out, "U64_MAX"); else prt_printf(out, "%llu", pos.inode); prt_printf(out, ":"); if (pos.offset == U64_MAX) prt_printf(out, "U64_MAX"); else prt_printf(out, "%llu", pos.offset); prt_printf(out, ":"); if (pos.snapshot == U32_MAX) prt_printf(out, "U32_MAX"); else prt_printf(out, "%u", pos.snapshot); } } void bch2_bkey_to_text(struct printbuf *out, const struct bkey *k) { if (k) { prt_printf(out, "u64s %u type ", k->u64s); if (k->type < KEY_TYPE_MAX) prt_printf(out, "%s ", bch2_bkey_types[k->type]); else prt_printf(out, "%u ", k->type); bch2_bpos_to_text(out, k->p); prt_printf(out, " len %u ver %llu", k->size, k->version.lo); } else { prt_printf(out, "(null)"); } } void bch2_val_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k) { const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type); if (likely(ops->val_to_text)) ops->val_to_text(out, c, k); } void bch2_bkey_val_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k) { bch2_bkey_to_text(out, k.k); if (bkey_val_bytes(k.k)) { prt_printf(out, ": "); bch2_val_to_text(out, c, k); } } void bch2_bkey_swab_val(struct bkey_s k) { const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type); if (ops->swab) ops->swab(k); } bool bch2_bkey_normalize(struct bch_fs *c, struct bkey_s k) { const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type); return ops->key_normalize ? 
ops->key_normalize(c, k) : false; } bool bch2_bkey_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r) { const struct bkey_ops *ops = bch2_bkey_type_ops(l.k->type); return ops->key_merge && bch2_bkey_maybe_mergable(l.k, r.k) && (u64) l.k->size + r.k->size <= KEY_SIZE_MAX && !bch2_key_merging_disabled && ops->key_merge(c, l, r); } static const struct old_bkey_type { u8 btree_node_type; u8 old; u8 new; } bkey_renumber_table[] = { {BKEY_TYPE_btree, 128, KEY_TYPE_btree_ptr }, {BKEY_TYPE_extents, 128, KEY_TYPE_extent }, {BKEY_TYPE_extents, 129, KEY_TYPE_extent }, {BKEY_TYPE_extents, 130, KEY_TYPE_reservation }, {BKEY_TYPE_inodes, 128, KEY_TYPE_inode }, {BKEY_TYPE_inodes, 130, KEY_TYPE_inode_generation }, {BKEY_TYPE_dirents, 128, KEY_TYPE_dirent }, {BKEY_TYPE_dirents, 129, KEY_TYPE_hash_whiteout }, {BKEY_TYPE_xattrs, 128, KEY_TYPE_xattr }, {BKEY_TYPE_xattrs, 129, KEY_TYPE_hash_whiteout }, {BKEY_TYPE_alloc, 128, KEY_TYPE_alloc }, {BKEY_TYPE_quotas, 128, KEY_TYPE_quota }, }; void bch2_bkey_renumber(enum btree_node_type btree_node_type, struct bkey_packed *k, int write) { const struct old_bkey_type *i; for (i = bkey_renumber_table; i < bkey_renumber_table + ARRAY_SIZE(bkey_renumber_table); i++) if (btree_node_type == i->btree_node_type && k->type == (write ? i->new : i->old)) { k->type = write ? i->old : i->new; break; } } void __bch2_bkey_compat(unsigned level, enum btree_id btree_id, unsigned version, unsigned big_endian, int write, struct bkey_format *f, struct bkey_packed *k) { const struct bkey_ops *ops; struct bkey uk; unsigned nr_compat = 5; int i; /* * Do these operations in reverse order in the write path: */ for (i = 0; i < nr_compat; i++) switch (!write ? i : nr_compat - 1 - i) { case 0: if (big_endian != CPU_BIG_ENDIAN) { bch2_bkey_swab_key(f, k); } else if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) { bch2_bkey_swab_key(f, k); bch2_bkey_swab_key(f, k); } break; case 1: if (version < bcachefs_metadata_version_bkey_renumber) bch2_bkey_renumber(__btree_node_type(level, btree_id), k, write); break; case 2: if (version < bcachefs_metadata_version_inode_btree_change && btree_id == BTREE_ID_inodes) { if (!bkey_packed(k)) { struct bkey_i *u = packed_to_bkey(k); swap(u->k.p.inode, u->k.p.offset); } else if (f->bits_per_field[BKEY_FIELD_INODE] && f->bits_per_field[BKEY_FIELD_OFFSET]) { struct bkey_format tmp = *f, *in = f, *out = &tmp; swap(tmp.bits_per_field[BKEY_FIELD_INODE], tmp.bits_per_field[BKEY_FIELD_OFFSET]); swap(tmp.field_offset[BKEY_FIELD_INODE], tmp.field_offset[BKEY_FIELD_OFFSET]); if (!write) swap(in, out); uk = __bch2_bkey_unpack_key(in, k); swap(uk.p.inode, uk.p.offset); BUG_ON(!bch2_bkey_pack_key(k, &uk, out)); } } break; case 3: if (version < bcachefs_metadata_version_snapshot && (level || btree_type_has_snapshots(btree_id))) { struct bkey_i *u = packed_to_bkey(k); if (u) { u->k.p.snapshot = write ? 0 : U32_MAX; } else { u64 min_packed = le64_to_cpu(f->field_offset[BKEY_FIELD_SNAPSHOT]); u64 max_packed = min_packed + ~(~0ULL << f->bits_per_field[BKEY_FIELD_SNAPSHOT]); uk = __bch2_bkey_unpack_key(f, k); uk.p.snapshot = write ? min_packed : min_t(u64, U32_MAX, max_packed); BUG_ON(!bch2_bkey_pack_key(k, &uk, f)); } } break; case 4: { struct bkey_s u; if (!bkey_packed(k)) { u = bkey_i_to_s(packed_to_bkey(k)); } else { uk = __bch2_bkey_unpack_key(f, k); u.k = &uk; u.v = bkeyp_val(f, k); } if (big_endian != CPU_BIG_ENDIAN) bch2_bkey_swab_val(u); ops = bch2_bkey_type_ops(k->type); if (ops->compat) ops->compat(btree_id, version, big_endian, write, u); break; } default: BUG(); } } |
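/*
 * Illustrative userspace sketch, not part of the bcachefs source above:
 * a table-driven type renumbering in the style of bch2_bkey_renumber(),
 * mapping legacy on-disk type codes to current ones on read and back on
 * write. The struct, the table contents and demo_renumber() are all
 * hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

struct old_bkey_map {
	uint8_t old;	/* legacy on-disk type code */
	uint8_t cur;	/* current in-memory type code */
};

static const struct old_bkey_map demo_table[] = {
	{ 128, 1 },
	{ 129, 2 },
	{ 130, 3 },
};

static uint8_t demo_renumber(uint8_t type, int write)
{
	/*
	 * On write, translate current codes back to legacy ones; on
	 * read, translate legacy codes forward. Unknown codes pass
	 * through unchanged, mirroring the kernel helper.
	 */
	for (size_t i = 0; i < sizeof(demo_table) / sizeof(demo_table[0]); i++)
		if (type == (write ? demo_table[i].cur : demo_table[i].old))
			return write ? demo_table[i].old : demo_table[i].cur;
	return type;
}

int main(void)
{
	printf("read:  128 -> %u\n", demo_renumber(128, 0));	/* 1 */
	printf("write: 1   -> %u\n", demo_renumber(1, 1));	/* 128 */
	return 0;
}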
// SPDX-License-Identifier: GPL-2.0
#include <linux/bcd.h>
#include <linux/export.h>

unsigned _bcd2bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;
}
EXPORT_SYMBOL(_bcd2bin);

unsigned char _bin2bcd(unsigned val)
{
	return ((val / 10) << 4) + val % 10;
}
EXPORT_SYMBOL(_bin2bcd);
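/*
 * Quick userspace check of the BCD helpers above (illustrative only,
 * compiled outside the kernel): 0x42 in packed BCD is decimal 42, and
 * decimal 59 packs to 0x59.
 */
#include <stdio.h>

static unsigned bcd2bin(unsigned char val)
{
	/* low nibble is the ones digit, high nibble the tens digit */
	return (val & 0x0f) + (val >> 4) * 10;
}

static unsigned char bin2bcd(unsigned val)
{
	/* tens digit into the high nibble, ones digit into the low */
	return ((val / 10) << 4) + val % 10;
}

int main(void)
{
	printf("bcd2bin(0x42) = %u\n", bcd2bin(0x42));	/* prints 42 */
	printf("bin2bcd(59)   = 0x%02x\n", bin2bcd(59));	/* prints 0x59 */
	return 0;
}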
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com */ #include <linux/bpf.h> #include <linux/btf.h> #include <linux/bpf-cgroup.h> #include <linux/cgroup.h> #include <linux/rcupdate.h> #include <linux/random.h> #include <linux/smp.h> #include <linux/topology.h> #include <linux/ktime.h> #include <linux/sched.h> #include <linux/uidgid.h> #include <linux/filter.h> #include <linux/ctype.h> #include <linux/jiffies.h> #include <linux/pid_namespace.h> #include <linux/poison.h> #include <linux/proc_ns.h> #include <linux/sched/task.h> #include <linux/security.h> #include <linux/btf_ids.h> #include <linux/bpf_mem_alloc.h> #include <linux/kasan.h> #include "../../lib/kstrtox.h" /* If kernel subsystem is allowing eBPF programs to call this function, * inside its own verifier_ops->get_func_proto() callback it should return * bpf_map_lookup_elem_proto, so that verifier can properly check the arguments * * Different map implementations will rely on rcu in map methods * lookup/update/delete, therefore eBPF programs must run under rcu lock * if program is allowed to access maps, so check rcu_read_lock_held() or * rcu_read_lock_trace_held() in all three functions. */ BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key) { WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && !rcu_read_lock_bh_held()); return (unsigned long) map->ops->map_lookup_elem(map, key); } const struct bpf_func_proto bpf_map_lookup_elem_proto = { .func = bpf_map_lookup_elem, .gpl_only = false, .pkt_access = true, .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_PTR_TO_MAP_KEY, }; BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key, void *, value, u64, flags) { WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && !rcu_read_lock_bh_held()); return map->ops->map_update_elem(map, key, value, flags); } const struct bpf_func_proto bpf_map_update_elem_proto = { .func = bpf_map_update_elem, .gpl_only = false, .pkt_access = true, .ret_type = RET_INTEGER, .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_PTR_TO_MAP_KEY, .arg3_type = ARG_PTR_TO_MAP_VALUE, .arg4_type = ARG_ANYTHING, }; BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key) { WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() && !rcu_read_lock_bh_held()); return map->ops->map_delete_elem(map, key); } const struct bpf_func_proto bpf_map_delete_elem_proto = { .func = bpf_map_delete_elem, .gpl_only = false, .pkt_access = true, .ret_type = RET_INTEGER, .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_PTR_TO_MAP_KEY, }; BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags) { return map->ops->map_push_elem(map, value, flags); } const struct bpf_func_proto bpf_map_push_elem_proto = { .func = bpf_map_push_elem, .gpl_only = false, .pkt_access = true, .ret_type = RET_INTEGER, .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_PTR_TO_MAP_VALUE, .arg3_type = ARG_ANYTHING, }; BPF_CALL_2(bpf_map_pop_elem, struct
bpf_map *, map, void *, value) { return map->ops->map_pop_elem(map, value); } const struct bpf_func_proto bpf_map_pop_elem_proto = { .func = bpf_map_pop_elem, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT, }; BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value) { return map->ops->map_peek_elem(map, value); } const struct bpf_func_proto bpf_map_peek_elem_proto = { .func = bpf_map_peek_elem, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT, }; BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu) { WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); return (unsigned long) map->ops->map_lookup_percpu_elem(map, key, cpu); } const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto = { .func = bpf_map_lookup_percpu_elem, .gpl_only = false, .pkt_access = true, .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_PTR_TO_MAP_KEY, .arg3_type = ARG_ANYTHING, }; const struct bpf_func_proto bpf_get_prandom_u32_proto = { .func = bpf_user_rnd_u32, .gpl_only = false, .ret_type = RET_INTEGER, }; BPF_CALL_0(bpf_get_smp_processor_id) { return smp_processor_id(); } const struct bpf_func_proto bpf_get_smp_processor_id_proto = { .func = bpf_get_smp_processor_id, .gpl_only = false, .ret_type = RET_INTEGER, }; BPF_CALL_0(bpf_get_numa_node_id) { return numa_node_id(); } const struct bpf_func_proto bpf_get_numa_node_id_proto = { .func = bpf_get_numa_node_id, .gpl_only = false, .ret_type = RET_INTEGER, }; BPF_CALL_0(bpf_ktime_get_ns) { /* NMI safe access to clock monotonic */ return ktime_get_mono_fast_ns(); } const struct bpf_func_proto bpf_ktime_get_ns_proto = { .func = bpf_ktime_get_ns, .gpl_only = false, .ret_type = RET_INTEGER, }; BPF_CALL_0(bpf_ktime_get_boot_ns) { /* NMI safe access to clock boottime */ return ktime_get_boot_fast_ns(); } const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = { .func = bpf_ktime_get_boot_ns, .gpl_only = false, .ret_type = RET_INTEGER, }; BPF_CALL_0(bpf_ktime_get_coarse_ns) { return ktime_get_coarse_ns(); } const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto = { .func = bpf_ktime_get_coarse_ns, .gpl_only = false, .ret_type = RET_INTEGER, }; BPF_CALL_0(bpf_ktime_get_tai_ns) { /* NMI safe access to clock tai */ return ktime_get_tai_fast_ns(); } const struct bpf_func_proto bpf_ktime_get_tai_ns_proto = { .func = bpf_ktime_get_tai_ns, .gpl_only = false, .ret_type = RET_INTEGER, }; BPF_CALL_0(bpf_get_current_pid_tgid) { struct task_struct *task = current; if (unlikely(!task)) return -EINVAL; return (u64) task->tgid << 32 | task->pid; } const struct bpf_func_proto bpf_get_current_pid_tgid_proto = { .func = bpf_get_current_pid_tgid, .gpl_only = false, .ret_type = RET_INTEGER, }; BPF_CALL_0(bpf_get_current_uid_gid) { struct task_struct *task = current; kuid_t uid; kgid_t gid; if (unlikely(!task)) return -EINVAL; current_uid_gid(&uid, &gid); return (u64) from_kgid(&init_user_ns, gid) << 32 | from_kuid(&init_user_ns, uid); } const struct bpf_func_proto bpf_get_current_uid_gid_proto = { .func = bpf_get_current_uid_gid, .gpl_only = false, .ret_type = RET_INTEGER, }; BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size) { struct task_struct *task = current; if (unlikely(!task)) goto err_clear; /* Verifier guarantees that size > 0 */ strscpy_pad(buf, task->comm, size); return 0; err_clear: memset(buf, 0, size); return -EINVAL; } 
const struct bpf_func_proto bpf_get_current_comm_proto = { .func = bpf_get_current_comm, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_UNINIT_MEM, .arg2_type = ARG_CONST_SIZE, }; #if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK) static inline void __bpf_spin_lock(struct bpf_spin_lock *lock) { arch_spinlock_t *l = (void *)lock; union { __u32 val; arch_spinlock_t lock; } u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED }; compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0"); BUILD_BUG_ON(sizeof(*l) != sizeof(__u32)); BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32)); preempt_disable(); arch_spin_lock(l); } static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock) { arch_spinlock_t *l = (void *)lock; arch_spin_unlock(l); preempt_enable(); } #else static inline void __bpf_spin_lock(struct bpf_spin_lock *lock) { atomic_t *l = (void *)lock; BUILD_BUG_ON(sizeof(*l) != sizeof(*lock)); do { atomic_cond_read_relaxed(l, !VAL); } while (atomic_xchg(l, 1)); } static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock) { atomic_t *l = (void *)lock; atomic_set_release(l, 0); } #endif static DEFINE_PER_CPU(unsigned long, irqsave_flags); static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock) { unsigned long flags; local_irq_save(flags); __bpf_spin_lock(lock); __this_cpu_write(irqsave_flags, flags); } NOTRACE_BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock) { __bpf_spin_lock_irqsave(lock); return 0; } const struct bpf_func_proto bpf_spin_lock_proto = { .func = bpf_spin_lock, .gpl_only = false, .ret_type = RET_VOID, .arg1_type = ARG_PTR_TO_SPIN_LOCK, .arg1_btf_id = BPF_PTR_POISON, }; static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock) { unsigned long flags; flags = __this_cpu_read(irqsave_flags); __bpf_spin_unlock(lock); local_irq_restore(flags); } NOTRACE_BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock) { __bpf_spin_unlock_irqrestore(lock); return 0; } const struct bpf_func_proto bpf_spin_unlock_proto = { .func = bpf_spin_unlock, .gpl_only = false, .ret_type = RET_VOID, .arg1_type = ARG_PTR_TO_SPIN_LOCK, .arg1_btf_id = BPF_PTR_POISON, }; void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, bool lock_src) { struct bpf_spin_lock *lock; if (lock_src) lock = src + map->record->spin_lock_off; else lock = dst + map->record->spin_lock_off; preempt_disable(); __bpf_spin_lock_irqsave(lock); copy_map_value(map, dst, src); __bpf_spin_unlock_irqrestore(lock); preempt_enable(); } BPF_CALL_0(bpf_jiffies64) { return get_jiffies_64(); } const struct bpf_func_proto bpf_jiffies64_proto = { .func = bpf_jiffies64, .gpl_only = false, .ret_type = RET_INTEGER, }; #ifdef CONFIG_CGROUPS BPF_CALL_0(bpf_get_current_cgroup_id) { struct cgroup *cgrp; u64 cgrp_id; rcu_read_lock(); cgrp = task_dfl_cgroup(current); cgrp_id = cgroup_id(cgrp); rcu_read_unlock(); return cgrp_id; } const struct bpf_func_proto bpf_get_current_cgroup_id_proto = { .func = bpf_get_current_cgroup_id, .gpl_only = false, .ret_type = RET_INTEGER, }; BPF_CALL_1(bpf_get_current_ancestor_cgroup_id, int, ancestor_level) { struct cgroup *cgrp; struct cgroup *ancestor; u64 cgrp_id; rcu_read_lock(); cgrp = task_dfl_cgroup(current); ancestor = cgroup_ancestor(cgrp, ancestor_level); cgrp_id = ancestor ? 
cgroup_id(ancestor) : 0; rcu_read_unlock(); return cgrp_id; } const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto = { .func = bpf_get_current_ancestor_cgroup_id, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_ANYTHING, }; #endif /* CONFIG_CGROUPS */ #define BPF_STRTOX_BASE_MASK 0x1F static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags, unsigned long long *res, bool *is_negative) { unsigned int base = flags & BPF_STRTOX_BASE_MASK; const char *cur_buf = buf; size_t cur_len = buf_len; unsigned int consumed; size_t val_len; char str[64]; if (!buf || !buf_len || !res || !is_negative) return -EINVAL; if (base != 0 && base != 8 && base != 10 && base != 16) return -EINVAL; if (flags & ~BPF_STRTOX_BASE_MASK) return -EINVAL; while (cur_buf < buf + buf_len && isspace(*cur_buf)) ++cur_buf; *is_negative = (cur_buf < buf + buf_len && *cur_buf == '-'); if (*is_negative) ++cur_buf; consumed = cur_buf - buf; cur_len -= consumed; if (!cur_len) return -EINVAL; cur_len = min(cur_len, sizeof(str) - 1); memcpy(str, cur_buf, cur_len); str[cur_len] = '\0'; cur_buf = str; cur_buf = _parse_integer_fixup_radix(cur_buf, &base); val_len = _parse_integer(cur_buf, base, res); if (val_len & KSTRTOX_OVERFLOW) return -ERANGE; if (val_len == 0) return -EINVAL; cur_buf += val_len; consumed += cur_buf - str; return consumed; } static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags, long long *res) { unsigned long long _res; bool is_negative; int err; err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative); if (err < 0) return err; if (is_negative) { if ((long long)-_res > 0) return -ERANGE; *res = -_res; } else { if ((long long)_res < 0) return -ERANGE; *res = _res; } return err; } BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags, long *, res) { long long _res; int err; err = __bpf_strtoll(buf, buf_len, flags, &_res); if (err < 0) return err; if (_res != (long)_res) return -ERANGE; *res = _res; return err; } const struct bpf_func_proto bpf_strtol_proto = { .func = bpf_strtol, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg2_type = ARG_CONST_SIZE, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_PTR_TO_LONG, }; BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags, unsigned long *, res) { unsigned long long _res; bool is_negative; int err; err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative); if (err < 0) return err; if (is_negative) return -EINVAL; if (_res != (unsigned long)_res) return -ERANGE; *res = _res; return err; } const struct bpf_func_proto bpf_strtoul_proto = { .func = bpf_strtoul, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg2_type = ARG_CONST_SIZE, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_PTR_TO_LONG, }; BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2) { return strncmp(s1, s2, s1_sz); } static const struct bpf_func_proto bpf_strncmp_proto = { .func = bpf_strncmp, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg2_type = ARG_CONST_SIZE, .arg3_type = ARG_PTR_TO_CONST_STR, }; BPF_CALL_4(bpf_get_ns_current_pid_tgid, u64, dev, u64, ino, struct bpf_pidns_info *, nsdata, u32, size) { struct task_struct *task = current; struct pid_namespace *pidns; int err = -EINVAL; if (unlikely(size != sizeof(struct bpf_pidns_info))) goto clear; if (unlikely((u64)(dev_t)dev != dev)) goto clear; if (unlikely(!task)) goto clear; pidns = task_active_pid_ns(task); if 
(unlikely(!pidns)) { err = -ENOENT; goto clear; } if (!ns_match(&pidns->ns, (dev_t)dev, ino)) goto clear; nsdata->pid = task_pid_nr_ns(task, pidns); nsdata->tgid = task_tgid_nr_ns(task, pidns); return 0; clear: memset((void *)nsdata, 0, (size_t) size); return err; } const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = { .func = bpf_get_ns_current_pid_tgid, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_ANYTHING, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_UNINIT_MEM, .arg4_type = ARG_CONST_SIZE, }; static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = { .func = bpf_get_raw_cpu_id, .gpl_only = false, .ret_type = RET_INTEGER, }; BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map, u64, flags, void *, data, u64, size) { if (unlikely(flags & ~(BPF_F_INDEX_MASK))) return -EINVAL; return bpf_event_output(map, flags, data, size, NULL, 0, NULL); } const struct bpf_func_proto bpf_event_output_data_proto = { .func = bpf_event_output_data, .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size, const void __user *, user_ptr) { int ret = copy_from_user(dst, user_ptr, size); if (unlikely(ret)) { memset(dst, 0, size); ret = -EFAULT; } return ret; } const struct bpf_func_proto bpf_copy_from_user_proto = { .func = bpf_copy_from_user, .gpl_only = false, .might_sleep = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_UNINIT_MEM, .arg2_type = ARG_CONST_SIZE_OR_ZERO, .arg3_type = ARG_ANYTHING, }; BPF_CALL_5(bpf_copy_from_user_task, void *, dst, u32, size, const void __user *, user_ptr, struct task_struct *, tsk, u64, flags) { int ret; /* flags is not used yet */ if (unlikely(flags)) return -EINVAL; if (unlikely(!size)) return 0; ret = access_process_vm(tsk, (unsigned long)user_ptr, dst, size, 0); if (ret == size) return 0; memset(dst, 0, size); /* Return -EFAULT for partial read */ return ret < 0 ? 
ret : -EFAULT; } const struct bpf_func_proto bpf_copy_from_user_task_proto = { .func = bpf_copy_from_user_task, .gpl_only = true, .might_sleep = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_UNINIT_MEM, .arg2_type = ARG_CONST_SIZE_OR_ZERO, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_PTR_TO_BTF_ID, .arg4_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK], .arg5_type = ARG_ANYTHING }; BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu) { if (cpu >= nr_cpu_ids) return (unsigned long)NULL; return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu); } const struct bpf_func_proto bpf_per_cpu_ptr_proto = { .func = bpf_per_cpu_ptr, .gpl_only = false, .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | PTR_MAYBE_NULL | MEM_RDONLY, .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID, .arg2_type = ARG_ANYTHING, }; BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr) { return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr); } const struct bpf_func_proto bpf_this_cpu_ptr_proto = { .func = bpf_this_cpu_ptr, .gpl_only = false, .ret_type = RET_PTR_TO_MEM_OR_BTF_ID | MEM_RDONLY, .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID, }; static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype, size_t bufsz) { void __user *user_ptr = (__force void __user *)unsafe_ptr; buf[0] = 0; switch (fmt_ptype) { case 's': #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE if ((unsigned long)unsafe_ptr < TASK_SIZE) return strncpy_from_user_nofault(buf, user_ptr, bufsz); fallthrough; #endif case 'k': return strncpy_from_kernel_nofault(buf, unsafe_ptr, bufsz); case 'u': return strncpy_from_user_nofault(buf, user_ptr, bufsz); } return -EINVAL; } /* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary * arguments representation. */ #define MAX_BPRINTF_BIN_ARGS 512 /* Support executing three nested bprintf helper calls on a given CPU */ #define MAX_BPRINTF_NEST_LEVEL 3 struct bpf_bprintf_buffers { char bin_args[MAX_BPRINTF_BIN_ARGS]; char buf[MAX_BPRINTF_BUF]; }; static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs); static DEFINE_PER_CPU(int, bpf_bprintf_nest_level); static int try_get_buffers(struct bpf_bprintf_buffers **bufs) { int nest_level; preempt_disable(); nest_level = this_cpu_inc_return(bpf_bprintf_nest_level); if (WARN_ON_ONCE(nest_level > MAX_BPRINTF_NEST_LEVEL)) { this_cpu_dec(bpf_bprintf_nest_level); preempt_enable(); return -EBUSY; } *bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]); return 0; } void bpf_bprintf_cleanup(struct bpf_bprintf_data *data) { if (!data->bin_args && !data->buf) return; if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0)) return; this_cpu_dec(bpf_bprintf_nest_level); preempt_enable(); } /* * bpf_bprintf_prepare - Generic pass on format strings for bprintf-like helpers * * Returns a negative value if fmt is an invalid format string or 0 otherwise. * * This can be used in two ways: * - Format string verification only: when data->get_bin_args is false * - Arguments preparation: in addition to the above verification, it writes in * data->bin_args a binary representation of arguments usable by bstr_printf * where pointers from BPF have been sanitized. * * In argument preparation mode, if 0 is returned, safe temporary buffers are * allocated and bpf_bprintf_cleanup should be called to free them after use. 
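 *
 * A minimal caller sketch (it mirrors what bpf_snprintf() further below
 * does; fmt, raw_args, num_args and the output buffer are placeholders):
 *
 *	struct bpf_bprintf_data data = { .get_bin_args = true };
 *	int err;
 *
 *	err = bpf_bprintf_prepare(fmt, fmt_size, raw_args, num_args, &data);
 *	if (err < 0)
 *		return err;
 *	err = bstr_printf(out, out_size, fmt, data.bin_args);
 *	bpf_bprintf_cleanup(&data);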
*/ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, u32 num_args, struct bpf_bprintf_data *data) { bool get_buffers = (data->get_bin_args && num_args) || data->get_buf; char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end; struct bpf_bprintf_buffers *buffers = NULL; size_t sizeof_cur_arg, sizeof_cur_ip; int err, i, num_spec = 0; u64 cur_arg; char fmt_ptype, cur_ip[16], ip_spec[] = "%pXX"; fmt_end = strnchr(fmt, fmt_size, 0); if (!fmt_end) return -EINVAL; fmt_size = fmt_end - fmt; if (get_buffers && try_get_buffers(&buffers)) return -EBUSY; if (data->get_bin_args) { if (num_args) tmp_buf = buffers->bin_args; tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS; data->bin_args = (u32 *)tmp_buf; } if (data->get_buf) data->buf = buffers->buf; for (i = 0; i < fmt_size; i++) { if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) { err = -EINVAL; goto out; } if (fmt[i] != '%') continue; if (fmt[i + 1] == '%') { i++; continue; } if (num_spec >= num_args) { err = -EINVAL; goto out; } /* The string is zero-terminated so if fmt[i] != 0, we can * always access fmt[i + 1], in the worst case it will be a 0 */ i++; /* skip optional "[0 +-][num]" width formatting field */ while (fmt[i] == '0' || fmt[i] == '+' || fmt[i] == '-' || fmt[i] == ' ') i++; if (fmt[i] >= '1' && fmt[i] <= '9') { i++; while (fmt[i] >= '0' && fmt[i] <= '9') i++; } if (fmt[i] == 'p') { sizeof_cur_arg = sizeof(long); if ((fmt[i + 1] == 'k' || fmt[i + 1] == 'u') && fmt[i + 2] == 's') { fmt_ptype = fmt[i + 1]; i += 2; goto fmt_str; } if (fmt[i + 1] == 0 || isspace(fmt[i + 1]) || ispunct(fmt[i + 1]) || fmt[i + 1] == 'K' || fmt[i + 1] == 'x' || fmt[i + 1] == 's' || fmt[i + 1] == 'S') { /* just kernel pointers */ if (tmp_buf) cur_arg = raw_args[num_spec]; i++; goto nocopy_fmt; } if (fmt[i + 1] == 'B') { if (tmp_buf) { err = snprintf(tmp_buf, (tmp_buf_end - tmp_buf), "%pB", (void *)(long)raw_args[num_spec]); tmp_buf += (err + 1); } i++; num_spec++; continue; } /* only support "%pI4", "%pi4", "%pI6" and "%pi6". */ if ((fmt[i + 1] != 'i' && fmt[i + 1] != 'I') || (fmt[i + 2] != '4' && fmt[i + 2] != '6')) { err = -EINVAL; goto out; } i += 2; if (!tmp_buf) goto nocopy_fmt; sizeof_cur_ip = (fmt[i] == '4') ? 4 : 16; if (tmp_buf_end - tmp_buf < sizeof_cur_ip) { err = -ENOSPC; goto out; } unsafe_ptr = (char *)(long)raw_args[num_spec]; err = copy_from_kernel_nofault(cur_ip, unsafe_ptr, sizeof_cur_ip); if (err < 0) memset(cur_ip, 0, sizeof_cur_ip); /* hack: bstr_printf expects IP addresses to be * pre-formatted as strings, ironically, the easiest way * to do that is to call snprintf. 
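 * For example, a "%pI4" spec is rewritten into the literal string "%pI4"
 * in ip_spec[] below, and the four raw address bytes are rendered through
 * snprintf(), so the binary args buffer ends up holding the finished
 * string rather than a pointer.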
*/ ip_spec[2] = fmt[i - 1]; ip_spec[3] = fmt[i]; err = snprintf(tmp_buf, tmp_buf_end - tmp_buf, ip_spec, &cur_ip); tmp_buf += err + 1; num_spec++; continue; } else if (fmt[i] == 's') { fmt_ptype = fmt[i]; fmt_str: if (fmt[i + 1] != 0 && !isspace(fmt[i + 1]) && !ispunct(fmt[i + 1])) { err = -EINVAL; goto out; } if (!tmp_buf) goto nocopy_fmt; if (tmp_buf_end == tmp_buf) { err = -ENOSPC; goto out; } unsafe_ptr = (char *)(long)raw_args[num_spec]; err = bpf_trace_copy_string(tmp_buf, unsafe_ptr, fmt_ptype, tmp_buf_end - tmp_buf); if (err < 0) { tmp_buf[0] = '\0'; err = 1; } tmp_buf += err; num_spec++; continue; } else if (fmt[i] == 'c') { if (!tmp_buf) goto nocopy_fmt; if (tmp_buf_end == tmp_buf) { err = -ENOSPC; goto out; } *tmp_buf = raw_args[num_spec]; tmp_buf++; num_spec++; continue; } sizeof_cur_arg = sizeof(int); if (fmt[i] == 'l') { sizeof_cur_arg = sizeof(long); i++; } if (fmt[i] == 'l') { sizeof_cur_arg = sizeof(long long); i++; } if (fmt[i] != 'i' && fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x' && fmt[i] != 'X') { err = -EINVAL; goto out; } if (tmp_buf) cur_arg = raw_args[num_spec]; nocopy_fmt: if (tmp_buf) { tmp_buf = PTR_ALIGN(tmp_buf, sizeof(u32)); if (tmp_buf_end - tmp_buf < sizeof_cur_arg) { err = -ENOSPC; goto out; } if (sizeof_cur_arg == 8) { *(u32 *)tmp_buf = *(u32 *)&cur_arg; *(u32 *)(tmp_buf + 4) = *((u32 *)&cur_arg + 1); } else { *(u32 *)tmp_buf = (u32)(long)cur_arg; } tmp_buf += sizeof_cur_arg; } num_spec++; } err = 0; out: if (err) bpf_bprintf_cleanup(data); return err; } BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt, const void *, args, u32, data_len) { struct bpf_bprintf_data data = { .get_bin_args = true, }; int err, num_args; if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 || (data_len && !args)) return -EINVAL; num_args = data_len / 8; /* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we * can safely give an unbounded size. */ err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data); if (err < 0) return err; err = bstr_printf(str, str_size, fmt, data.bin_args); bpf_bprintf_cleanup(&data); return err + 1; } const struct bpf_func_proto bpf_snprintf_proto = { .func = bpf_snprintf, .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_MEM_OR_NULL, .arg2_type = ARG_CONST_SIZE_OR_ZERO, .arg3_type = ARG_PTR_TO_CONST_STR, .arg4_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY, .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; struct bpf_async_cb { struct bpf_map *map; struct bpf_prog *prog; void __rcu *callback_fn; void *value; union { struct rcu_head rcu; struct work_struct delete_work; }; u64 flags; }; /* BPF map elements can contain 'struct bpf_timer'. * Such a map owns all of its BPF timers. * 'struct bpf_timer' is allocated as part of map element allocation * and it's zero initialized. * That space is used to keep 'struct bpf_async_kern'. * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and * remembers the 'struct bpf_map *' pointer it's part of. * bpf_timer_set_callback() increments prog refcnt and assigns the bpf callback_fn. * bpf_timer_start() arms the timer. * If the user space reference to a map goes to zero at this point * ops->map_release_uref callback is responsible for cancelling the timers, * freeing their memory, and decrementing prog's refcnts. * bpf_timer_cancel() cancels the timer and decrements prog's refcnt. * Inner maps can contain bpf timers as well. ops->map_release_uref frees * the timers when an inner map is replaced or deleted by user space.
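 *
 * Seen from a BPF program, the lifecycle above is driven by the uapi
 * helpers (illustrative sketch; 'val' is a hypothetical map value
 * embedding a struct bpf_timer named 't'):
 *
 *	bpf_timer_init(&val->t, &timer_map, CLOCK_MONOTONIC);
 *	bpf_timer_set_callback(&val->t, timer_cb);
 *	bpf_timer_start(&val->t, NSEC_PER_SEC, 0);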
*/ struct bpf_hrtimer { struct bpf_async_cb cb; struct hrtimer timer; atomic_t cancelling; }; struct bpf_work { struct bpf_async_cb cb; struct work_struct work; struct work_struct delete_work; }; /* the actual struct hidden inside uapi struct bpf_timer and bpf_wq */ struct bpf_async_kern { union { struct bpf_async_cb *cb; struct bpf_hrtimer *timer; struct bpf_work *work; }; /* bpf_spin_lock is used here instead of spinlock_t to make * sure that it always fits into space reserved by struct bpf_timer * regardless of LOCKDEP and spinlock debug flags. */ struct bpf_spin_lock lock; } __attribute__((aligned(8))); enum bpf_async_type { BPF_ASYNC_TYPE_TIMER = 0, BPF_ASYNC_TYPE_WQ, }; static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running); static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer) { struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer); struct bpf_map *map = t->cb.map; void *value = t->cb.value; bpf_callback_t callback_fn; void *key; u32 idx; BTF_TYPE_EMIT(struct bpf_timer); callback_fn = rcu_dereference_check(t->cb.callback_fn, rcu_read_lock_bh_held()); if (!callback_fn) goto out; /* bpf_timer_cb() runs in hrtimer_run_softirq. It doesn't migrate and * cannot be preempted by another bpf_timer_cb() on the same cpu. * Remember the timer this callback is servicing to prevent * deadlock if callback_fn() calls bpf_timer_cancel() or * bpf_map_delete_elem() on the same timer. */ this_cpu_write(hrtimer_running, t); if (map->map_type == BPF_MAP_TYPE_ARRAY) { struct bpf_array *array = container_of(map, struct bpf_array, map); /* compute the key */ idx = ((char *)value - array->value) / array->elem_size; key = &idx; } else { /* hash or lru */ key = value - round_up(map->key_size, 8); } callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0); /* The verifier checked that return value is zero. */ this_cpu_write(hrtimer_running, NULL); out: return HRTIMER_NORESTART; } static void bpf_wq_work(struct work_struct *work) { struct bpf_work *w = container_of(work, struct bpf_work, work); struct bpf_async_cb *cb = &w->cb; struct bpf_map *map = cb->map; bpf_callback_t callback_fn; void *value = cb->value; void *key; u32 idx; BTF_TYPE_EMIT(struct bpf_wq); callback_fn = READ_ONCE(cb->callback_fn); if (!callback_fn) return; if (map->map_type == BPF_MAP_TYPE_ARRAY) { struct bpf_array *array = container_of(map, struct bpf_array, map); /* compute the key */ idx = ((char *)value - array->value) / array->elem_size; key = &idx; } else { /* hash or lru */ key = value - round_up(map->key_size, 8); } rcu_read_lock_trace(); migrate_disable(); callback_fn((u64)(long)map, (u64)(long)key, (u64)(long)value, 0, 0); migrate_enable(); rcu_read_unlock_trace(); } static void bpf_wq_delete_work(struct work_struct *work) { struct bpf_work *w = container_of(work, struct bpf_work, delete_work); cancel_work_sync(&w->work); kfree_rcu(w, cb.rcu); } static void bpf_timer_delete_work(struct work_struct *work) { struct bpf_hrtimer *t = container_of(work, struct bpf_hrtimer, cb.delete_work); /* Cancel the timer and wait for callback to complete if it was running. * If hrtimer_cancel() can be safely called it's safe to call * kfree_rcu(t) right after for both preallocated and non-preallocated * maps. The async->cb = NULL was already done and no code path can see * address 't' anymore. Timer if armed for existing bpf_hrtimer before * bpf_timer_cancel_and_free will have been cancelled. 
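 * (Put differently: once async->cb was set to NULL no new arming is
 * possible, so the hrtimer_cancel() below catches any timer that was
 * armed before bpf_timer_cancel_and_free() ran and waits out a callback
 * that may still be running.)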
*/ hrtimer_cancel(&t->timer); kfree_rcu(t, cb.rcu); } static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags, enum bpf_async_type type) { struct bpf_async_cb *cb; struct bpf_hrtimer *t; struct bpf_work *w; clockid_t clockid; size_t size; int ret = 0; if (in_nmi()) return -EOPNOTSUPP; switch (type) { case BPF_ASYNC_TYPE_TIMER: size = sizeof(struct bpf_hrtimer); break; case BPF_ASYNC_TYPE_WQ: size = sizeof(struct bpf_work); break; default: return -EINVAL; } __bpf_spin_lock_irqsave(&async->lock); t = async->timer; if (t) { ret = -EBUSY; goto out; } /* allocate hrtimer via map_kmalloc to use memcg accounting */ cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node); if (!cb) { ret = -ENOMEM; goto out; } switch (type) { case BPF_ASYNC_TYPE_TIMER: clockid = flags & (MAX_CLOCKS - 1); t = (struct bpf_hrtimer *)cb; atomic_set(&t->cancelling, 0); INIT_WORK(&t->cb.delete_work, bpf_timer_delete_work); hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT); t->timer.function = bpf_timer_cb; cb->value = (void *)async - map->record->timer_off; break; case BPF_ASYNC_TYPE_WQ: w = (struct bpf_work *)cb; INIT_WORK(&w->work, bpf_wq_work); INIT_WORK(&w->delete_work, bpf_wq_delete_work); cb->value = (void *)async - map->record->wq_off; break; } cb->map = map; cb->prog = NULL; cb->flags = flags; rcu_assign_pointer(cb->callback_fn, NULL); WRITE_ONCE(async->cb, cb); /* Guarantee the order between async->cb and map->usercnt. So * when there are concurrent uref release and bpf timer init, either * bpf_timer_cancel_and_free() called by uref release reads a no-NULL * timer or atomic64_read() below returns a zero usercnt. */ smp_mb(); if (!atomic64_read(&map->usercnt)) { /* maps with timers must be either held by user space * or pinned in bpffs. */ WRITE_ONCE(async->cb, NULL); kfree(cb); ret = -EPERM; } out: __bpf_spin_unlock_irqrestore(&async->lock); return ret; } BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map, u64, flags) { clock_t clockid = flags & (MAX_CLOCKS - 1); BUILD_BUG_ON(MAX_CLOCKS != 16); BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_timer)); BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_timer)); if (flags >= MAX_CLOCKS || /* similar to timerfd except _ALARM variants are not supported */ (clockid != CLOCK_MONOTONIC && clockid != CLOCK_REALTIME && clockid != CLOCK_BOOTTIME)) return -EINVAL; return __bpf_async_init(timer, map, flags, BPF_ASYNC_TYPE_TIMER); } static const struct bpf_func_proto bpf_timer_init_proto = { .func = bpf_timer_init, .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_TIMER, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, }; static int __bpf_async_set_callback(struct bpf_async_kern *async, void *callback_fn, struct bpf_prog_aux *aux, unsigned int flags, enum bpf_async_type type) { struct bpf_prog *prev, *prog = aux->prog; struct bpf_async_cb *cb; int ret = 0; if (in_nmi()) return -EOPNOTSUPP; __bpf_spin_lock_irqsave(&async->lock); cb = async->cb; if (!cb) { ret = -EINVAL; goto out; } if (!atomic64_read(&cb->map->usercnt)) { /* maps with timers must be either held by user space * or pinned in bpffs. Otherwise timer might still be * running even when bpf prog is detached and user space * is gone, since map_release_uref won't ever be called. */ ret = -EPERM; goto out; } prev = cb->prog; if (prev != prog) { /* Bump prog refcnt once. Every bpf_timer_set_callback() * can pick different callback_fn-s within the same prog. 
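 * The refcount therefore tracks the prog as a whole, not an individual
 * callback: swapping callback_fn-s within the same prog leaves it
 * unchanged.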
*/ prog = bpf_prog_inc_not_zero(prog); if (IS_ERR(prog)) { ret = PTR_ERR(prog); goto out; } if (prev) /* Drop prev prog refcnt when swapping with new prog */ bpf_prog_put(prev); cb->prog = prog; } rcu_assign_pointer(cb->callback_fn, callback_fn); out: __bpf_spin_unlock_irqrestore(&async->lock); return ret; } BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn, struct bpf_prog_aux *, aux) { return __bpf_async_set_callback(timer, callback_fn, aux, 0, BPF_ASYNC_TYPE_TIMER); } static const struct bpf_func_proto bpf_timer_set_callback_proto = { .func = bpf_timer_set_callback, .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_TIMER, .arg2_type = ARG_PTR_TO_FUNC, }; BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, flags) { struct bpf_hrtimer *t; int ret = 0; enum hrtimer_mode mode; if (in_nmi()) return -EOPNOTSUPP; if (flags & ~(BPF_F_TIMER_ABS | BPF_F_TIMER_CPU_PIN)) return -EINVAL; __bpf_spin_lock_irqsave(&timer->lock); t = timer->timer; if (!t || !t->cb.prog) { ret = -EINVAL; goto out; } if (flags & BPF_F_TIMER_ABS) mode = HRTIMER_MODE_ABS_SOFT; else mode = HRTIMER_MODE_REL_SOFT; if (flags & BPF_F_TIMER_CPU_PIN) mode |= HRTIMER_MODE_PINNED; hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode); out: __bpf_spin_unlock_irqrestore(&timer->lock); return ret; } static const struct bpf_func_proto bpf_timer_start_proto = { .func = bpf_timer_start, .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_TIMER, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; static void drop_prog_refcnt(struct bpf_async_cb *async) { struct bpf_prog *prog = async->prog; if (prog) { bpf_prog_put(prog); async->prog = NULL; rcu_assign_pointer(async->callback_fn, NULL); } } BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer) { struct bpf_hrtimer *t, *cur_t; bool inc = false; int ret = 0; if (in_nmi()) return -EOPNOTSUPP; rcu_read_lock(); __bpf_spin_lock_irqsave(&timer->lock); t = timer->timer; if (!t) { ret = -EINVAL; goto out; } cur_t = this_cpu_read(hrtimer_running); if (cur_t == t) { /* If bpf callback_fn is trying to bpf_timer_cancel() * its own timer the hrtimer_cancel() will deadlock * since it waits for callback_fn to finish. */ ret = -EDEADLK; goto out; } /* Only account in-flight cancellations when invoked from a timer * callback, since we want to avoid waiting only if other _callbacks_ * are waiting on us, to avoid introducing lockups. Non-callback paths * are ok, since nobody would synchronously wait for their completion. */ if (!cur_t) goto drop; atomic_inc(&t->cancelling); /* Need full barrier after relaxed atomic_inc */ smp_mb__after_atomic(); inc = true; if (atomic_read(&cur_t->cancelling)) { /* We're cancelling timer t, while some other timer callback is * attempting to cancel us. In such a case, it might be possible * that timer t belongs to the other callback, or some other * callback waiting upon it (creating transitive dependencies * upon us), and we will enter a deadlock if we continue * cancelling and waiting for it synchronously, since it might * do the same. Bail! */ ret = -EDEADLK; goto out; } drop: drop_prog_refcnt(&t->cb); out: __bpf_spin_unlock_irqrestore(&timer->lock); /* Cancel the timer and wait for associated callback to finish * if it was running. 
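 * hrtimer_cancel() returns 1 when the timer was active, which is the
 * value this helper reports to its caller on successful cancellation.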
*/ ret = ret ?: hrtimer_cancel(&t->timer); if (inc) atomic_dec(&t->cancelling); rcu_read_unlock(); return ret; } static const struct bpf_func_proto bpf_timer_cancel_proto = { .func = bpf_timer_cancel, .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_TIMER, }; static struct bpf_async_cb *__bpf_async_cancel_and_free(struct bpf_async_kern *async) { struct bpf_async_cb *cb; /* Performance optimization: read async->cb without lock first. */ if (!READ_ONCE(async->cb)) return NULL; __bpf_spin_lock_irqsave(&async->lock); /* re-read it under lock */ cb = async->cb; if (!cb) goto out; drop_prog_refcnt(cb); /* The subsequent bpf_timer_start/cancel() helpers won't be able to use * this timer, since it won't be initialized. */ WRITE_ONCE(async->cb, NULL); out: __bpf_spin_unlock_irqrestore(&async->lock); return cb; } /* This function is called by map_delete/update_elem for individual element and * by ops->map_release_uref when the user space reference to a map reaches zero. */ void bpf_timer_cancel_and_free(void *val) { struct bpf_hrtimer *t; t = (struct bpf_hrtimer *)__bpf_async_cancel_and_free(val); if (!t) return; /* We check that bpf_map_delete/update_elem() was called from timer * callback_fn. In such a case we don't call hrtimer_cancel() (since it * will deadlock) and don't call hrtimer_try_to_cancel() (since it will * just return -1). Though callback_fn is still running on this cpu, it's * safe to do kfree(t) because bpf_timer_cb() read everything it needed * from 't'. The bpf subprog callback_fn won't be able to access 't', * since async->cb = NULL was already done. The timer will be * effectively cancelled because bpf_timer_cb() will return * HRTIMER_NORESTART. * * However, it is possible the timer callback_fn calling us armed the * timer _before_ calling us, such that failing to cancel it here will * cause it to possibly use struct hrtimer after freeing bpf_hrtimer. * Therefore, we _need_ to cancel any outstanding timers before we do * kfree_rcu, even though no more timers can be armed. * * Moreover, we need to schedule work even if timer does not belong to * the calling callback_fn, as on two different CPUs, we can end up in a * situation where both sides run in parallel, try to cancel one * another, and we end up waiting on both sides in hrtimer_cancel * without making forward progress, since timer1 depends on timer2 * callback to finish, and vice versa. * * CPU 1 (timer1_cb) CPU 2 (timer2_cb) * bpf_timer_cancel_and_free(timer2) bpf_timer_cancel_and_free(timer1) * * To avoid these issues, punt to workqueue context when we are in a * timer callback. */ if (this_cpu_read(hrtimer_running)) queue_work(system_unbound_wq, &t->cb.delete_work); else bpf_timer_delete_work(&t->cb.delete_work); } /* This function is called by map_delete/update_elem for individual element and * by ops->map_release_uref when the user space reference to a map reaches zero. */ void bpf_wq_cancel_and_free(void *val) { struct bpf_work *work; BTF_TYPE_EMIT(struct bpf_wq); work = (struct bpf_work *)__bpf_async_cancel_and_free(val); if (!work) return; /* Trigger cancel of the sleepable work, but *do not* wait for * it to finish if it was running as we might not be in a * sleepable context. * kfree will be called once the work has finished. */ schedule_work(&work->delete_work); } BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr) { unsigned long *kptr = map_value; /* This helper may be inlined by verifier.
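 *
 * Typical BPF-side pattern (sketch; the kptr field name and the pairing
 * with the bpf_obj_drop() kfunc are illustrative):
 *
 *	old = bpf_kptr_xchg(&map_value->owned_ptr, new);
 *	if (old)
 *		bpf_obj_drop(old);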
*/ return xchg(kptr, (unsigned long)ptr); } /* Unlike other PTR_TO_BTF_ID helpers the btf_id in bpf_kptr_xchg() * helper is determined dynamically by the verifier. Use BPF_PTR_POISON to * denote type that verifier will determine. */ static const struct bpf_func_proto bpf_kptr_xchg_proto = { .func = bpf_kptr_xchg, .gpl_only = false, .ret_type = RET_PTR_TO_BTF_ID_OR_NULL, .ret_btf_id = BPF_PTR_POISON, .arg1_type = ARG_PTR_TO_KPTR, .arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL | OBJ_RELEASE, .arg2_btf_id = BPF_PTR_POISON, }; /* Since the upper 8 bits of dynptr->size is reserved, the * maximum supported size is 2^24 - 1. */ #define DYNPTR_MAX_SIZE ((1UL << 24) - 1) #define DYNPTR_TYPE_SHIFT 28 #define DYNPTR_SIZE_MASK 0xFFFFFF #define DYNPTR_RDONLY_BIT BIT(31) bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr) { return ptr->size & DYNPTR_RDONLY_BIT; } void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr) { ptr->size |= DYNPTR_RDONLY_BIT; } static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_type type) { ptr->size |= type << DYNPTR_TYPE_SHIFT; } static enum bpf_dynptr_type bpf_dynptr_get_type(const struct bpf_dynptr_kern *ptr) { return (ptr->size & ~(DYNPTR_RDONLY_BIT)) >> DYNPTR_TYPE_SHIFT; } u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr) { return ptr->size & DYNPTR_SIZE_MASK; } static void bpf_dynptr_set_size(struct bpf_dynptr_kern *ptr, u32 new_size) { u32 metadata = ptr->size & ~DYNPTR_SIZE_MASK; ptr->size = new_size | metadata; } int bpf_dynptr_check_size(u32 size) { return size > DYNPTR_MAX_SIZE ? -E2BIG : 0; } void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, enum bpf_dynptr_type type, u32 offset, u32 size) { ptr->data = data; ptr->offset = offset; ptr->size = size; bpf_dynptr_set_type(ptr, type); } void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr) { memset(ptr, 0, sizeof(*ptr)); } static int bpf_dynptr_check_off_len(const struct bpf_dynptr_kern *ptr, u32 offset, u32 len) { u32 size = __bpf_dynptr_size(ptr); if (len > size || offset > size - len) return -E2BIG; return 0; } BPF_CALL_4(bpf_dynptr_from_mem, void *, data, u32, size, u64, flags, struct bpf_dynptr_kern *, ptr) { int err; BTF_TYPE_EMIT(struct bpf_dynptr); err = bpf_dynptr_check_size(size); if (err) goto error; /* flags is currently unsupported */ if (flags) { err = -EINVAL; goto error; } bpf_dynptr_init(ptr, data, BPF_DYNPTR_TYPE_LOCAL, 0, size); return 0; error: bpf_dynptr_set_null(ptr); return err; } static const struct bpf_func_proto bpf_dynptr_from_mem_proto = { .func = bpf_dynptr_from_mem, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_UNINIT_MEM, .arg2_type = ARG_CONST_SIZE_OR_ZERO, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT, }; BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src, u32, offset, u64, flags) { enum bpf_dynptr_type type; int err; if (!src->data || flags) return -EINVAL; err = bpf_dynptr_check_off_len(src, offset, len); if (err) return err; type = bpf_dynptr_get_type(src); switch (type) { case BPF_DYNPTR_TYPE_LOCAL: case BPF_DYNPTR_TYPE_RINGBUF: /* Source and destination may possibly overlap, hence use memmove to * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr * pointing to overlapping PTR_TO_MAP_VALUE regions. 
*/ memmove(dst, src->data + src->offset + offset, len); return 0; case BPF_DYNPTR_TYPE_SKB: return __bpf_skb_load_bytes(src->data, src->offset + offset, dst, len); case BPF_DYNPTR_TYPE_XDP: return __bpf_xdp_load_bytes(src->data, src->offset + offset, dst, len); default: WARN_ONCE(true, "bpf_dynptr_read: unknown dynptr type %d\n", type); return -EFAULT; } } static const struct bpf_func_proto bpf_dynptr_read_proto = { .func = bpf_dynptr_read, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_UNINIT_MEM, .arg2_type = ARG_CONST_SIZE_OR_ZERO, .arg3_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; BPF_CALL_5(bpf_dynptr_write, const struct bpf_dynptr_kern *, dst, u32, offset, void *, src, u32, len, u64, flags) { enum bpf_dynptr_type type; int err; if (!dst->data || __bpf_dynptr_is_rdonly(dst)) return -EINVAL; err = bpf_dynptr_check_off_len(dst, offset, len); if (err) return err; type = bpf_dynptr_get_type(dst); switch (type) { case BPF_DYNPTR_TYPE_LOCAL: case BPF_DYNPTR_TYPE_RINGBUF: if (flags) return -EINVAL; /* Source and destination may possibly overlap, hence use memmove to * copy the data. E.g. bpf_dynptr_from_mem may create two dynptr * pointing to overlapping PTR_TO_MAP_VALUE regions. */ memmove(dst->data + dst->offset + offset, src, len); return 0; case BPF_DYNPTR_TYPE_SKB: return __bpf_skb_store_bytes(dst->data, dst->offset + offset, src, len, flags); case BPF_DYNPTR_TYPE_XDP: if (flags) return -EINVAL; return __bpf_xdp_store_bytes(dst->data, dst->offset + offset, src, len); default: WARN_ONCE(true, "bpf_dynptr_write: unknown dynptr type %d\n", type); return -EFAULT; } } static const struct bpf_func_proto bpf_dynptr_write_proto = { .func = bpf_dynptr_write, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg4_type = ARG_CONST_SIZE_OR_ZERO, .arg5_type = ARG_ANYTHING, }; BPF_CALL_3(bpf_dynptr_data, const struct bpf_dynptr_kern *, ptr, u32, offset, u32, len) { enum bpf_dynptr_type type; int err; if (!ptr->data) return 0; err = bpf_dynptr_check_off_len(ptr, offset, len); if (err) return 0; if (__bpf_dynptr_is_rdonly(ptr)) return 0; type = bpf_dynptr_get_type(ptr); switch (type) { case BPF_DYNPTR_TYPE_LOCAL: case BPF_DYNPTR_TYPE_RINGBUF: return (unsigned long)(ptr->data + ptr->offset + offset); case BPF_DYNPTR_TYPE_SKB: case BPF_DYNPTR_TYPE_XDP: /* skb and xdp dynptrs should use bpf_dynptr_slice / bpf_dynptr_slice_rdwr */ return 0; default: WARN_ONCE(true, "bpf_dynptr_data: unknown dynptr type %d\n", type); return 0; } } static const struct bpf_func_proto bpf_dynptr_data_proto = { .func = bpf_dynptr_data, .gpl_only = false, .ret_type = RET_PTR_TO_DYNPTR_MEM_OR_NULL, .arg1_type = ARG_PTR_TO_DYNPTR | MEM_RDONLY, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_CONST_ALLOC_SIZE_OR_ZERO, }; const struct bpf_func_proto bpf_get_current_task_proto __weak; const struct bpf_func_proto bpf_get_current_task_btf_proto __weak; const struct bpf_func_proto bpf_probe_read_user_proto __weak; const struct bpf_func_proto bpf_probe_read_user_str_proto __weak; const struct bpf_func_proto bpf_probe_read_kernel_proto __weak; const struct bpf_func_proto bpf_probe_read_kernel_str_proto __weak; const struct bpf_func_proto bpf_task_pt_regs_proto __weak; const struct bpf_func_proto * bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case BPF_FUNC_map_lookup_elem: return &bpf_map_lookup_elem_proto; case 
BPF_FUNC_map_update_elem: return &bpf_map_update_elem_proto; case BPF_FUNC_map_delete_elem: return &bpf_map_delete_elem_proto; case BPF_FUNC_map_push_elem: return &bpf_map_push_elem_proto; case BPF_FUNC_map_pop_elem: return &bpf_map_pop_elem_proto; case BPF_FUNC_map_peek_elem: return &bpf_map_peek_elem_proto; case BPF_FUNC_map_lookup_percpu_elem: return &bpf_map_lookup_percpu_elem_proto; case BPF_FUNC_get_prandom_u32: return &bpf_get_prandom_u32_proto; case BPF_FUNC_get_smp_processor_id: return &bpf_get_raw_smp_processor_id_proto; case BPF_FUNC_get_numa_node_id: return &bpf_get_numa_node_id_proto; case BPF_FUNC_tail_call: return &bpf_tail_call_proto; case BPF_FUNC_ktime_get_ns: return &bpf_ktime_get_ns_proto; case BPF_FUNC_ktime_get_boot_ns: return &bpf_ktime_get_boot_ns_proto; case BPF_FUNC_ktime_get_tai_ns: return &bpf_ktime_get_tai_ns_proto; case BPF_FUNC_ringbuf_output: return &bpf_ringbuf_output_proto; case BPF_FUNC_ringbuf_reserve: return &bpf_ringbuf_reserve_proto; case BPF_FUNC_ringbuf_submit: return &bpf_ringbuf_submit_proto; case BPF_FUNC_ringbuf_discard: return &bpf_ringbuf_discard_proto; case BPF_FUNC_ringbuf_query: return &bpf_ringbuf_query_proto; case BPF_FUNC_strncmp: return &bpf_strncmp_proto; case BPF_FUNC_strtol: return &bpf_strtol_proto; case BPF_FUNC_strtoul: return &bpf_strtoul_proto; case BPF_FUNC_get_current_pid_tgid: return &bpf_get_current_pid_tgid_proto; case BPF_FUNC_get_ns_current_pid_tgid: return &bpf_get_ns_current_pid_tgid_proto; default: break; } if (!bpf_token_capable(prog->aux->token, CAP_BPF)) return NULL; switch (func_id) { case BPF_FUNC_spin_lock: return &bpf_spin_lock_proto; case BPF_FUNC_spin_unlock: return &bpf_spin_unlock_proto; case BPF_FUNC_jiffies64: return &bpf_jiffies64_proto; case BPF_FUNC_per_cpu_ptr: return &bpf_per_cpu_ptr_proto; case BPF_FUNC_this_cpu_ptr: return &bpf_this_cpu_ptr_proto; case BPF_FUNC_timer_init: return &bpf_timer_init_proto; case BPF_FUNC_timer_set_callback: return &bpf_timer_set_callback_proto; case BPF_FUNC_timer_start: return &bpf_timer_start_proto; case BPF_FUNC_timer_cancel: return &bpf_timer_cancel_proto; case BPF_FUNC_kptr_xchg: return &bpf_kptr_xchg_proto; case BPF_FUNC_for_each_map_elem: return &bpf_for_each_map_elem_proto; case BPF_FUNC_loop: return &bpf_loop_proto; case BPF_FUNC_user_ringbuf_drain: return &bpf_user_ringbuf_drain_proto; case BPF_FUNC_ringbuf_reserve_dynptr: return &bpf_ringbuf_reserve_dynptr_proto; case BPF_FUNC_ringbuf_submit_dynptr: return &bpf_ringbuf_submit_dynptr_proto; case BPF_FUNC_ringbuf_discard_dynptr: return &bpf_ringbuf_discard_dynptr_proto; case BPF_FUNC_dynptr_from_mem: return &bpf_dynptr_from_mem_proto; case BPF_FUNC_dynptr_read: return &bpf_dynptr_read_proto; case BPF_FUNC_dynptr_write: return &bpf_dynptr_write_proto; case BPF_FUNC_dynptr_data: return &bpf_dynptr_data_proto; #ifdef CONFIG_CGROUPS case BPF_FUNC_cgrp_storage_get: return &bpf_cgrp_storage_get_proto; case BPF_FUNC_cgrp_storage_delete: return &bpf_cgrp_storage_delete_proto; case BPF_FUNC_get_current_cgroup_id: return &bpf_get_current_cgroup_id_proto; case BPF_FUNC_get_current_ancestor_cgroup_id: return &bpf_get_current_ancestor_cgroup_id_proto; #endif default: break; } if (!bpf_token_capable(prog->aux->token, CAP_PERFMON)) return NULL; switch (func_id) { case BPF_FUNC_trace_printk: return bpf_get_trace_printk_proto(); case BPF_FUNC_get_current_task: return &bpf_get_current_task_proto; case BPF_FUNC_get_current_task_btf: return &bpf_get_current_task_btf_proto; case BPF_FUNC_probe_read_user: return 
&bpf_probe_read_user_proto; case BPF_FUNC_probe_read_kernel: return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? NULL : &bpf_probe_read_kernel_proto; case BPF_FUNC_probe_read_user_str: return &bpf_probe_read_user_str_proto; case BPF_FUNC_probe_read_kernel_str: return security_locked_down(LOCKDOWN_BPF_READ_KERNEL) < 0 ? NULL : &bpf_probe_read_kernel_str_proto; case BPF_FUNC_snprintf_btf: return &bpf_snprintf_btf_proto; case BPF_FUNC_snprintf: return &bpf_snprintf_proto; case BPF_FUNC_task_pt_regs: return &bpf_task_pt_regs_proto; case BPF_FUNC_trace_vprintk: return bpf_get_trace_vprintk_proto(); default: return NULL; } } void bpf_list_head_free(const struct btf_field *field, void *list_head, struct bpf_spin_lock *spin_lock) { struct list_head *head = list_head, *orig_head = list_head; BUILD_BUG_ON(sizeof(struct list_head) > sizeof(struct bpf_list_head)); BUILD_BUG_ON(__alignof__(struct list_head) > __alignof__(struct bpf_list_head)); /* Do the actual list draining outside the lock to not hold the lock for * too long, and also prevent deadlocks if tracing programs end up * executing on entry/exit of functions called inside the critical * section, and end up doing map ops that call bpf_list_head_free for * the same map value again. */ __bpf_spin_lock_irqsave(spin_lock); if (!head->next || list_empty(head)) goto unlock; head = head->next; unlock: INIT_LIST_HEAD(orig_head); __bpf_spin_unlock_irqrestore(spin_lock); while (head != orig_head) { void *obj = head; obj -= field->graph_root.node_offset; head = head->next; /* The contained type can also have resources, including a * bpf_list_head which needs to be freed. */ migrate_disable(); __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false); migrate_enable(); } } /* Like rbtree_postorder_for_each_entry_safe, but 'pos' and 'n' are * 'rb_node *', so field name of rb_node within containing struct is not * needed. 
* * Since bpf_rb_tree's node type has a corresponding struct btf_field with * graph_root.node_offset, it's not necessary to know field name * or type of node struct */ #define bpf_rbtree_postorder_for_each_entry_safe(pos, n, root) \ for (pos = rb_first_postorder(root); \ pos && ({ n = rb_next_postorder(pos); 1; }); \ pos = n) void bpf_rb_root_free(const struct btf_field *field, void *rb_root, struct bpf_spin_lock *spin_lock) { struct rb_root_cached orig_root, *root = rb_root; struct rb_node *pos, *n; void *obj; BUILD_BUG_ON(sizeof(struct rb_root_cached) > sizeof(struct bpf_rb_root)); BUILD_BUG_ON(__alignof__(struct rb_root_cached) > __alignof__(struct bpf_rb_root)); __bpf_spin_lock_irqsave(spin_lock); orig_root = *root; *root = RB_ROOT_CACHED; __bpf_spin_unlock_irqrestore(spin_lock); bpf_rbtree_postorder_for_each_entry_safe(pos, n, &orig_root.rb_root) { obj = pos; obj -= field->graph_root.node_offset; migrate_disable(); __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false); migrate_enable(); } } __bpf_kfunc_start_defs(); __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign) { struct btf_struct_meta *meta = meta__ign; u64 size = local_type_id__k; void *p; p = bpf_mem_alloc(&bpf_global_ma, size); if (!p) return NULL; if (meta) bpf_obj_init(meta->record, p); return p; } __bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign) { u64 size = local_type_id__k; /* The verifier has ensured that meta__ign must be NULL */ return bpf_mem_alloc(&bpf_global_percpu_ma, size); } /* Must be called under migrate_disable(), as required by bpf_mem_free */ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu) { struct bpf_mem_alloc *ma; if (rec && rec->refcount_off >= 0 && !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) { /* Object is refcounted and refcount_dec didn't result in 0 * refcount. Return without freeing the object */ return; } if (rec) bpf_obj_free_fields(rec, p); if (percpu) ma = &bpf_global_percpu_ma; else ma = &bpf_global_ma; bpf_mem_free_rcu(ma, p); } __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign) { struct btf_struct_meta *meta = meta__ign; void *p = p__alloc; __bpf_obj_drop_impl(p, meta ? 
meta->record : NULL, false); } __bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign) { /* The verifier has ensured that meta__ign must be NULL */ bpf_mem_free_rcu(&bpf_global_percpu_ma, p__alloc); } __bpf_kfunc void *bpf_refcount_acquire_impl(void *p__refcounted_kptr, void *meta__ign) { struct btf_struct_meta *meta = meta__ign; struct bpf_refcount *ref; /* Could just cast directly to refcount_t *, but need some code using * bpf_refcount type so that it is emitted in vmlinux BTF */ ref = (struct bpf_refcount *)(p__refcounted_kptr + meta->record->refcount_off); if (!refcount_inc_not_zero((refcount_t *)ref)) return NULL; /* Verifier strips KF_RET_NULL if input is owned ref, see is_kfunc_ret_null * in verifier.c */ return (void *)p__refcounted_kptr; } static int __bpf_list_add(struct bpf_list_node_kern *node, struct bpf_list_head *head, bool tail, struct btf_record *rec, u64 off) { struct list_head *n = &node->list_head, *h = (void *)head; /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't * called on its fields, so init here */ if (unlikely(!h->next)) INIT_LIST_HEAD(h); /* node->owner != NULL implies !list_empty(n), no need to separately * check the latter */ if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { /* Only called from BPF prog, no need to migrate_disable */ __bpf_obj_drop_impl((void *)n - off, rec, false); return -EINVAL; } tail ? list_add_tail(n, h) : list_add(n, h); WRITE_ONCE(node->owner, head); return 0; } __bpf_kfunc int bpf_list_push_front_impl(struct bpf_list_head *head, struct bpf_list_node *node, void *meta__ign, u64 off) { struct bpf_list_node_kern *n = (void *)node; struct btf_struct_meta *meta = meta__ign; return __bpf_list_add(n, head, false, meta ? meta->record : NULL, off); } __bpf_kfunc int bpf_list_push_back_impl(struct bpf_list_head *head, struct bpf_list_node *node, void *meta__ign, u64 off) { struct bpf_list_node_kern *n = (void *)node; struct btf_struct_meta *meta = meta__ign; return __bpf_list_add(n, head, true, meta ? meta->record : NULL, off); } static struct bpf_list_node *__bpf_list_del(struct bpf_list_head *head, bool tail) { struct list_head *n, *h = (void *)head; struct bpf_list_node_kern *node; /* If list_head was 0-initialized by map, bpf_obj_init_field wasn't * called on its fields, so init here */ if (unlikely(!h->next)) INIT_LIST_HEAD(h); if (list_empty(h)) return NULL; n = tail ? h->prev : h->next; node = container_of(n, struct bpf_list_node_kern, list_head); if (WARN_ON_ONCE(READ_ONCE(node->owner) != head)) return NULL; list_del_init(n); WRITE_ONCE(node->owner, NULL); return (struct bpf_list_node *)n; } __bpf_kfunc struct bpf_list_node *bpf_list_pop_front(struct bpf_list_head *head) { return __bpf_list_del(head, false); } __bpf_kfunc struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head) { return __bpf_list_del(head, true); } __bpf_kfunc struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root, struct bpf_rb_node *node) { struct bpf_rb_node_kern *node_internal = (struct bpf_rb_node_kern *)node; struct rb_root_cached *r = (struct rb_root_cached *)root; struct rb_node *n = &node_internal->rb_node; /* node_internal->owner != root implies either RB_EMPTY_NODE(n) or * n is owned by some other tree. 
No need to check RB_EMPTY_NODE(n) */ if (READ_ONCE(node_internal->owner) != root) return NULL; rb_erase_cached(n, r); RB_CLEAR_NODE(n); WRITE_ONCE(node_internal->owner, NULL); return (struct bpf_rb_node *)n; } /* Need to copy rbtree_add_cached's logic here because our 'less' is a BPF * program */ static int __bpf_rbtree_add(struct bpf_rb_root *root, struct bpf_rb_node_kern *node, void *less, struct btf_record *rec, u64 off) { struct rb_node **link = &((struct rb_root_cached *)root)->rb_root.rb_node; struct rb_node *parent = NULL, *n = &node->rb_node; bpf_callback_t cb = (bpf_callback_t)less; bool leftmost = true; /* node->owner != NULL implies !RB_EMPTY_NODE(n), no need to separately * check the latter */ if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { /* Only called from BPF prog, no need to migrate_disable */ __bpf_obj_drop_impl((void *)n - off, rec, false); return -EINVAL; } while (*link) { parent = *link; if (cb((uintptr_t)node, (uintptr_t)parent, 0, 0, 0)) { link = &parent->rb_left; } else { link = &parent->rb_right; leftmost = false; } } rb_link_node(n, parent, link); rb_insert_color_cached(n, (struct rb_root_cached *)root, leftmost); WRITE_ONCE(node->owner, root); return 0; } __bpf_kfunc int bpf_rbtree_add_impl(struct bpf_rb_root *root, struct bpf_rb_node *node, bool (less)(struct bpf_rb_node *a, const struct bpf_rb_node *b), void *meta__ign, u64 off) { struct btf_struct_meta *meta = meta__ign; struct bpf_rb_node_kern *n = (void *)node; return __bpf_rbtree_add(root, n, (void *)less, meta ? meta->record : NULL, off); } __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root) { struct rb_root_cached *r = (struct rb_root_cached *)root; return (struct bpf_rb_node *)rb_first_cached(r); } /** * bpf_task_acquire - Acquire a reference to a task. A task acquired by this * kfunc which is not stored in a map as a kptr, must be released by calling * bpf_task_release(). * @p: The task on which a reference is being acquired. */ __bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p) { if (refcount_inc_not_zero(&p->rcu_users)) return p; return NULL; } /** * bpf_task_release - Release the reference acquired on a task. * @p: The task on which a reference is being released. */ __bpf_kfunc void bpf_task_release(struct task_struct *p) { put_task_struct_rcu_user(p); } __bpf_kfunc void bpf_task_release_dtor(void *p) { put_task_struct_rcu_user(p); } CFI_NOSEAL(bpf_task_release_dtor); #ifdef CONFIG_CGROUPS /** * bpf_cgroup_acquire - Acquire a reference to a cgroup. A cgroup acquired by * this kfunc which is not stored in a map as a kptr, must be released by * calling bpf_cgroup_release(). * @cgrp: The cgroup on which a reference is being acquired. */ __bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp) { return cgroup_tryget(cgrp) ? cgrp : NULL; } /** * bpf_cgroup_release - Release the reference acquired on a cgroup. * If this kfunc is invoked in an RCU read region, the cgroup is guaranteed to * not be freed until the current grace period has ended, even if its refcount * drops to 0. * @cgrp: The cgroup on which a reference is being released. */ __bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp) { cgroup_put(cgrp); } __bpf_kfunc void bpf_cgroup_release_dtor(void *cgrp) { cgroup_put(cgrp); } CFI_NOSEAL(bpf_cgroup_release_dtor); /** * bpf_cgroup_ancestor - Perform a lookup on an entry in a cgroup's ancestor * array. A cgroup returned by this kfunc which is not subsequently stored in a * map, must be released by calling bpf_cgroup_release(). 
* @cgrp: The cgroup for which we're performing a lookup. * @level: The level of ancestor to look up. */ __bpf_kfunc struct cgroup *bpf_cgroup_ancestor(struct cgroup *cgrp, int level) { struct cgroup *ancestor; if (level > cgrp->level || level < 0) return NULL; /* cgrp's refcnt could be 0 here, but ancestors can still be accessed */ ancestor = cgrp->ancestors[level]; if (!cgroup_tryget(ancestor)) return NULL; return ancestor; } /** * bpf_cgroup_from_id - Find a cgroup from its ID. A cgroup returned by this * kfunc which is not subsequently stored in a map, must be released by calling * bpf_cgroup_release(). * @cgid: cgroup id. */ __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid) { struct cgroup *cgrp; cgrp = cgroup_get_from_id(cgid); if (IS_ERR(cgrp)) return NULL; return cgrp; } /** * bpf_task_under_cgroup - wrap task_under_cgroup_hierarchy() as a kfunc to * test a task's membership of cgroup ancestry. * @task: the task to be tested * @ancestor: possible ancestor of @task's cgroup * * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor. * It follows all the same rules as cgroup_is_descendant, and only applies * to the default hierarchy. */ __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task, struct cgroup *ancestor) { long ret; rcu_read_lock(); ret = task_under_cgroup_hierarchy(task, ancestor); rcu_read_unlock(); return ret; } /** * bpf_task_get_cgroup1 - Acquires the associated cgroup of a task within a * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its * hierarchy ID. * @task: The target task * @hierarchy_id: The ID of a cgroup1 hierarchy * * On success, the cgroup is returned. On failure, NULL is returned. */ __bpf_kfunc struct cgroup * bpf_task_get_cgroup1(struct task_struct *task, int hierarchy_id) { struct cgroup *cgrp = task_get_cgroup1(task, hierarchy_id); if (IS_ERR(cgrp)) return NULL; return cgrp; } #endif /* CONFIG_CGROUPS */ /** * bpf_task_from_pid - Find a struct task_struct from its pid by looking it up * in the root pid namespace idr. If a task is returned, it must either be * stored in a map, or released with bpf_task_release(). * @pid: The pid of the task being looked up. */ __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid) { struct task_struct *p; rcu_read_lock(); p = find_task_by_pid_ns(pid, &init_pid_ns); if (p) p = bpf_task_acquire(p); rcu_read_unlock(); return p; } /** * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data. * @p: The dynptr whose data slice to retrieve * @offset: Offset into the dynptr * @buffer__opt: User-provided buffer to copy contents into. May be NULL * @buffer__szk: Size (in bytes) of the buffer if present. This is the * length of the requested slice. This must be a constant. * * For non-skb and non-xdp type dynptrs, there is no difference between * bpf_dynptr_slice and bpf_dynptr_data. * * If buffer__opt is NULL, the call will fail if buffer__opt was needed. * * If the intention is to write to the data slice, please use * bpf_dynptr_slice_rdwr. * * The user must check that the returned pointer is not null before using it. * * Please note that in the case of skb and xdp dynptrs, bpf_dynptr_slice * does not change the underlying packet data pointers, so a call to * bpf_dynptr_slice will not invalidate any ctx->data/data_end pointers in * the bpf program.
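 *
 * A minimal read sketch (illustrative; assumes an skb dynptr was created
 * earlier, e.g. with bpf_dynptr_from_skb()):
 *
 *	struct ethhdr hdr_buf;
 *	struct ethhdr *eth;
 *
 *	eth = bpf_dynptr_slice(&dynptr, 0, &hdr_buf, sizeof(hdr_buf));
 *	if (!eth)
 *		return TC_ACT_SHOT;
 *	(eth points either into the linear skb data or into hdr_buf)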
* * Return: NULL if the call failed (eg invalid dynptr), pointer to a read-only * data slice (can be either direct pointer to the data or a pointer to the user * provided buffer, with its contents containing the data, if unable to obtain * direct pointer) */ __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset, void *buffer__opt, u32 buffer__szk) { const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; enum bpf_dynptr_type type; u32 len = buffer__szk; int err; if (!ptr->data) return NULL; err = bpf_dynptr_check_off_len(ptr, offset, len); if (err) return NULL; type = bpf_dynptr_get_type(ptr); switch (type) { case BPF_DYNPTR_TYPE_LOCAL: case BPF_DYNPTR_TYPE_RINGBUF: return ptr->data + ptr->offset + offset; case BPF_DYNPTR_TYPE_SKB: if (buffer__opt) return skb_header_pointer(ptr->data, ptr->offset + offset, len, buffer__opt); else return skb_pointer_if_linear(ptr->data, ptr->offset + offset, len); case BPF_DYNPTR_TYPE_XDP: { void *xdp_ptr = bpf_xdp_pointer(ptr->data, ptr->offset + offset, len); if (!IS_ERR_OR_NULL(xdp_ptr)) return xdp_ptr; if (!buffer__opt) return NULL; bpf_xdp_copy_buf(ptr->data, ptr->offset + offset, buffer__opt, len, false); return buffer__opt; } default: WARN_ONCE(true, "unknown dynptr type %d\n", type); return NULL; } } /** * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data. * @p: The dynptr whose data slice to retrieve * @offset: Offset into the dynptr * @buffer__opt: User-provided buffer to copy contents into. May be NULL * @buffer__szk: Size (in bytes) of the buffer if present. This is the * length of the requested slice. This must be a constant. * * For non-skb and non-xdp type dynptrs, there is no difference between * bpf_dynptr_slice and bpf_dynptr_data. * * If buffer__opt is NULL, the call will fail if buffer_opt was needed. * * The returned pointer is writable and may point to either directly the dynptr * data at the requested offset or to the buffer if unable to obtain a direct * data pointer to (example: the requested slice is to the paged area of an skb * packet). In the case where the returned pointer is to the buffer, the user * is responsible for persisting writes through calling bpf_dynptr_write(). This * usually looks something like this pattern: * * struct eth_hdr *eth = bpf_dynptr_slice_rdwr(&dynptr, 0, buffer, sizeof(buffer)); * if (!eth) * return TC_ACT_SHOT; * * // mutate eth header // * * if (eth == buffer) * bpf_dynptr_write(&ptr, 0, buffer, sizeof(buffer), 0); * * Please note that, as in the example above, the user must check that the * returned pointer is not null before using it. * * Please also note that in the case of skb and xdp dynptrs, bpf_dynptr_slice_rdwr * does not change the underlying packet data pointers, so a call to * bpf_dynptr_slice_rdwr will not invalidate any ctx->data/data_end pointers in * the bpf program. * * Return: NULL if the call failed (eg invalid dynptr), pointer to a * data slice (can be either direct pointer to the data or a pointer to the user * provided buffer, with its contents containing the data, if unable to obtain * direct pointer) */ __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset, void *buffer__opt, u32 buffer__szk) { const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; if (!ptr->data || __bpf_dynptr_is_rdonly(ptr)) return NULL; /* bpf_dynptr_slice_rdwr is the same logic as bpf_dynptr_slice. * * For skb-type dynptrs, it is safe to write into the returned pointer * if the bpf program allows skb data writes. 
There are two possibilities * that may occur when calling bpf_dynptr_slice_rdwr: * * 1) The requested slice is in the head of the skb. In this case, the * returned pointer is directly to skb data, and if the skb is cloned, the * verifier will have uncloned it (see bpf_unclone_prologue()) already. * The pointer can be directly written into. * * 2) Some portion of the requested slice is in the paged buffer area. * In this case, the requested data will be copied out into the buffer * and the returned pointer will be a pointer to the buffer. The skb * will not be pulled. To persist the write, the user will need to call * bpf_dynptr_write(), which will pull the skb and commit the write. * * Similarly for xdp programs, if the requested slice is not across xdp * fragments, then a direct pointer will be returned, otherwise the data * will be copied out into the buffer and the user will need to call * bpf_dynptr_write() to commit changes. */ return bpf_dynptr_slice(p, offset, buffer__opt, buffer__szk); } __bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u32 start, u32 end) { struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; u32 size; if (!ptr->data || start > end) return -EINVAL; size = __bpf_dynptr_size(ptr); if (start > size || end > size) return -ERANGE; ptr->offset += start; bpf_dynptr_set_size(ptr, end - start); return 0; } __bpf_kfunc bool bpf_dynptr_is_null(const struct bpf_dynptr *p) { struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; return !ptr->data; } __bpf_kfunc bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *p) { struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; if (!ptr->data) return false; return __bpf_dynptr_is_rdonly(ptr); } __bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr *p) { struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; if (!ptr->data) return -EINVAL; return __bpf_dynptr_size(ptr); } __bpf_kfunc int bpf_dynptr_clone(const struct bpf_dynptr *p, struct bpf_dynptr *clone__uninit) { struct bpf_dynptr_kern *clone = (struct bpf_dynptr_kern *)clone__uninit; struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; if (!ptr->data) { bpf_dynptr_set_null(clone); return -EINVAL; } *clone = *ptr; return 0; } __bpf_kfunc void *bpf_cast_to_kern_ctx(void *obj) { return obj; } __bpf_kfunc void *bpf_rdonly_cast(const void *obj__ign, u32 btf_id__k) { return (void *)obj__ign; } __bpf_kfunc void bpf_rcu_read_lock(void) { rcu_read_lock(); } __bpf_kfunc void bpf_rcu_read_unlock(void) { rcu_read_unlock(); } struct bpf_throw_ctx { struct bpf_prog_aux *aux; u64 sp; u64 bp; int cnt; }; static bool bpf_stack_walker(void *cookie, u64 ip, u64 sp, u64 bp) { struct bpf_throw_ctx *ctx = cookie; struct bpf_prog *prog; if (!is_bpf_text_address(ip)) return !ctx->cnt; prog = bpf_prog_ksym_find(ip); ctx->cnt++; if (bpf_is_subprog(prog)) return true; ctx->aux = prog->aux; ctx->sp = sp; ctx->bp = bp; return false; } __bpf_kfunc void bpf_throw(u64 cookie) { struct bpf_throw_ctx ctx = {}; arch_bpf_stack_walk(bpf_stack_walker, &ctx); WARN_ON_ONCE(!ctx.aux); if (ctx.aux) WARN_ON_ONCE(!ctx.aux->exception_boundary); WARN_ON_ONCE(!ctx.bp); WARN_ON_ONCE(!ctx.cnt); /* Prevent KASAN false positives for CONFIG_KASAN_STACK by unpoisoning * deeper stack depths than ctx.sp as we do not return from bpf_throw, * which skips compiler generated instrumentation to do the same. 
*/ kasan_unpoison_task_stack_below((void *)(long)ctx.sp); ctx.aux->bpf_exception_cb(cookie, ctx.sp, ctx.bp, 0, 0); WARN(1, "A call to BPF exception callback should never return\n"); } __bpf_kfunc int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) { struct bpf_async_kern *async = (struct bpf_async_kern *)wq; struct bpf_map *map = p__map; BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_wq)); BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_wq)); if (flags) return -EINVAL; return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ); } __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) { struct bpf_async_kern *async = (struct bpf_async_kern *)wq; struct bpf_work *w; if (in_nmi()) return -EOPNOTSUPP; if (flags) return -EINVAL; w = READ_ONCE(async->work); if (!w || !READ_ONCE(w->cb.prog)) return -EINVAL; schedule_work(&w->work); return 0; } __bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq, int (callback_fn)(void *map, int *key, void *value), unsigned int flags, void *aux__ign) { struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__ign; struct bpf_async_kern *async = (struct bpf_async_kern *)wq; if (flags) return -EINVAL; return __bpf_async_set_callback(async, callback_fn, aux, flags, BPF_ASYNC_TYPE_WQ); } __bpf_kfunc void bpf_preempt_disable(void) { preempt_disable(); } __bpf_kfunc void bpf_preempt_enable(void) { preempt_enable(); } struct bpf_iter_bits { __u64 __opaque[2]; } __aligned(8); struct bpf_iter_bits_kern { union { unsigned long *bits; unsigned long bits_copy; }; u32 nr_bits; int bit; } __aligned(8); /** * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area * @it: The new bpf_iter_bits to be created * @unsafe_ptr__ign: A pointer pointing to a memory area to be iterated over * @nr_words: The size of the specified memory area, measured in 8-byte units. * Due to the limitation of memalloc, it can't be greater than 512. * * This function initializes a new bpf_iter_bits structure for iterating over * a memory area which is specified by the @unsafe_ptr__ign and @nr_words. It * copies the data of the memory area to the newly created bpf_iter_bits @it for * subsequent iteration operations. * * On success, 0 is returned. On failure, ERR is returned. */ __bpf_kfunc int bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words) { struct bpf_iter_bits_kern *kit = (void *)it; u32 nr_bytes = nr_words * sizeof(u64); u32 nr_bits = BYTES_TO_BITS(nr_bytes); int err; BUILD_BUG_ON(sizeof(struct bpf_iter_bits_kern) != sizeof(struct bpf_iter_bits)); BUILD_BUG_ON(__alignof__(struct bpf_iter_bits_kern) != __alignof__(struct bpf_iter_bits)); kit->nr_bits = 0; kit->bits_copy = 0; kit->bit = -1; if (!unsafe_ptr__ign || !nr_words) return -EINVAL; /* Optimization for u64 mask */ if (nr_bits == 64) { err = bpf_probe_read_kernel_common(&kit->bits_copy, nr_bytes, unsafe_ptr__ign); if (err) return -EFAULT; kit->nr_bits = nr_bits; return 0; } /* Fallback to memalloc */ kit->bits = bpf_mem_alloc(&bpf_global_ma, nr_bytes); if (!kit->bits) return -ENOMEM; err = bpf_probe_read_kernel_common(kit->bits, nr_bytes, unsafe_ptr__ign); if (err) { bpf_mem_free(&bpf_global_ma, kit->bits); return err; } kit->nr_bits = nr_bits; return 0; } /** * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits * @it: The bpf_iter_bits to be checked * * This function returns a pointer to a number representing the value of the * next bit in the bits. 
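 *
 * Typical open-coded iteration (sketch; 'words' and 'nr_words' are the
 * same arguments that were passed to bpf_iter_bits_new(), 'nr_set' is a
 * hypothetical counter):
 *
 *	struct bpf_iter_bits it;
 *	int *bit;
 *
 *	bpf_iter_bits_new(&it, words, nr_words);
 *	while ((bit = bpf_iter_bits_next(&it)))
 *		nr_set++;
 *	bpf_iter_bits_destroy(&it);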
* * If there are no further bits available, it returns NULL. */ __bpf_kfunc int *bpf_iter_bits_next(struct bpf_iter_bits *it) { struct bpf_iter_bits_kern *kit = (void *)it; u32 nr_bits = kit->nr_bits; const unsigned long *bits; int bit; if (nr_bits == 0) return NULL; bits = nr_bits == 64 ? &kit->bits_copy : kit->bits; bit = find_next_bit(bits, nr_bits, kit->bit + 1); if (bit >= nr_bits) { kit->nr_bits = 0; return NULL; } kit->bit = bit; return &kit->bit; } /** * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits * @it: The bpf_iter_bits to be destroyed * * Destroy the resource associated with the bpf_iter_bits. */ __bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it) { struct bpf_iter_bits_kern *kit = (void *)it; if (kit->nr_bits <= 64) return; bpf_mem_free(&bpf_global_ma, kit->bits); } __bpf_kfunc_end_defs(); BTF_KFUNCS_START(generic_btf_ids) #ifdef CONFIG_CRASH_DUMP BTF_ID_FLAGS(func, crash_kexec, KF_DESTRUCTIVE) #endif BTF_ID_FLAGS(func, bpf_obj_new_impl, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_percpu_obj_new_impl, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_obj_drop_impl, KF_RELEASE) BTF_ID_FLAGS(func, bpf_percpu_obj_drop_impl, KF_RELEASE) BTF_ID_FLAGS(func, bpf_refcount_acquire_impl, KF_ACQUIRE | KF_RET_NULL | KF_RCU) BTF_ID_FLAGS(func, bpf_list_push_front_impl) BTF_ID_FLAGS(func, bpf_list_push_back_impl) BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE) BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_rbtree_add_impl) BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL) #ifdef CONFIG_CGROUPS BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE) BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_task_under_cgroup, KF_RCU) BTF_ID_FLAGS(func, bpf_task_get_cgroup1, KF_ACQUIRE | KF_RCU | KF_RET_NULL) #endif BTF_ID_FLAGS(func, bpf_task_from_pid, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_throw) BTF_KFUNCS_END(generic_btf_ids) static const struct btf_kfunc_id_set generic_kfunc_set = { .owner = THIS_MODULE, .set = &generic_btf_ids, }; BTF_ID_LIST(generic_dtor_ids) BTF_ID(struct, task_struct) BTF_ID(func, bpf_task_release_dtor) #ifdef CONFIG_CGROUPS BTF_ID(struct, cgroup) BTF_ID(func, bpf_cgroup_release_dtor) #endif BTF_KFUNCS_START(common_btf_ids) BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx) BTF_ID_FLAGS(func, bpf_rdonly_cast) BTF_ID_FLAGS(func, bpf_rcu_read_lock) BTF_ID_FLAGS(func, bpf_rcu_read_unlock) BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL) BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW) BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY) BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU) BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY) #ifdef CONFIG_CGROUPS BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY) BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | 
KF_TRUSTED_ARGS | KF_RCU_PROTECTED) BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY) #endif BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED) BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY) BTF_ID_FLAGS(func, bpf_dynptr_adjust) BTF_ID_FLAGS(func, bpf_dynptr_is_null) BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly) BTF_ID_FLAGS(func, bpf_dynptr_size) BTF_ID_FLAGS(func, bpf_dynptr_clone) BTF_ID_FLAGS(func, bpf_modify_return_test_tp) BTF_ID_FLAGS(func, bpf_wq_init) BTF_ID_FLAGS(func, bpf_wq_set_callback_impl) BTF_ID_FLAGS(func, bpf_wq_start) BTF_ID_FLAGS(func, bpf_preempt_disable) BTF_ID_FLAGS(func, bpf_preempt_enable) BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW) BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY) BTF_KFUNCS_END(common_btf_ids) static const struct btf_kfunc_id_set common_kfunc_set = { .owner = THIS_MODULE, .set = &common_btf_ids, }; static int __init kfunc_init(void) { int ret; const struct btf_id_dtor_kfunc generic_dtors[] = { { .btf_id = generic_dtor_ids[0], .kfunc_btf_id = generic_dtor_ids[1] }, #ifdef CONFIG_CGROUPS { .btf_id = generic_dtor_ids[2], .kfunc_btf_id = generic_dtor_ids[3] }, #endif }; ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &generic_kfunc_set); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &generic_kfunc_set); ret = ret ?: register_btf_id_dtor_kfuncs(generic_dtors, ARRAY_SIZE(generic_dtors), THIS_MODULE); return ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &common_kfunc_set); } late_initcall(kfunc_init); /* Get a pointer to dynptr data up to len bytes for read only access. If * the dynptr doesn't have continuous data up to len bytes, return NULL. */ const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len) { const struct bpf_dynptr *p = (struct bpf_dynptr *)ptr; return bpf_dynptr_slice(p, 0, NULL, len); } /* Get a pointer to dynptr data up to len bytes for read write access. If * the dynptr doesn't have continuous data up to len bytes, or the dynptr * is read only, return NULL. */ void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len) { if (__bpf_dynptr_is_rdonly(ptr)) return NULL; return (void *)__bpf_dynptr_data(ptr, len); }
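/*
 * The contracts documented above are easiest to see from the BPF-program
 * side. The sketch below is a minimal, standalone BPF object -- it is not
 * part of this kernel file. The extern __ksym declarations are assumptions
 * standing in for what selftests normally pull from bpf_kfuncs.h and
 * vmlinux.h; the section names and constants are illustrative only.
 *
 * First, bpf_dynptr_slice_rdwr(): the returned pointer is either a direct
 * pointer into the skb head (writes land immediately) or a pointer into
 * the caller-supplied buffer, in which case the write must be committed
 * with bpf_dynptr_write().
 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags,
			       struct bpf_dynptr *ptr__uninit) __ksym;
extern void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *ptr, __u32 offset,
				   void *buffer, __u32 buffer__szk) __ksym;
extern int bpf_iter_bits_new(struct bpf_iter_bits *it,
			     const __u64 *unsafe_ptr__ign, __u32 nr_words) __ksym;
extern int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym;
extern void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym;

SEC("tc")
int slice_rdwr_demo(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	__u8 buf[4], *p;

	if (bpf_dynptr_from_skb(skb, 0, &ptr))
		return 0;

	p = bpf_dynptr_slice_rdwr(&ptr, 0, buf, sizeof(buf));
	if (!p)
		return 0;

	p[0] = 42;	/* a direct write, if the slice is in the skb head */
	if (p == buf)	/* the slice was copied out: commit the write */
		bpf_dynptr_write(&ptr, 0, buf, sizeof(buf), 0);
	return 0;
}

/*
 * Second, the bits iterator follows the usual new/next/destroy shape:
 * next returns a pointer to the index of each set bit in turn and NULL
 * once the bits are exhausted. destroy always pairs with new; if new
 * failed, next simply returns NULL straight away.
 */
SEC("syscall")
int iter_bits_demo(void *ctx)
{
	__u64 words[2] = { 0xf0f0f0f0f0f0f0f0ULL, 0x1ULL };	/* sample bitmap */
	struct bpf_iter_bits it;
	int *bit, n = 0;

	bpf_iter_bits_new(&it, words, 2);	/* nr_words counts u64 words */
	while ((bit = bpf_iter_bits_next(&it)))
		n++;				/* *bit is a set bit's index */
	bpf_iter_bits_destroy(&it);
	return n;
}

char _license[] SEC("license") = "GPL";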
// SPDX-License-Identifier: GPL-2.0-only /* Linux driver for devices based on the DiBcom DiB0700 USB bridge * * Copyright (C) 2005-9 DiBcom, SA et al */ #include "dib0700.h" #include "dib3000mc.h" #include "dib7000m.h" #include "dib7000p.h" #include "dib8000.h" #include "dib9000.h" #include "mt2060.h" #include "mt2266.h" #include "xc2028.h" #include "xc5000.h" #include "xc4000.h" #include "s5h1411.h" #include "dib0070.h" #include "dib0090.h" #include "lgdt3305.h" #include "mxl5007t.h" #include "mn88472.h" #include "tda18250.h" static int force_lna_activation; module_param(force_lna_activation, int, 0644); MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplifier(s) (LNA), if applicable for the device (default: 0=automatic/off)."); struct dib0700_adapter_state { int (*set_param_save) (struct dvb_frontend *); const struct firmware *frontend_firmware; struct dib7000p_ops dib7000p_ops; struct dib8000_ops dib8000_ops; }; /* Hauppauge Nova-T 500 (aka Bristol) * has a LNA on GPIO0 which is enabled by setting 1 */ static struct mt2060_config bristol_mt2060_config[2] = { { .i2c_address = 0x60, .clock_out = 3, }, { .i2c_address = 0x61, } }; static struct dibx000_agc_config bristol_dib3000p_mt2060_agc_config = { .band_caps = BAND_VHF | BAND_UHF, .setup = (1 << 8) | (5 << 5) | (0 << 4) | (0 << 3) | (0 << 2) | (2 << 0), .agc1_max = 42598, .agc1_min = 17694, .agc2_max = 45875, .agc2_min = 0, .agc1_pt1 = 0, .agc1_pt2 = 59, .agc1_slope1 = 0, .agc1_slope2 = 69, .agc2_pt1 = 0, .agc2_pt2 = 59, .agc2_slope1 = 111, .agc2_slope2 = 28, }; static struct dib3000mc_config bristol_dib3000mc_config[2] = { { .agc = &bristol_dib3000p_mt2060_agc_config, .max_time = 0x196, .ln_adc_level = 0x1cc7, .output_mpeg2_in_188_bytes = 1, }, { .agc = &bristol_dib3000p_mt2060_agc_config, .max_time = 0x196, .ln_adc_level = 0x1cc7, .output_mpeg2_in_188_bytes = 1, } }; static int bristol_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_state *st = adap->dev->priv; if (adap->id == 0) { dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); msleep(10); dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(10); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(10); if (force_lna_activation) dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); else dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 0); if (dib3000mc_i2c_enumeration(&adap->dev->i2c_adap, 2, DEFAULT_DIB3000P_I2C_ADDRESS, bristol_dib3000mc_config) != 0) { dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); msleep(10); return -ENODEV; } } st->mt2060_if1[adap->id] = 1220; return (adap->fe_adap[0].fe = dvb_attach(dib3000mc_attach, &adap->dev->i2c_adap, (10 + adap->id) << 1, &bristol_dib3000mc_config[adap->id])) == NULL ?
-ENODEV : 0; } static int eeprom_read(struct i2c_adapter *adap,u8 adrs,u8 *pval) { struct i2c_msg msg[2] = { { .addr = 0x50, .flags = 0, .buf = &adrs, .len = 1 }, { .addr = 0x50, .flags = I2C_M_RD, .buf = pval, .len = 1 }, }; if (i2c_transfer(adap, msg, 2) != 2) return -EREMOTEIO; return 0; } static int bristol_tuner_attach(struct dvb_usb_adapter *adap) { struct i2c_adapter *prim_i2c = &adap->dev->i2c_adap; struct i2c_adapter *tun_i2c = dib3000mc_get_tuner_i2c_master(adap->fe_adap[0].fe, 1); s8 a; int if1=1220; if (adap->dev->udev->descriptor.idVendor == cpu_to_le16(USB_VID_HAUPPAUGE) && adap->dev->udev->descriptor.idProduct == cpu_to_le16(USB_PID_HAUPPAUGE_NOVA_T_500_2)) { if (!eeprom_read(prim_i2c,0x59 + adap->id,&a)) if1=1220+a; } return dvb_attach(mt2060_attach, adap->fe_adap[0].fe, tun_i2c, &bristol_mt2060_config[adap->id], if1) == NULL ? -ENODEV : 0; } /* STK7700D: Pinnacle/Terratec/Hauppauge Dual DVB-T Diversity */ /* MT226x */ static struct dibx000_agc_config stk7700d_7000p_mt2266_agc_config[2] = { { BAND_UHF, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1, P_agc_inv_pwm1=1, P_agc_inv_pwm2=1, * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */ (0 << 15) | (0 << 14) | (1 << 11) | (1 << 10) | (1 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), 1130, 21, 0, 118, 0, 3530, 1, 0, 65535, 33770, 65535, 23592, 0, 62, 255, 64, 64, 132, 192, 80, 80, 17, 27, 23, 51, 1, }, { BAND_VHF | BAND_LBAND, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=1, P_agc_inv_pwm1=1, P_agc_inv_pwm2=1, * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */ (0 << 15) | (0 << 14) | (1 << 11) | (1 << 10) | (1 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0), 2372, 21, 0, 118, 0, 3530, 1, 0, 65535, 0, 65535, 23592, 0, 128, 128, 128, 0, 128, 253, 81, 0, 17, 27, 23, 51, 1, } }; static struct dibx000_bandwidth_config stk7700d_mt2266_pll_config = { .internal = 60000, .sampling = 30000, .pll_prediv = 1, .pll_ratio = 8, .pll_range = 3, .pll_reset = 1, .pll_bypass = 0, .enable_refdiv = 0, .bypclk_div = 0, .IO_CLK_en_core = 1, .ADClkSrc = 1, .modulo = 2, .sad_cfg = (3 << 14) | (1 << 12) | (524 << 0), .ifreq = 0, .timf = 20452225, }; static struct dib7000p_config stk7700d_dib7000p_mt2266_config[] = { { .output_mpeg2_in_188_bytes = 1, .hostbus_diversity = 1, .tuner_is_baseband = 1, .agc_config_count = 2, .agc = stk7700d_7000p_mt2266_agc_config, .bw = &stk7700d_mt2266_pll_config, .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, }, { .output_mpeg2_in_188_bytes = 1, .hostbus_diversity = 1, .tuner_is_baseband = 1, .agc_config_count = 2, .agc = stk7700d_7000p_mt2266_agc_config, .bw = &stk7700d_mt2266_pll_config, .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, } }; static struct mt2266_config stk7700d_mt2266_config[2] = { { .i2c_address = 0x60 }, { .i2c_address = 0x60 } }; static int stk7700P2_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; if (adap->id == 0) { dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 
0); msleep(10); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(10); if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 18, stk7700d_dib7000p_mt2266_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80 + (adap->id << 1), &stk7700d_dib7000p_mt2266_config[adap->id]); return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } static int stk7700d_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; if (adap->id == 0) { dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(10); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 2, 18, stk7700d_dib7000p_mt2266_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80 + (adap->id << 1), &stk7700d_dib7000p_mt2266_config[adap->id]); return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } static int stk7700d_tuner_attach(struct dvb_usb_adapter *adap) { struct i2c_adapter *tun_i2c; struct dib0700_adapter_state *state = adap->priv; tun_i2c = state->dib7000p_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); return dvb_attach(mt2266_attach, adap->fe_adap[0].fe, tun_i2c, &stk7700d_mt2266_config[adap->id]) == NULL ? -ENODEV : 0; } /* STK7700PH: Digital/Analog Hybrid Tuner, e.g.
Cinergy HT USB HE */ static struct dibx000_agc_config xc3028_agc_config = { .band_caps = BAND_VHF | BAND_UHF, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=0, * P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0, * P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */ .setup = (0 << 15) | (0 << 14) | (0 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0), .inv_gain = 712, .time_stabiliz = 21, .alpha_level = 0, .thlock = 118, .wbd_inv = 0, .wbd_ref = 2867, .wbd_sel = 0, .wbd_alpha = 2, .agc1_max = 0, .agc1_min = 0, .agc2_max = 39718, .agc2_min = 9930, .agc1_pt1 = 0, .agc1_pt2 = 0, .agc1_pt3 = 0, .agc1_slope1 = 0, .agc1_slope2 = 0, .agc2_pt1 = 0, .agc2_pt2 = 128, .agc2_slope1 = 29, .agc2_slope2 = 29, .alpha_mant = 17, .alpha_exp = 27, .beta_mant = 23, .beta_exp = 51, .perform_agc_softsplit = 1, }; /* PLL Configuration for COFDM BW_MHz = 8.00 with external clock = 30.00 */ static struct dibx000_bandwidth_config xc3028_bw_config = { .internal = 60000, .sampling = 30000, .pll_prediv = 1, .pll_ratio = 8, .pll_range = 3, .pll_reset = 1, .pll_bypass = 0, .enable_refdiv = 0, .bypclk_div = 0, .IO_CLK_en_core = 1, .ADClkSrc = 1, .modulo = 0, .sad_cfg = (3 << 14) | (1 << 12) | (524 << 0), /* sad_cfg: refsel, sel, freq_15k */ .ifreq = (1 << 25) | 5816102, /* ifreq = 5.200000 MHz */ .timf = 20452225, .xtal_hz = 30000000, }; static struct dib7000p_config stk7700ph_dib7700_xc3028_config = { .output_mpeg2_in_188_bytes = 1, .tuner_is_baseband = 1, .agc_config_count = 1, .agc = &xc3028_agc_config, .bw = &xc3028_bw_config, .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, }; static int stk7700ph_xc3028_callback(void *ptr, int component, int command, int arg) { struct dvb_usb_adapter *adap = ptr; struct dib0700_adapter_state *state = adap->priv; switch (command) { case XC2028_TUNER_RESET: /* Send the tuner in then out of reset */ state->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 0); msleep(10); state->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1); break; case XC2028_RESET_CLK: case XC2028_I2C_FLUSH: break; default: err("%s: unknown command %d, arg %d\n", __func__, command, arg); return -EINVAL; } return 0; } static struct xc2028_ctrl stk7700ph_xc3028_ctrl = { .fname = XC2028_DEFAULT_FIRMWARE, .max_len = 64, .demod = XC3028_FE_DIBCOM52, }; static struct xc2028_config stk7700ph_xc3028_config = { .i2c_addr = 0x61, .ctrl = &stk7700ph_xc3028_ctrl, }; static int stk7700ph_frontend_attach(struct dvb_usb_adapter *adap) { struct usb_device_descriptor *desc = &adap->dev->udev->descriptor; struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; if (desc->idVendor == cpu_to_le16(USB_VID_PINNACLE) && desc->idProduct == cpu_to_le16(USB_PID_PINNACLE_EXPRESSCARD_320CX)) dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); else dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(10); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); msleep(10); if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 18, &stk7700ph_dib7700_xc3028_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. 
Cannot continue\n", __func__); dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &stk7700ph_dib7700_xc3028_config); return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } static int stk7700ph_tuner_attach(struct dvb_usb_adapter *adap) { struct i2c_adapter *tun_i2c; struct dib0700_adapter_state *state = adap->priv; tun_i2c = state->dib7000p_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); stk7700ph_xc3028_config.i2c_adap = tun_i2c; /* FIXME: generalize & move to common area */ adap->fe_adap[0].fe->callback = stk7700ph_xc3028_callback; return dvb_attach(xc2028_attach, adap->fe_adap[0].fe, &stk7700ph_xc3028_config) == NULL ? -ENODEV : 0; } #define DEFAULT_RC_INTERVAL 50 /* * This function is used only when firmware is < 1.20 version. Newer * firmwares use bulk mode, with functions implemented at dib0700_core, * at dib0700_rc_urb_completion() */ static int dib0700_rc_query_old_firmware(struct dvb_usb_device *d) { enum rc_proto protocol; u32 scancode; u8 toggle; int i; struct dib0700_state *st = d->priv; if (st->fw_version >= 0x10200) { /* For 1.20 firmware , We need to keep the RC polling callback so we can reuse the input device setup in dvb-usb-remote.c. However, the actual work is being done in the bulk URB completion handler. */ return 0; } st->buf[0] = REQUEST_POLL_RC; st->buf[1] = 0; i = dib0700_ctrl_rd(d, st->buf, 2, st->buf, 4); if (i <= 0) { err("RC Query Failed"); return -EIO; } /* losing half of KEY_0 events from Philipps rc5 remotes.. */ if (st->buf[0] == 0 && st->buf[1] == 0 && st->buf[2] == 0 && st->buf[3] == 0) return 0; /* info("%d: %2X %2X %2X %2X",dvb_usb_dib0700_ir_proto,(int)st->buf[3 - 2],(int)st->buf[3 - 3],(int)st->buf[3 - 1],(int)st->buf[3]); */ dib0700_rc_setup(d, NULL); /* reset ir sensor data to prevent false events */ switch (d->props.rc.core.protocol) { case RC_PROTO_BIT_NEC: /* NEC protocol sends repeat code as 0 0 0 FF */ if ((st->buf[3 - 2] == 0x00) && (st->buf[3 - 3] == 0x00) && (st->buf[3] == 0xff)) { rc_repeat(d->rc_dev); return 0; } protocol = RC_PROTO_NEC; scancode = RC_SCANCODE_NEC(st->buf[3 - 2], st->buf[3 - 3]); toggle = 0; break; default: /* RC-5 protocol changes toggle bit on new keypress */ protocol = RC_PROTO_RC5; scancode = RC_SCANCODE_RC5(st->buf[3 - 2], st->buf[3 - 3]); toggle = st->buf[3 - 1]; break; } rc_keydown(d->rc_dev, protocol, scancode, toggle); return 0; } /* STK7700P: Hauppauge Nova-T Stick, AVerMedia Volar */ static struct dibx000_agc_config stk7700p_7000m_mt2060_agc_config = { BAND_UHF | BAND_VHF, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */ (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0), 712, 41, 0, 118, 0, 4095, 0, 0, 42598, 17694, 45875, 2621, 0, 76, 139, 52, 59, 107, 172, 57, 70, 21, 25, 28, 48, 1, { 0, 107, 51800, 24700 }, }; static struct dibx000_agc_config stk7700p_7000p_mt2060_agc_config = { .band_caps = BAND_UHF | BAND_VHF, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=2, P_agc_write=0 */ .setup = (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (2 << 1) | (0 << 0), .inv_gain = 712, .time_stabiliz = 41, .alpha_level = 0, .thlock = 118, 
.wbd_inv = 0, .wbd_ref = 4095, .wbd_sel = 0, .wbd_alpha = 0, .agc1_max = 42598, .agc1_min = 16384, .agc2_max = 42598, .agc2_min = 0, .agc1_pt1 = 0, .agc1_pt2 = 137, .agc1_pt3 = 255, .agc1_slope1 = 0, .agc1_slope2 = 255, .agc2_pt1 = 0, .agc2_pt2 = 0, .agc2_slope1 = 0, .agc2_slope2 = 41, .alpha_mant = 15, .alpha_exp = 25, .beta_mant = 28, .beta_exp = 48, .perform_agc_softsplit = 0, }; static struct dibx000_bandwidth_config stk7700p_pll_config = { .internal = 60000, .sampling = 30000, .pll_prediv = 1, .pll_ratio = 8, .pll_range = 3, .pll_reset = 1, .pll_bypass = 0, .enable_refdiv = 0, .bypclk_div = 0, .IO_CLK_en_core = 1, .ADClkSrc = 1, .modulo = 0, .sad_cfg = (3 << 14) | (1 << 12) | (524 << 0), .ifreq = 60258167, .timf = 20452225, .xtal_hz = 30000000, }; static struct dib7000m_config stk7700p_dib7000m_config = { .dvbt_mode = 1, .output_mpeg2_in_188_bytes = 1, .quartz_direct = 1, .agc_config_count = 1, .agc = &stk7700p_7000m_mt2060_agc_config, .bw = &stk7700p_pll_config, .gpio_dir = DIB7000M_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000M_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000M_GPIO_DEFAULT_PWM_POS, }; static struct dib7000p_config stk7700p_dib7000p_config = { .output_mpeg2_in_188_bytes = 1, .agc_config_count = 1, .agc = &stk7700p_7000p_mt2060_agc_config, .bw = &stk7700p_pll_config, .gpio_dir = DIB7000M_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000M_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000M_GPIO_DEFAULT_PWM_POS, }; static int stk7700p_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_state *st = adap->dev->priv; struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; /* unless there is no real power management in DVB - we leave the device on GPIO6 */ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); msleep(50); dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(10); dib0700_ctrl_clock(adap->dev, 72, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(100); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); st->mt2060_if1[0] = 1220; if (state->dib7000p_ops.dib7000pc_detection(&adap->dev->i2c_adap)) { adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 18, &stk7700p_dib7000p_config); st->is_dib7000pc = 1; } else { memset(&state->dib7000p_ops, 0, sizeof(state->dib7000p_ops)); adap->fe_adap[0].fe = dvb_attach(dib7000m_attach, &adap->dev->i2c_adap, 18, &stk7700p_dib7000m_config); } return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } static struct mt2060_config stk7700p_mt2060_config = { 0x60 }; static int stk7700p_tuner_attach(struct dvb_usb_adapter *adap) { struct i2c_adapter *prim_i2c = &adap->dev->i2c_adap; struct dib0700_state *st = adap->dev->priv; struct i2c_adapter *tun_i2c; struct dib0700_adapter_state *state = adap->priv; s8 a; int if1=1220; if (adap->dev->udev->descriptor.idVendor == cpu_to_le16(USB_VID_HAUPPAUGE) && adap->dev->udev->descriptor.idProduct == cpu_to_le16(USB_PID_HAUPPAUGE_NOVA_T_STICK)) { if (!eeprom_read(prim_i2c,0x58,&a)) if1=1220+a; } if (st->is_dib7000pc) tun_i2c = state->dib7000p_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); else tun_i2c = dib7000m_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); return dvb_attach(mt2060_attach, adap->fe_adap[0].fe, tun_i2c, &stk7700p_mt2060_config, if1) == NULL ? 
-ENODEV : 0; } /* DIB7070 generic */ static struct dibx000_agc_config dib7070_agc_config = { .band_caps = BAND_UHF | BAND_VHF | BAND_LBAND | BAND_SBAND, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */ .setup = (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), .inv_gain = 600, .time_stabiliz = 10, .alpha_level = 0, .thlock = 118, .wbd_inv = 0, .wbd_ref = 3530, .wbd_sel = 1, .wbd_alpha = 5, .agc1_max = 65535, .agc1_min = 0, .agc2_max = 65535, .agc2_min = 0, .agc1_pt1 = 0, .agc1_pt2 = 40, .agc1_pt3 = 183, .agc1_slope1 = 206, .agc1_slope2 = 255, .agc2_pt1 = 72, .agc2_pt2 = 152, .agc2_slope1 = 88, .agc2_slope2 = 90, .alpha_mant = 17, .alpha_exp = 27, .beta_mant = 23, .beta_exp = 51, .perform_agc_softsplit = 0, }; static int dib7070_tuner_reset(struct dvb_frontend *fe, int onoff) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; deb_info("reset: %d", onoff); return state->dib7000p_ops.set_gpio(fe, 8, 0, !onoff); } static int dib7070_tuner_sleep(struct dvb_frontend *fe, int onoff) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; deb_info("sleep: %d", onoff); return state->dib7000p_ops.set_gpio(fe, 9, 0, onoff); } static struct dib0070_config dib7070p_dib0070_config[2] = { { .i2c_address = DEFAULT_DIB0070_I2C_ADDRESS, .reset = dib7070_tuner_reset, .sleep = dib7070_tuner_sleep, .clock_khz = 12000, .clock_pad_drive = 4, .charge_pump = 2, }, { .i2c_address = DEFAULT_DIB0070_I2C_ADDRESS, .reset = dib7070_tuner_reset, .sleep = dib7070_tuner_sleep, .clock_khz = 12000, .charge_pump = 2, } }; static struct dib0070_config dib7770p_dib0070_config = { .i2c_address = DEFAULT_DIB0070_I2C_ADDRESS, .reset = dib7070_tuner_reset, .sleep = dib7070_tuner_sleep, .clock_khz = 12000, .clock_pad_drive = 0, .flip_chip = 1, .charge_pump = 2, }; static int dib7070_set_param_override(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; u16 offset; u8 band = BAND_OF_FREQUENCY(p->frequency/1000); switch (band) { case BAND_VHF: offset = 950; break; case BAND_UHF: default: offset = 550; break; } deb_info("WBD for DiB7000P: %d\n", offset + dib0070_wbd_offset(fe)); state->dib7000p_ops.set_wbd_ref(fe, offset + dib0070_wbd_offset(fe)); return state->set_param_save(fe); } static int dib7770_set_param_override(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; u16 offset; u8 band = BAND_OF_FREQUENCY(p->frequency/1000); switch (band) { case BAND_VHF: state->dib7000p_ops.set_gpio(fe, 0, 0, 1); offset = 850; break; case BAND_UHF: default: state->dib7000p_ops.set_gpio(fe, 0, 0, 0); offset = 250; break; } deb_info("WBD for DiB7000P: %d\n", offset + dib0070_wbd_offset(fe)); state->dib7000p_ops.set_wbd_ref(fe, offset + dib0070_wbd_offset(fe)); return state->set_param_save(fe); } static int dib7770p_tuner_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *st = adap->priv; struct i2c_adapter *tun_i2c = st->dib7000p_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); if (dvb_attach(dib0070_attach, adap->fe_adap[0].fe, tun_i2c, 
&dib7770p_dib0070_config) == NULL) return -ENODEV; st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params; adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib7770_set_param_override; return 0; } static int dib7070p_tuner_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *st = adap->priv; struct i2c_adapter *tun_i2c = st->dib7000p_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); if (adap->id == 0) { if (dvb_attach(dib0070_attach, adap->fe_adap[0].fe, tun_i2c, &dib7070p_dib0070_config[0]) == NULL) return -ENODEV; } else { if (dvb_attach(dib0070_attach, adap->fe_adap[0].fe, tun_i2c, &dib7070p_dib0070_config[1]) == NULL) return -ENODEV; } st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params; adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib7070_set_param_override; return 0; } static int stk7700p_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff) { struct dib0700_adapter_state *state = adapter->priv; struct dib0700_state *st = adapter->dev->priv; if (st->is_dib7000pc) return state->dib7000p_ops.pid_filter(adapter->fe_adap[0].fe, index, pid, onoff); return dib7000m_pid_filter(adapter->fe_adap[0].fe, index, pid, onoff); } static int stk7700p_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff) { struct dib0700_state *st = adapter->dev->priv; struct dib0700_adapter_state *state = adapter->priv; if (st->is_dib7000pc) return state->dib7000p_ops.pid_filter_ctrl(adapter->fe_adap[0].fe, onoff); return dib7000m_pid_filter_ctrl(adapter->fe_adap[0].fe, onoff); } static int stk70x0p_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff) { struct dib0700_adapter_state *state = adapter->priv; return state->dib7000p_ops.pid_filter(adapter->fe_adap[0].fe, index, pid, onoff); } static int stk70x0p_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff) { struct dib0700_adapter_state *state = adapter->priv; return state->dib7000p_ops.pid_filter_ctrl(adapter->fe_adap[0].fe, onoff); } static struct dibx000_bandwidth_config dib7070_bw_config_12_mhz = { .internal = 60000, .sampling = 15000, .pll_prediv = 1, .pll_ratio = 20, .pll_range = 3, .pll_reset = 1, .pll_bypass = 0, .enable_refdiv = 0, .bypclk_div = 0, .IO_CLK_en_core = 1, .ADClkSrc = 1, .modulo = 2, .sad_cfg = (3 << 14) | (1 << 12) | (524 << 0), .ifreq = (0 << 25) | 0, .timf = 20452225, .xtal_hz = 12000000, }; static struct dib7000p_config dib7070p_dib7000p_config = { .output_mpeg2_in_188_bytes = 1, .agc_config_count = 1, .agc = &dib7070_agc_config, .bw = &dib7070_bw_config_12_mhz, .tuner_is_baseband = 1, .spur_protect = 1, .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, .hostbus_diversity = 1, }; /* STK7070P */ static int stk7070p_frontend_attach(struct dvb_usb_adapter *adap) { struct usb_device_descriptor *p = &adap->dev->udev->descriptor; struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; if (p->idVendor == cpu_to_le16(USB_VID_PINNACLE) && p->idProduct == cpu_to_le16(USB_PID_PINNACLE_PCTV72E)) dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); else dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); dib0700_ctrl_clock(adap->dev, 72, 1); msleep(10); dib0700_set_gpio(adap->dev, 
GPIO10, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 18, &dib7070p_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &dib7070p_dib7000p_config); return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } /* STK7770P */ static struct dib7000p_config dib7770p_dib7000p_config = { .output_mpeg2_in_188_bytes = 1, .agc_config_count = 1, .agc = &dib7070_agc_config, .bw = &dib7070_bw_config_12_mhz, .tuner_is_baseband = 1, .spur_protect = 1, .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, .hostbus_diversity = 1, .enable_current_mirror = 1, .disable_sample_and_hold = 0, }; static int stk7770p_frontend_attach(struct dvb_usb_adapter *adap) { struct usb_device_descriptor *p = &adap->dev->udev->descriptor; struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; if (p->idVendor == cpu_to_le16(USB_VID_PINNACLE) && p->idProduct == cpu_to_le16(USB_PID_PINNACLE_PCTV72E)) dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); else dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); dib0700_ctrl_clock(adap->dev, 72, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 18, &dib7770p_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &dib7770p_dib7000p_config); return adap->fe_adap[0].fe == NULL ? 
-ENODEV : 0; } /* DIB807x generic */ static struct dibx000_agc_config dib807x_agc_config[2] = { { BAND_VHF, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, * P_agc_freq_pwm_div=7, P_agc_inv_pwm1=0, * P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0, * P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, * P_agc_write=0 */ (0 << 15) | (0 << 14) | (7 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), /* setup*/ 600, /* inv_gain*/ 10, /* time_stabiliz*/ 0, /* alpha_level*/ 118, /* thlock*/ 0, /* wbd_inv*/ 3530, /* wbd_ref*/ 1, /* wbd_sel*/ 5, /* wbd_alpha*/ 65535, /* agc1_max*/ 0, /* agc1_min*/ 65535, /* agc2_max*/ 0, /* agc2_min*/ 0, /* agc1_pt1*/ 40, /* agc1_pt2*/ 183, /* agc1_pt3*/ 206, /* agc1_slope1*/ 255, /* agc1_slope2*/ 72, /* agc2_pt1*/ 152, /* agc2_pt2*/ 88, /* agc2_slope1*/ 90, /* agc2_slope2*/ 17, /* alpha_mant*/ 27, /* alpha_exp*/ 23, /* beta_mant*/ 51, /* beta_exp*/ 0, /* perform_agc_softsplit*/ }, { BAND_UHF, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, * P_agc_freq_pwm_div=1, P_agc_inv_pwm1=0, * P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0, * P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, * P_agc_write=0 */ (0 << 15) | (0 << 14) | (1 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), /* setup */ 600, /* inv_gain*/ 10, /* time_stabiliz*/ 0, /* alpha_level*/ 118, /* thlock*/ 0, /* wbd_inv*/ 3530, /* wbd_ref*/ 1, /* wbd_sel*/ 5, /* wbd_alpha*/ 65535, /* agc1_max*/ 0, /* agc1_min*/ 65535, /* agc2_max*/ 0, /* agc2_min*/ 0, /* agc1_pt1*/ 40, /* agc1_pt2*/ 183, /* agc1_pt3*/ 206, /* agc1_slope1*/ 255, /* agc1_slope2*/ 72, /* agc2_pt1*/ 152, /* agc2_pt2*/ 88, /* agc2_slope1*/ 90, /* agc2_slope2*/ 17, /* alpha_mant*/ 27, /* alpha_exp*/ 23, /* beta_mant*/ 51, /* beta_exp*/ 0, /* perform_agc_softsplit*/ } }; static struct dibx000_bandwidth_config dib807x_bw_config_12_mhz = { .internal = 60000, .sampling = 15000, .pll_prediv = 1, .pll_ratio = 20, .pll_range = 3, .pll_reset = 1, .pll_bypass = 0, .enable_refdiv = 0, .bypclk_div = 0, .IO_CLK_en_core = 1, .ADClkSrc = 1, .modulo = 2, .sad_cfg = (3 << 14) | (1 << 12) | (599 << 0), /* sad_cfg: refsel, sel, freq_15k */ .ifreq = (0 << 25) | 0, /* ifreq = 0.000000 MHz */ .timf = 18179755, .xtal_hz = 12000000, }; static struct dib8000_config dib807x_dib8000_config[2] = { { .output_mpeg2_in_188_bytes = 1, .agc_config_count = 2, .agc = dib807x_agc_config, .pll = &dib807x_bw_config_12_mhz, .tuner_is_baseband = 1, .gpio_dir = DIB8000_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB8000_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB8000_GPIO_DEFAULT_PWM_POS, .hostbus_diversity = 1, .div_cfg = 1, .agc_control = &dib0070_ctrl_agc_filter, .output_mode = OUTMODE_MPEG2_FIFO, .drives = 0x2d98, }, { .output_mpeg2_in_188_bytes = 1, .agc_config_count = 2, .agc = dib807x_agc_config, .pll = &dib807x_bw_config_12_mhz, .tuner_is_baseband = 1, .gpio_dir = DIB8000_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB8000_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB8000_GPIO_DEFAULT_PWM_POS, .hostbus_diversity = 1, .agc_control = &dib0070_ctrl_agc_filter, .output_mode = OUTMODE_MPEG2_FIFO, .drives = 0x2d98, } }; static int dib80xx_tuner_reset(struct dvb_frontend *fe, int onoff) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; return state->dib8000_ops.set_gpio(fe, 5, 0, !onoff); } static int dib80xx_tuner_sleep(struct dvb_frontend *fe, int onoff) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; return state->dib8000_ops.set_gpio(fe, 0, 0, onoff); } static
const struct dib0070_wbd_gain_cfg dib8070_wbd_gain_cfg[] = { { 240, 7}, { 0xffff, 6}, }; static struct dib0070_config dib807x_dib0070_config[2] = { { .i2c_address = DEFAULT_DIB0070_I2C_ADDRESS, .reset = dib80xx_tuner_reset, .sleep = dib80xx_tuner_sleep, .clock_khz = 12000, .clock_pad_drive = 4, .vga_filter = 1, .force_crystal_mode = 1, .enable_third_order_filter = 1, .charge_pump = 0, .wbd_gain = dib8070_wbd_gain_cfg, .osc_buffer_state = 0, .freq_offset_khz_uhf = -100, .freq_offset_khz_vhf = -100, }, { .i2c_address = DEFAULT_DIB0070_I2C_ADDRESS, .reset = dib80xx_tuner_reset, .sleep = dib80xx_tuner_sleep, .clock_khz = 12000, .clock_pad_drive = 2, .vga_filter = 1, .force_crystal_mode = 1, .enable_third_order_filter = 1, .charge_pump = 0, .wbd_gain = dib8070_wbd_gain_cfg, .osc_buffer_state = 0, .freq_offset_khz_uhf = -25, .freq_offset_khz_vhf = -25, } }; static int dib807x_set_param_override(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; u16 offset = dib0070_wbd_offset(fe); u8 band = BAND_OF_FREQUENCY(p->frequency/1000); switch (band) { case BAND_VHF: offset += 750; break; case BAND_UHF: /* fall-thru wanted */ default: offset += 250; break; } deb_info("WBD for DiB8000: %d\n", offset); state->dib8000_ops.set_wbd_ref(fe, offset); return state->set_param_save(fe); } static int dib807x_tuner_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *st = adap->priv; struct i2c_adapter *tun_i2c = st->dib8000_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); if (adap->id == 0) { if (dvb_attach(dib0070_attach, adap->fe_adap[0].fe, tun_i2c, &dib807x_dib0070_config[0]) == NULL) return -ENODEV; } else { if (dvb_attach(dib0070_attach, adap->fe_adap[0].fe, tun_i2c, &dib807x_dib0070_config[1]) == NULL) return -ENODEV; } st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params; adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib807x_set_param_override; return 0; } static int stk80xx_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff) { struct dib0700_adapter_state *state = adapter->priv; return state->dib8000_ops.pid_filter(adapter->fe_adap[0].fe, index, pid, onoff); } static int stk80xx_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff) { struct dib0700_adapter_state *state = adapter->priv; return state->dib8000_ops.pid_filter_ctrl(adapter->fe_adap[0].fe, onoff); } /* STK807x */ static int stk807x_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib8000_attach, &state->dib8000_ops)) return -ENODEV; dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); dib0700_ctrl_clock(adap->dev, 72, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); state->dib8000_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 18, 0x80, 0); adap->fe_adap[0].fe = state->dib8000_ops.init(&adap->dev->i2c_adap, 0x80, &dib807x_dib8000_config[0]); return adap->fe_adap[0].fe == NULL ? 
-ENODEV : 0; } /* STK807xPVR */ static int stk807xpvr_frontend_attach0(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib8000_attach, &state->dib8000_ops)) return -ENODEV; dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); msleep(30); dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(500); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); dib0700_ctrl_clock(adap->dev, 72, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); /* initialize IC 0 */ state->dib8000_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x22, 0x80, 0); adap->fe_adap[0].fe = state->dib8000_ops.init(&adap->dev->i2c_adap, 0x80, &dib807x_dib8000_config[0]); return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } static int stk807xpvr_frontend_attach1(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib8000_attach, &state->dib8000_ops)) return -ENODEV; /* initialize IC 1 */ state->dib8000_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x12, 0x82, 0); adap->fe_adap[0].fe = state->dib8000_ops.init(&adap->dev->i2c_adap, 0x82, &dib807x_dib8000_config[1]); return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } /* STK8096GP */ static struct dibx000_agc_config dib8090_agc_config[2] = { { .band_caps = BAND_UHF | BAND_VHF | BAND_LBAND | BAND_SBAND, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, * P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0, * P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */ .setup = (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), .inv_gain = 787, .time_stabiliz = 10, .alpha_level = 0, .thlock = 118, .wbd_inv = 0, .wbd_ref = 3530, .wbd_sel = 1, .wbd_alpha = 5, .agc1_max = 65535, .agc1_min = 0, .agc2_max = 65535, .agc2_min = 0, .agc1_pt1 = 0, .agc1_pt2 = 32, .agc1_pt3 = 114, .agc1_slope1 = 143, .agc1_slope2 = 144, .agc2_pt1 = 114, .agc2_pt2 = 227, .agc2_slope1 = 116, .agc2_slope2 = 117, .alpha_mant = 28, .alpha_exp = 26, .beta_mant = 31, .beta_exp = 51, .perform_agc_softsplit = 0, }, { .band_caps = BAND_CBAND, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, * P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0, * P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */ .setup = (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), .inv_gain = 787, .time_stabiliz = 10, .alpha_level = 0, .thlock = 118, .wbd_inv = 0, .wbd_ref = 3530, .wbd_sel = 1, .wbd_alpha = 5, .agc1_max = 0, .agc1_min = 0, .agc2_max = 65535, .agc2_min = 0, .agc1_pt1 = 0, .agc1_pt2 = 32, .agc1_pt3 = 114, .agc1_slope1 = 143, .agc1_slope2 = 144, .agc2_pt1 = 114, .agc2_pt2 = 227, .agc2_slope1 = 116, .agc2_slope2 = 117, .alpha_mant = 28, .alpha_exp = 26, .beta_mant = 31, .beta_exp = 51, .perform_agc_softsplit = 0, } }; static struct dibx000_bandwidth_config dib8090_pll_config_12mhz = { .internal = 54000, .sampling = 13500, .pll_prediv = 1, .pll_ratio = 18, .pll_range = 3, .pll_reset = 1, .pll_bypass = 0, .enable_refdiv = 0, .bypclk_div = 0, .IO_CLK_en_core = 1, .ADClkSrc = 1, .modulo = 2, .sad_cfg = (3 << 14) | (1 << 12) | (599 << 0), .ifreq = (0 << 25) | 0, .timf = 20199727, .xtal_hz = 12000000, }; static int dib8090_get_adc_power(struct
dvb_frontend *fe) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; return state->dib8000_ops.get_adc_power(fe, 1); } static void dib8090_agc_control(struct dvb_frontend *fe, u8 restart) { deb_info("AGC control callback: %i\n", restart); dib0090_dcc_freq(fe, restart); if (restart == 0) /* before AGC startup */ dib0090_set_dc_servo(fe, 1); } static struct dib8000_config dib809x_dib8000_config[2] = { { .output_mpeg2_in_188_bytes = 1, .agc_config_count = 2, .agc = dib8090_agc_config, .agc_control = dib8090_agc_control, .pll = &dib8090_pll_config_12mhz, .tuner_is_baseband = 1, .gpio_dir = DIB8000_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB8000_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB8000_GPIO_DEFAULT_PWM_POS, .hostbus_diversity = 1, .div_cfg = 0x31, .output_mode = OUTMODE_MPEG2_FIFO, .drives = 0x2d98, .diversity_delay = 48, .refclksel = 3, }, { .output_mpeg2_in_188_bytes = 1, .agc_config_count = 2, .agc = dib8090_agc_config, .agc_control = dib8090_agc_control, .pll = &dib8090_pll_config_12mhz, .tuner_is_baseband = 1, .gpio_dir = DIB8000_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB8000_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB8000_GPIO_DEFAULT_PWM_POS, .hostbus_diversity = 1, .div_cfg = 0x31, .output_mode = OUTMODE_DIVERSITY, .drives = 0x2d08, .diversity_delay = 1, .refclksel = 3, } }; static struct dib0090_wbd_slope dib8090_wbd_table[] = { /* max freq ; cold slope ; cold offset ; warm slope ; warm offset ; wbd gain */ { 120, 0, 500, 0, 500, 4 }, /* CBAND */ { 170, 0, 450, 0, 450, 4 }, /* CBAND */ { 380, 48, 373, 28, 259, 6 }, /* VHF */ { 860, 34, 700, 36, 616, 6 }, /* high UHF */ { 0xFFFF, 34, 700, 36, 616, 6 }, /* default */ }; static struct dib0090_config dib809x_dib0090_config = { .io.pll_bypass = 1, .io.pll_range = 1, .io.pll_prediv = 1, .io.pll_loopdiv = 20, .io.adc_clock_ratio = 8, .io.pll_int_loop_filt = 0, .io.clock_khz = 12000, .reset = dib80xx_tuner_reset, .sleep = dib80xx_tuner_sleep, .clkouttobamse = 1, .analog_output = 1, .i2c_address = DEFAULT_DIB0090_I2C_ADDRESS, .use_pwm_agc = 1, .clkoutdrive = 1, .get_adc_power = dib8090_get_adc_power, .freq_offset_khz_uhf = -63, .freq_offset_khz_vhf = -143, .wbd = dib8090_wbd_table, .fref_clock_ratio = 6, }; static u8 dib8090_compute_pll_parameters(struct dvb_frontend *fe) { u8 optimal_pll_ratio = 20; u32 freq_adc, ratio, rest, max = 0; u8 pll_ratio; for (pll_ratio = 17; pll_ratio <= 20; pll_ratio++) { freq_adc = 12 * pll_ratio * (1 << 8) / 16; ratio = ((fe->dtv_property_cache.frequency / 1000) * (1 << 8) / 1000) / freq_adc; rest = ((fe->dtv_property_cache.frequency / 1000) * (1 << 8) / 1000) - ratio * freq_adc; if (rest > freq_adc / 2) rest = freq_adc - rest; deb_info("PLL ratio=%i rest=%i\n", pll_ratio, rest); if ((rest > max) && (rest > 717)) { optimal_pll_ratio = pll_ratio; max = rest; } } deb_info("optimal PLL ratio=%i\n", optimal_pll_ratio); return optimal_pll_ratio; } static int dib8096_set_param_override(struct dvb_frontend *fe) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; u8 pll_ratio, band = BAND_OF_FREQUENCY(fe->dtv_property_cache.frequency / 1000); u16 target, ltgain, rf_gain_limit; u32 timf; int ret = 0; enum frontend_tune_state tune_state = CT_SHUTDOWN; switch (band) { default: deb_info("warning: RF frequency (%i Hz) is outside the supported range, using the VHF switch\n", fe->dtv_property_cache.frequency); fallthrough; case BAND_VHF: state->dib8000_ops.set_gpio(fe, 3, 0, 1); break; case BAND_UHF: state->dib8000_ops.set_gpio(fe, 3, 0, 0);
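/* Note (added explanation, inferred from the band handling above): GPIO 3 is driven high for VHF and low for UHF, so it presumably controls a VHF/UHF RF input switch on this design. */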
break; } ret = state->set_param_save(fe); if (ret < 0) return ret; if (fe->dtv_property_cache.bandwidth_hz != 6000000) { deb_info("only 6 MHz bandwidth is supported\n"); return -EINVAL; } /* Update the PLL ratio if needed */ state->dib8000_ops.update_pll(fe, &dib8090_pll_config_12mhz, fe->dtv_property_cache.bandwidth_hz / 1000, 0); /* Get the optimal PLL ratio to avoid spurs */ pll_ratio = dib8090_compute_pll_parameters(fe); if (pll_ratio == 17) timf = 21387946; else if (pll_ratio == 18) timf = 20199727; else if (pll_ratio == 19) timf = 19136583; else timf = 18179756; /* Update the ratio */ state->dib8000_ops.update_pll(fe, &dib8090_pll_config_12mhz, fe->dtv_property_cache.bandwidth_hz / 1000, pll_ratio); state->dib8000_ops.ctrl_timf(fe, DEMOD_TIMF_SET, timf); if (band != BAND_CBAND) { /* dib0090_get_wbd_target returns the temperature-compensated wbd target */ target = (dib0090_get_wbd_target(fe) * 8 * 18 / 33 + 1) / 2; state->dib8000_ops.set_wbd_ref(fe, target); } if (band == BAND_CBAND) { deb_info("tuning in CBAND - soft-AGC startup\n"); dib0090_set_tune_state(fe, CT_AGC_START); do { ret = dib0090_gain_control(fe); msleep(ret); tune_state = dib0090_get_tune_state(fe); if (tune_state == CT_AGC_STEP_0) state->dib8000_ops.set_gpio(fe, 6, 0, 1); else if (tune_state == CT_AGC_STEP_1) { dib0090_get_current_gain(fe, NULL, NULL, &rf_gain_limit, &ltgain); if (rf_gain_limit < 2000) /* activate the external attenuator in case of very high input power */ state->dib8000_ops.set_gpio(fe, 6, 0, 0); } } while (tune_state < CT_AGC_STOP); deb_info("switching to PWM AGC\n"); dib0090_pwm_gain_reset(fe); state->dib8000_ops.pwm_agc_reset(fe); state->dib8000_ops.set_tune_state(fe, CT_DEMOD_START); } else { /* for everything other than CBAND, use the standard AGC startup */ deb_info("not tuning in CBAND - standard AGC startup\n"); dib0090_pwm_gain_reset(fe); } return 0; } static int dib809x_tuner_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *st = adap->priv; struct i2c_adapter *tun_i2c = st->dib8000_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); /* FIXME: if adap->id != 0, check if it is fe_adap[1] */ if (!dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config)) return -ENODEV; st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params; adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib8096_set_param_override; return 0; } static int stk809x_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib8000_attach, &state->dib8000_ops)) return -ENODEV; dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); dib0700_ctrl_clock(adap->dev, 72, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); state->dib8000_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 18, 0x80, 0); adap->fe_adap[0].fe = state->dib8000_ops.init(&adap->dev->i2c_adap, 0x80, &dib809x_dib8000_config[0]); return adap->fe_adap[0].fe == NULL ?
-ENODEV : 0; } static int stk809x_frontend1_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib8000_attach, &state->dib8000_ops)) return -ENODEV; state->dib8000_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, 0x82, 0); adap->fe_adap[0].fe = state->dib8000_ops.init(&adap->dev->i2c_adap, 0x82, &dib809x_dib8000_config[1]); return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } static int nim8096md_tuner_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *st = adap->priv; struct i2c_adapter *tun_i2c; struct dvb_frontend *fe_slave = st->dib8000_ops.get_slave_frontend(adap->fe_adap[0].fe, 1); if (fe_slave) { tun_i2c = st->dib8000_ops.get_i2c_master(fe_slave, DIBX000_I2C_INTERFACE_TUNER, 1); if (dvb_attach(dib0090_register, fe_slave, tun_i2c, &dib809x_dib0090_config) == NULL) return -ENODEV; fe_slave->dvb = adap->fe_adap[0].fe->dvb; fe_slave->ops.tuner_ops.set_params = dib8096_set_param_override; } tun_i2c = st->dib8000_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config) == NULL) return -ENODEV; st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params; adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib8096_set_param_override; return 0; } static int nim8096md_frontend_attach(struct dvb_usb_adapter *adap) { struct dvb_frontend *fe_slave; struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib8000_attach, &state->dib8000_ops)) return -ENODEV; dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); msleep(20); dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(1000); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); dib0700_ctrl_clock(adap->dev, 72, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); state->dib8000_ops.i2c_enumeration(&adap->dev->i2c_adap, 2, 18, 0x80, 0); adap->fe_adap[0].fe = state->dib8000_ops.init(&adap->dev->i2c_adap, 0x80, &dib809x_dib8000_config[0]); if (adap->fe_adap[0].fe == NULL) return -ENODEV; /* Needed to increment refcount */ if (!dvb_attach(dib8000_attach, &state->dib8000_ops)) return -ENODEV; fe_slave = state->dib8000_ops.init(&adap->dev->i2c_adap, 0x82, &dib809x_dib8000_config[1]); state->dib8000_ops.set_slave_frontend(adap->fe_adap[0].fe, fe_slave); return fe_slave == NULL ? 
-ENODEV : 0; } /* TFE8096P */ static struct dibx000_agc_config dib8096p_agc_config[2] = { { .band_caps = BAND_UHF, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */ .setup = (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), .inv_gain = 684, .time_stabiliz = 10, .alpha_level = 0, .thlock = 118, .wbd_inv = 0, .wbd_ref = 1200, .wbd_sel = 3, .wbd_alpha = 5, .agc1_max = 65535, .agc1_min = 0, .agc2_max = 32767, .agc2_min = 0, .agc1_pt1 = 0, .agc1_pt2 = 0, .agc1_pt3 = 105, .agc1_slope1 = 0, .agc1_slope2 = 156, .agc2_pt1 = 105, .agc2_pt2 = 255, .agc2_slope1 = 54, .agc2_slope2 = 0, .alpha_mant = 28, .alpha_exp = 26, .beta_mant = 31, .beta_exp = 51, .perform_agc_softsplit = 0, }, { .band_caps = BAND_FM | BAND_VHF | BAND_CBAND, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */ .setup = (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), .inv_gain = 732, .time_stabiliz = 10, .alpha_level = 0, .thlock = 118, .wbd_inv = 0, .wbd_ref = 1200, .wbd_sel = 3, .wbd_alpha = 5, .agc1_max = 65535, .agc1_min = 0, .agc2_max = 32767, .agc2_min = 0, .agc1_pt1 = 0, .agc1_pt2 = 0, .agc1_pt3 = 98, .agc1_slope1 = 0, .agc1_slope2 = 167, .agc2_pt1 = 98, .agc2_pt2 = 255, .agc2_slope1 = 52, .agc2_slope2 = 0, .alpha_mant = 28, .alpha_exp = 26, .beta_mant = 31, .beta_exp = 51, .perform_agc_softsplit = 0, } }; static struct dibx000_bandwidth_config dib8096p_clock_config_12_mhz = { .internal = 108000, .sampling = 13500, .pll_prediv = 1, .pll_ratio = 9, .pll_range = 1, .pll_reset = 0, .pll_bypass = 0, .enable_refdiv = 0, .bypclk_div = 0, .IO_CLK_en_core = 0, .ADClkSrc = 0, .modulo = 2, .sad_cfg = (3 << 14) | (1 << 12) | (524 << 0), .ifreq = (0 << 25) | 0, .timf = 20199729, .xtal_hz = 12000000, }; static struct dib8000_config tfe8096p_dib8000_config = { .output_mpeg2_in_188_bytes = 1, .hostbus_diversity = 1, .update_lna = NULL, .agc_config_count = 2, .agc = dib8096p_agc_config, .pll = &dib8096p_clock_config_12_mhz, .gpio_dir = DIB8000_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB8000_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB8000_GPIO_DEFAULT_PWM_POS, .agc_control = NULL, .diversity_delay = 48, .output_mode = OUTMODE_MPEG2_FIFO, .enMpegOutput = 1, }; static struct dib0090_wbd_slope dib8096p_wbd_table[] = { { 380, 81, 850, 64, 540, 4}, { 860, 51, 866, 21, 375, 4}, {1700, 0, 250, 0, 100, 6}, {2600, 0, 250, 0, 100, 6}, { 0xFFFF, 0, 0, 0, 0, 0}, }; static struct dib0090_config tfe8096p_dib0090_config = { .io.clock_khz = 12000, .io.pll_bypass = 0, .io.pll_range = 0, .io.pll_prediv = 3, .io.pll_loopdiv = 6, .io.adc_clock_ratio = 0, .io.pll_int_loop_filt = 0, .freq_offset_khz_uhf = -143, .freq_offset_khz_vhf = -143, .get_adc_power = dib8090_get_adc_power, .clkouttobamse = 1, .analog_output = 0, .wbd_vhf_offset = 0, .wbd_cband_offset = 0, .use_pwm_agc = 1, .clkoutdrive = 0, .fref_clock_ratio = 1, .ls_cfg_pad_drv = 0, .data_tx_drv = 0, .low_if = NULL, .in_soc = 1, .force_cband_input = 0, }; struct dibx090p_best_adc { u32 timf; u32 pll_loopdiv; u32 pll_prediv; }; static int dib8096p_get_best_sampling(struct dvb_frontend *fe, struct dibx090p_best_adc *adc) { u8 spur = 0, prediv = 0, loopdiv = 0, min_prediv = 1, max_prediv = 1; u16 xtal = 12000;
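/* Added explanation of the search below: it sweeps the PLL predivider and loop divider, keeps only combinations whose phase-comparator, SDRAM and demod clocks stay inside the limits declared here, and rejects any sampling frequency fs that would place one of its harmonics inside the tuned channel (an in-band fs harmonic would appear as a spur); the first spur-free combination wins. */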
u16 fcp_min = 1900; /* PLL, Minimum Frequency of phase comparator (KHz) */ u16 fcp_max = 20000; /* PLL, Maximum Frequency of phase comparator (KHz) */ u32 fmem_max = 140000; /* 140MHz max SDRAM freq */ u32 fdem_min = 66000; u32 fcp = 0, fs = 0, fdem = 0, fmem = 0; u32 harmonic_id = 0; adc->timf = 0; adc->pll_loopdiv = loopdiv; adc->pll_prediv = prediv; deb_info("bandwidth = %d", fe->dtv_property_cache.bandwidth_hz); /* Find Min and Max prediv */ while ((xtal / max_prediv) >= fcp_min) max_prediv++; max_prediv--; min_prediv = max_prediv; while ((xtal / min_prediv) <= fcp_max) { min_prediv--; if (min_prediv == 1) break; } deb_info("MIN prediv = %d : MAX prediv = %d", min_prediv, max_prediv); min_prediv = 1; for (prediv = min_prediv; prediv < max_prediv; prediv++) { fcp = xtal / prediv; if (fcp > fcp_min && fcp < fcp_max) { for (loopdiv = 1; loopdiv < 64; loopdiv++) { fmem = ((xtal/prediv) * loopdiv); fdem = fmem / 2; fs = fdem / 4; /* test min/max system restrictions */ if ((fdem >= fdem_min) && (fmem <= fmem_max) && (fs >= fe->dtv_property_cache.bandwidth_hz / 1000)) { spur = 0; /* test fs harmonics positions */ for (harmonic_id = (fe->dtv_property_cache.frequency / (1000 * fs)); harmonic_id <= ((fe->dtv_property_cache.frequency / (1000 * fs)) + 1); harmonic_id++) { if (((fs * harmonic_id) >= (fe->dtv_property_cache.frequency / 1000 - (fe->dtv_property_cache.bandwidth_hz / 2000))) && ((fs * harmonic_id) <= (fe->dtv_property_cache.frequency / 1000 + (fe->dtv_property_cache.bandwidth_hz / 2000)))) { spur = 1; break; } } if (!spur) { adc->pll_loopdiv = loopdiv; adc->pll_prediv = prediv; adc->timf = (4260880253U / fdem) * (1 << 8); adc->timf += ((4260880253U % fdem) << 8) / fdem; deb_info("RF %6d; BW %6d; Xtal %6d; Fmem %6d; Fdem %6d; Fs %6d; Prediv %2d; Loopdiv %2d; Timf %8d;", fe->dtv_property_cache.frequency, fe->dtv_property_cache.bandwidth_hz, xtal, fmem, fdem, fs, prediv, loopdiv, adc->timf); break; } } } } if (!spur) break; } if (adc->pll_loopdiv == 0 && adc->pll_prediv == 0) return -EINVAL; return 0; } static int dib8096p_agc_startup(struct dvb_frontend *fe) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; struct dibx000_bandwidth_config pll; struct dibx090p_best_adc adc; u16 target; int ret; ret = state->set_param_save(fe); if (ret < 0) return ret; memset(&pll, 0, sizeof(struct dibx000_bandwidth_config)); dib0090_pwm_gain_reset(fe); /* dib0090_get_wbd_target is returning any possible temperature compensated wbd-target */ target = (dib0090_get_wbd_target(fe) * 8 + 1) / 2; state->dib8000_ops.set_wbd_ref(fe, target); if (dib8096p_get_best_sampling(fe, &adc) == 0) { pll.pll_ratio = adc.pll_loopdiv; pll.pll_prediv = adc.pll_prediv; dib0700_set_i2c_speed(adap->dev, 200); state->dib8000_ops.update_pll(fe, &pll, fe->dtv_property_cache.bandwidth_hz / 1000, 0); state->dib8000_ops.ctrl_timf(fe, DEMOD_TIMF_SET, adc.timf); dib0700_set_i2c_speed(adap->dev, 1000); } return 0; } static int tfe8096p_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_state *st = adap->dev->priv; u32 fw_version; struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib8000_attach, &state->dib8000_ops)) return -ENODEV; dib0700_get_version(adap->dev, NULL, NULL, &fw_version, NULL); if (fw_version >= 0x10200) st->fw_use_new_i2c_api = 1; dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); 
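/* Added note: the GPIO sequence below mirrors the bring-up used by the other DiBcom reference designs in this file: hold GPIO10 low, enable the clock output (the 72 passed to dib0700_ctrl_clock is presumably the clock rate in MHz), then release GPIO10 and GPIO0 to take the demod out of reset. */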
dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); dib0700_ctrl_clock(adap->dev, 72, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); state->dib8000_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, 0x80, 1); adap->fe_adap[0].fe = state->dib8000_ops.init(&adap->dev->i2c_adap, 0x80, &tfe8096p_dib8000_config); return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } static int tfe8096p_tuner_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *st = adap->priv; struct i2c_adapter *tun_i2c = st->dib8000_ops.get_i2c_tuner(adap->fe_adap[0].fe); tfe8096p_dib0090_config.reset = st->dib8000_ops.tuner_sleep; tfe8096p_dib0090_config.sleep = st->dib8000_ops.tuner_sleep; tfe8096p_dib0090_config.wbd = dib8096p_wbd_table; if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &tfe8096p_dib0090_config) == NULL) return -ENODEV; st->dib8000_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1); st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params; adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib8096p_agc_startup; return 0; } /* STK9090M */ static int dib90x0_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff) { return dib9000_fw_pid_filter(adapter->fe_adap[0].fe, index, pid, onoff); } static int dib90x0_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff) { return dib9000_fw_pid_filter_ctrl(adapter->fe_adap[0].fe, onoff); } static int dib90x0_tuner_reset(struct dvb_frontend *fe, int onoff) { return dib9000_set_gpio(fe, 5, 0, !onoff); } static int dib90x0_tuner_sleep(struct dvb_frontend *fe, int onoff) { return dib9000_set_gpio(fe, 0, 0, onoff); } static int dib01x0_pmu_update(struct i2c_adapter *i2c, u16 *data, u8 len) { u8 wb[4] = { 0xc >> 8, 0xc & 0xff, 0, 0 }; u8 rb[2]; struct i2c_msg msg[2] = { {.addr = 0x1e >> 1, .flags = 0, .buf = wb, .len = 2}, {.addr = 0x1e >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2}, }; u8 index_data; dibx000_i2c_set_speed(i2c, 250); if (i2c_transfer(i2c, msg, 2) != 2) return -EIO; switch (rb[0] << 8 | rb[1]) { case 0: deb_info("Found DiB0170 rev1: This version of DiB0170 is not supported any longer.\n"); return -EIO; case 1: deb_info("Found DiB0170 rev2"); break; case 2: deb_info("Found DiB0190 rev2"); break; default: deb_info("DiB01x0 not found"); return -EIO; } for (index_data = 0; index_data < len; index_data += 2) { wb[2] = (data[index_data + 1] >> 8) & 0xff; wb[3] = (data[index_data + 1]) & 0xff; if (data[index_data] == 0) { wb[0] = (data[index_data] >> 8) & 0xff; wb[1] = (data[index_data]) & 0xff; msg[0].len = 2; if (i2c_transfer(i2c, msg, 2) != 2) return -EIO; wb[2] |= rb[0]; wb[3] |= rb[1] & ~(3 << 4); } wb[0] = (data[index_data] >> 8)&0xff; wb[1] = (data[index_data])&0xff; msg[0].len = 4; if (i2c_transfer(i2c, &msg[0], 1) != 1) return -EIO; } return 0; } static struct dib9000_config stk9090m_config = { .output_mpeg2_in_188_bytes = 1, .output_mode = OUTMODE_MPEG2_FIFO, .vcxo_timer = 279620, .timing_frequency = 20452225, .demod_clock_khz = 60000, .xtal_clock_khz = 30000, .if_drives = (0 << 15) | (1 << 13) | (0 << 12) | (3 << 10) | (0 << 9) | (1 << 7) | (0 << 6) | (0 << 4) | (1 << 3) | (1 << 1) | (0), .subband = { 2, { { 240, { BOARD_GPIO_COMPONENT_DEMOD, BOARD_GPIO_FUNCTION_SUBBAND_GPIO, 0x0008, 0x0000, 0x0008 } }, /* GPIO 3 to 1 for VHF */ { 890, { BOARD_GPIO_COMPONENT_DEMOD, BOARD_GPIO_FUNCTION_SUBBAND_GPIO, 0x0008, 0x0000, 0x0000 } }, /* GPIO 3 to 0 for UHF */ { 0 }, }, }, .gpio_function = { { .component = BOARD_GPIO_COMPONENT_DEMOD, 
.function = BOARD_GPIO_FUNCTION_COMPONENT_ON, .mask = 0x10 | 0x21, .direction = 0 & ~0x21, .value = (0x10 & ~0x1) | 0x20 }, { .component = BOARD_GPIO_COMPONENT_DEMOD, .function = BOARD_GPIO_FUNCTION_COMPONENT_OFF, .mask = 0x10 | 0x21, .direction = 0 & ~0x21, .value = 0 | 0x21 }, }, }; static struct dib9000_config nim9090md_config[2] = { { .output_mpeg2_in_188_bytes = 1, .output_mode = OUTMODE_MPEG2_FIFO, .vcxo_timer = 279620, .timing_frequency = 20452225, .demod_clock_khz = 60000, .xtal_clock_khz = 30000, .if_drives = (0 << 15) | (1 << 13) | (0 << 12) | (3 << 10) | (0 << 9) | (1 << 7) | (0 << 6) | (0 << 4) | (1 << 3) | (1 << 1) | (0), }, { .output_mpeg2_in_188_bytes = 1, .output_mode = OUTMODE_DIVERSITY, .vcxo_timer = 279620, .timing_frequency = 20452225, .demod_clock_khz = 60000, .xtal_clock_khz = 30000, .if_drives = (0 << 15) | (1 << 13) | (0 << 12) | (3 << 10) | (0 << 9) | (1 << 7) | (0 << 6) | (0 << 4) | (1 << 3) | (1 << 1) | (0), .subband = { 2, { { 240, { BOARD_GPIO_COMPONENT_DEMOD, BOARD_GPIO_FUNCTION_SUBBAND_GPIO, 0x0006, 0x0000, 0x0006 } }, /* GPIO 1 and 2 to 1 for VHF */ { 890, { BOARD_GPIO_COMPONENT_DEMOD, BOARD_GPIO_FUNCTION_SUBBAND_GPIO, 0x0006, 0x0000, 0x0000 } }, /* GPIO 1 and 2 to 0 for UHF */ { 0 }, }, }, .gpio_function = { { .component = BOARD_GPIO_COMPONENT_DEMOD, .function = BOARD_GPIO_FUNCTION_COMPONENT_ON, .mask = 0x10 | 0x21, .direction = 0 & ~0x21, .value = (0x10 & ~0x1) | 0x20 }, { .component = BOARD_GPIO_COMPONENT_DEMOD, .function = BOARD_GPIO_FUNCTION_COMPONENT_OFF, .mask = 0x10 | 0x21, .direction = 0 & ~0x21, .value = 0 | 0x21 }, }, } }; static struct dib0090_config dib9090_dib0090_config = { .io.pll_bypass = 0, .io.pll_range = 1, .io.pll_prediv = 1, .io.pll_loopdiv = 8, .io.adc_clock_ratio = 8, .io.pll_int_loop_filt = 0, .io.clock_khz = 30000, .reset = dib90x0_tuner_reset, .sleep = dib90x0_tuner_sleep, .clkouttobamse = 0, .analog_output = 0, .use_pwm_agc = 0, .clkoutdrive = 0, .freq_offset_khz_uhf = 0, .freq_offset_khz_vhf = 0, }; static struct dib0090_config nim9090md_dib0090_config[2] = { { .io.pll_bypass = 0, .io.pll_range = 1, .io.pll_prediv = 1, .io.pll_loopdiv = 8, .io.adc_clock_ratio = 8, .io.pll_int_loop_filt = 0, .io.clock_khz = 30000, .reset = dib90x0_tuner_reset, .sleep = dib90x0_tuner_sleep, .clkouttobamse = 1, .analog_output = 0, .use_pwm_agc = 0, .clkoutdrive = 0, .freq_offset_khz_uhf = 0, .freq_offset_khz_vhf = 0, }, { .io.pll_bypass = 0, .io.pll_range = 1, .io.pll_prediv = 1, .io.pll_loopdiv = 8, .io.adc_clock_ratio = 8, .io.pll_int_loop_filt = 0, .io.clock_khz = 30000, .reset = dib90x0_tuner_reset, .sleep = dib90x0_tuner_sleep, .clkouttobamse = 0, .analog_output = 0, .use_pwm_agc = 0, .clkoutdrive = 0, .freq_offset_khz_uhf = 0, .freq_offset_khz_vhf = 0, } }; static int stk9090m_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; struct dib0700_state *st = adap->dev->priv; u32 fw_version; /* Make use of the new i2c functions from FW 1.20 */ dib0700_get_version(adap->dev, NULL, NULL, &fw_version, NULL); if (fw_version >= 0x10200) st->fw_use_new_i2c_api = 1; dib0700_set_i2c_speed(adap->dev, 340); dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); dib0700_ctrl_clock(adap->dev, 72, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO0, 
GPIO_OUT, 1); dib9000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, 0x80); if (request_firmware(&state->frontend_firmware, "dib9090.fw", &adap->dev->udev->dev)) { deb_info("%s: Upload failed. (file not found?)\n", __func__); return -ENODEV; } else { deb_info("%s: firmware read %zu bytes.\n", __func__, state->frontend_firmware->size); } stk9090m_config.microcode_B_fe_size = state->frontend_firmware->size; stk9090m_config.microcode_B_fe_buffer = state->frontend_firmware->data; adap->fe_adap[0].fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &stk9090m_config); if (!adap->fe_adap[0].fe) { release_firmware(state->frontend_firmware); return -ENODEV; } return 0; } static int dib9090_tuner_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; struct i2c_adapter *i2c = dib9000_get_tuner_interface(adap->fe_adap[0].fe); u16 data_dib190[10] = { 1, 0x1374, 2, 0x01a2, 7, 0x0020, 0, 0x00ef, 8, 0x0486, }; if (!IS_ENABLED(CONFIG_DVB_DIB9000)) return -ENODEV; if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &dib9090_dib0090_config) == NULL) return -ENODEV; i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0); if (!i2c) return -ENODEV; if (dib01x0_pmu_update(i2c, data_dib190, 10) != 0) return -ENODEV; dib0700_set_i2c_speed(adap->dev, 1500); if (dib9000_firmware_post_pll_init(adap->fe_adap[0].fe) < 0) return -ENODEV; release_firmware(state->frontend_firmware); return 0; } static int nim9090md_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; struct dib0700_state *st = adap->dev->priv; struct i2c_adapter *i2c; struct dvb_frontend *fe_slave; u32 fw_version; /* Make use of the new i2c functions from FW 1.20 */ dib0700_get_version(adap->dev, NULL, NULL, &fw_version, NULL); if (fw_version >= 0x10200) st->fw_use_new_i2c_api = 1; dib0700_set_i2c_speed(adap->dev, 340); dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); dib0700_ctrl_clock(adap->dev, 72, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); if (request_firmware(&state->frontend_firmware, "dib9090.fw", &adap->dev->udev->dev)) { deb_info("%s: Upload failed. 
(file not found?)\n", __func__); return -EIO; } else { deb_info("%s: firmware read %zu bytes.\n", __func__, state->frontend_firmware->size); } nim9090md_config[0].microcode_B_fe_size = state->frontend_firmware->size; nim9090md_config[0].microcode_B_fe_buffer = state->frontend_firmware->data; nim9090md_config[1].microcode_B_fe_size = state->frontend_firmware->size; nim9090md_config[1].microcode_B_fe_buffer = state->frontend_firmware->data; dib9000_i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, 0x80); adap->fe_adap[0].fe = dvb_attach(dib9000_attach, &adap->dev->i2c_adap, 0x80, &nim9090md_config[0]); if (!adap->fe_adap[0].fe) { release_firmware(state->frontend_firmware); return -ENODEV; } i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_3_4, 0); dib9000_i2c_enumeration(i2c, 1, 0x12, 0x82); fe_slave = dvb_attach(dib9000_attach, i2c, 0x82, &nim9090md_config[1]); dib9000_set_slave_frontend(adap->fe_adap[0].fe, fe_slave); if (!fe_slave) { release_firmware(state->frontend_firmware); return -ENODEV; } return 0; } static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; struct i2c_adapter *i2c; struct dvb_frontend *fe_slave; u16 data_dib190[10] = { 1, 0x5374, 2, 0x01ae, 7, 0x0020, 0, 0x00ef, 8, 0x0406, }; if (!IS_ENABLED(CONFIG_DVB_DIB9000)) return -ENODEV; i2c = dib9000_get_tuner_interface(adap->fe_adap[0].fe); if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &nim9090md_dib0090_config[0]) == NULL) return -ENODEV; i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0); if (!i2c) return -ENODEV; if (dib01x0_pmu_update(i2c, data_dib190, 10) < 0) return -ENODEV; dib0700_set_i2c_speed(adap->dev, 1500); if (dib9000_firmware_post_pll_init(adap->fe_adap[0].fe) < 0) return -ENODEV; fe_slave = dib9000_get_slave_frontend(adap->fe_adap[0].fe, 1); if (fe_slave != NULL) { i2c = dib9000_get_component_bus_interface(adap->fe_adap[0].fe); dib9000_set_i2c_adapter(fe_slave, i2c); i2c = dib9000_get_tuner_interface(fe_slave); if (dvb_attach(dib0090_fw_register, fe_slave, i2c, &nim9090md_dib0090_config[1]) == NULL) return -ENODEV; fe_slave->dvb = adap->fe_adap[0].fe->dvb; dib9000_fw_set_component_bus_speed(adap->fe_adap[0].fe, 1500); if (dib9000_firmware_post_pll_init(fe_slave) < 0) return -ENODEV; } release_firmware(state->frontend_firmware); return 0; } /* NIM7090 */ static int dib7090p_get_best_sampling(struct dvb_frontend *fe , struct dibx090p_best_adc *adc) { u8 spur = 0, prediv = 0, loopdiv = 0, min_prediv = 1, max_prediv = 1; u16 xtal = 12000; u32 fcp_min = 1900; /* PLL Minimum Frequency comparator KHz */ u32 fcp_max = 20000; /* PLL Maximum Frequency comparator KHz */ u32 fdem_max = 76000; u32 fdem_min = 69500; u32 fcp = 0, fs = 0, fdem = 0; u32 harmonic_id = 0; adc->pll_loopdiv = loopdiv; adc->pll_prediv = prediv; adc->timf = 0; deb_info("bandwidth = %d fdem_min =%d", fe->dtv_property_cache.bandwidth_hz, fdem_min); /* Find Min and Max prediv */ while ((xtal/max_prediv) >= fcp_min) max_prediv++; max_prediv--; min_prediv = max_prediv; while ((xtal/min_prediv) <= fcp_max) { min_prediv--; if (min_prediv == 1) break; } deb_info("MIN prediv = %d : MAX prediv = %d", min_prediv, max_prediv); min_prediv = 2; for (prediv = min_prediv ; prediv < max_prediv; prediv++) { fcp = xtal / prediv; if (fcp > fcp_min && fcp < fcp_max) { for (loopdiv = 1 ; loopdiv < 64 ; loopdiv++) { fdem = ((xtal/prediv) * loopdiv); fs = fdem / 4; /* test min/max system restrictions */ if ((fdem >= fdem_min) && (fdem <= 
fdem_max) && (fs >= fe->dtv_property_cache.bandwidth_hz/1000)) { spur = 0; /* test fs harmonics positions */ for (harmonic_id = (fe->dtv_property_cache.frequency / (1000*fs)) ; harmonic_id <= ((fe->dtv_property_cache.frequency / (1000*fs))+1) ; harmonic_id++) { if (((fs*harmonic_id) >= ((fe->dtv_property_cache.frequency/1000) - (fe->dtv_property_cache.bandwidth_hz/2000))) && ((fs*harmonic_id) <= ((fe->dtv_property_cache.frequency/1000) + (fe->dtv_property_cache.bandwidth_hz/2000)))) { spur = 1; break; } } if (!spur) { adc->pll_loopdiv = loopdiv; adc->pll_prediv = prediv; adc->timf = 2396745143UL/fdem*(1 << 9); adc->timf += ((2396745143UL%fdem) << 9)/fdem; deb_info("loopdiv=%i prediv=%i timf=%i", loopdiv, prediv, adc->timf); break; } } } } if (!spur) break; } if (adc->pll_loopdiv == 0 && adc->pll_prediv == 0) return -EINVAL; return 0; } static int dib7090_agc_startup(struct dvb_frontend *fe) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; struct dibx000_bandwidth_config pll; u16 target; struct dibx090p_best_adc adc; int ret; ret = state->set_param_save(fe); if (ret < 0) return ret; memset(&pll, 0, sizeof(struct dibx000_bandwidth_config)); dib0090_pwm_gain_reset(fe); target = (dib0090_get_wbd_target(fe) * 8 + 1) / 2; state->dib7000p_ops.set_wbd_ref(fe, target); if (dib7090p_get_best_sampling(fe, &adc) == 0) { pll.pll_ratio = adc.pll_loopdiv; pll.pll_prediv = adc.pll_prediv; state->dib7000p_ops.update_pll(fe, &pll); state->dib7000p_ops.ctrl_timf(fe, DEMOD_TIMF_SET, adc.timf); } return 0; } static int dib7090_agc_restart(struct dvb_frontend *fe, u8 restart) { deb_info("AGC restart callback: %d", restart); if (restart == 0) /* before AGC startup */ dib0090_set_dc_servo(fe, 1); return 0; } static int tfe7790p_update_lna(struct dvb_frontend *fe, u16 agc_global) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; deb_info("update LNA: agc global=%i", agc_global); if (agc_global < 25000) { state->dib7000p_ops.set_gpio(fe, 8, 0, 0); state->dib7000p_ops.set_agc1_min(fe, 0); } else { state->dib7000p_ops.set_gpio(fe, 8, 0, 1); state->dib7000p_ops.set_agc1_min(fe, 32768); } return 0; } static struct dib0090_wbd_slope dib7090_wbd_table[] = { { 380, 81, 850, 64, 540, 4}, { 860, 51, 866, 21, 375, 4}, {1700, 0, 250, 0, 100, 6}, {2600, 0, 250, 0, 100, 6}, { 0xFFFF, 0, 0, 0, 0, 0}, }; static struct dibx000_agc_config dib7090_agc_config[2] = { { .band_caps = BAND_UHF, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */ .setup = (0 << 15) | (0 << 14) | (5 << 11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), .inv_gain = 687, .time_stabiliz = 10, .alpha_level = 0, .thlock = 118, .wbd_inv = 0, .wbd_ref = 1200, .wbd_sel = 3, .wbd_alpha = 5, .agc1_max = 65535, .agc1_min = 32768, .agc2_max = 65535, .agc2_min = 0, .agc1_pt1 = 0, .agc1_pt2 = 32, .agc1_pt3 = 114, .agc1_slope1 = 143, .agc1_slope2 = 144, .agc2_pt1 = 114, .agc2_pt2 = 227, .agc2_slope1 = 116, .agc2_slope2 = 117, .alpha_mant = 18, .alpha_exp = 0, .beta_mant = 20, .beta_exp = 59, .perform_agc_softsplit = 0, }, { .band_caps = BAND_FM | BAND_VHF | BAND_CBAND, /* P_agc_use_sd_mod1=0, P_agc_use_sd_mod2=0, P_agc_freq_pwm_div=5, P_agc_inv_pwm1=0, P_agc_inv_pwm2=0, * P_agc_inh_dc_rv_est=0, P_agc_time_est=3, P_agc_freeze=0, P_agc_nb_est=5, P_agc_write=0 */ .setup = (0 << 15) | (0 << 14) | (5 <<
11) | (0 << 10) | (0 << 9) | (0 << 8) | (3 << 5) | (0 << 4) | (5 << 1) | (0 << 0), .inv_gain = 732, .time_stabiliz = 10, .alpha_level = 0, .thlock = 118, .wbd_inv = 0, .wbd_ref = 1200, .wbd_sel = 3, .wbd_alpha = 5, .agc1_max = 65535, .agc1_min = 0, .agc2_max = 65535, .agc2_min = 0, .agc1_pt1 = 0, .agc1_pt2 = 0, .agc1_pt3 = 98, .agc1_slope1 = 0, .agc1_slope2 = 167, .agc2_pt1 = 98, .agc2_pt2 = 255, .agc2_slope1 = 104, .agc2_slope2 = 0, .alpha_mant = 18, .alpha_exp = 0, .beta_mant = 20, .beta_exp = 59, .perform_agc_softsplit = 0, } }; static struct dibx000_bandwidth_config dib7090_clock_config_12_mhz = { .internal = 60000, .sampling = 15000, .pll_prediv = 1, .pll_ratio = 5, .pll_range = 0, .pll_reset = 0, .pll_bypass = 0, .enable_refdiv = 0, .bypclk_div = 0, .IO_CLK_en_core = 1, .ADClkSrc = 1, .modulo = 2, .sad_cfg = (3 << 14) | (1 << 12) | (524 << 0), .ifreq = (0 << 25) | 0, .timf = 20452225, .xtal_hz = 15000000, }; static struct dib7000p_config nim7090_dib7000p_config = { .output_mpeg2_in_188_bytes = 1, .hostbus_diversity = 1, .tuner_is_baseband = 1, .update_lna = tfe7790p_update_lna, /* GPIO used is the same as TFE7790 */ .agc_config_count = 2, .agc = dib7090_agc_config, .bw = &dib7090_clock_config_12_mhz, .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, .pwm_freq_div = 0, .agc_control = dib7090_agc_restart, .spur_protect = 0, .disable_sample_and_hold = 0, .enable_current_mirror = 0, .diversity_delay = 0, .output_mode = OUTMODE_MPEG2_FIFO, .enMpegOutput = 1, }; static int tfe7090p_pvr_update_lna(struct dvb_frontend *fe, u16 agc_global) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dib0700_adapter_state *state = adap->priv; deb_info("TFE7090P-PVR update LNA: agc global=%i", agc_global); if (agc_global < 25000) { state->dib7000p_ops.set_gpio(fe, 5, 0, 0); state->dib7000p_ops.set_agc1_min(fe, 0); } else { state->dib7000p_ops.set_gpio(fe, 5, 0, 1); state->dib7000p_ops.set_agc1_min(fe, 32768); } return 0; } static struct dib7000p_config tfe7090pvr_dib7000p_config[2] = { { .output_mpeg2_in_188_bytes = 1, .hostbus_diversity = 1, .tuner_is_baseband = 1, .update_lna = tfe7090p_pvr_update_lna, .agc_config_count = 2, .agc = dib7090_agc_config, .bw = &dib7090_clock_config_12_mhz, .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, .pwm_freq_div = 0, .agc_control = dib7090_agc_restart, .spur_protect = 0, .disable_sample_and_hold = 0, .enable_current_mirror = 0, .diversity_delay = 0, .output_mode = OUTMODE_MPEG2_PAR_GATED_CLK, .default_i2c_addr = 0x90, .enMpegOutput = 1, }, { .output_mpeg2_in_188_bytes = 1, .hostbus_diversity = 1, .tuner_is_baseband = 1, .update_lna = tfe7090p_pvr_update_lna, .agc_config_count = 2, .agc = dib7090_agc_config, .bw = &dib7090_clock_config_12_mhz, .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, .pwm_freq_div = 0, .agc_control = dib7090_agc_restart, .spur_protect = 0, .disable_sample_and_hold = 0, .enable_current_mirror = 0, .diversity_delay = 0, .output_mode = OUTMODE_MPEG2_PAR_GATED_CLK, .default_i2c_addr = 0x92, .enMpegOutput = 0, } }; static struct dib0090_config nim7090_dib0090_config = { .io.clock_khz = 12000, .io.pll_bypass = 0, .io.pll_range = 0, .io.pll_prediv = 3, .io.pll_loopdiv = 6, .io.adc_clock_ratio = 0, .io.pll_int_loop_filt = 0, .freq_offset_khz_uhf = 0, .freq_offset_khz_vhf = 0, .clkouttobamse = 
1, .analog_output = 0, .wbd_vhf_offset = 0, .wbd_cband_offset = 0, .use_pwm_agc = 1, .clkoutdrive = 0, .fref_clock_ratio = 0, .wbd = dib7090_wbd_table, .ls_cfg_pad_drv = 0, .data_tx_drv = 0, .low_if = NULL, .in_soc = 1, }; static struct dib7000p_config tfe7790p_dib7000p_config = { .output_mpeg2_in_188_bytes = 1, .hostbus_diversity = 1, .tuner_is_baseband = 1, .update_lna = tfe7790p_update_lna, .agc_config_count = 2, .agc = dib7090_agc_config, .bw = &dib7090_clock_config_12_mhz, .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, .pwm_freq_div = 0, .agc_control = dib7090_agc_restart, .spur_protect = 0, .disable_sample_and_hold = 0, .enable_current_mirror = 0, .diversity_delay = 0, .output_mode = OUTMODE_MPEG2_PAR_GATED_CLK, .enMpegOutput = 1, }; static struct dib0090_config tfe7790p_dib0090_config = { .io.clock_khz = 12000, .io.pll_bypass = 0, .io.pll_range = 0, .io.pll_prediv = 3, .io.pll_loopdiv = 6, .io.adc_clock_ratio = 0, .io.pll_int_loop_filt = 0, .freq_offset_khz_uhf = 0, .freq_offset_khz_vhf = 0, .clkouttobamse = 1, .analog_output = 0, .wbd_vhf_offset = 0, .wbd_cband_offset = 0, .use_pwm_agc = 1, .clkoutdrive = 0, .fref_clock_ratio = 0, .wbd = dib7090_wbd_table, .ls_cfg_pad_drv = 0, .data_tx_drv = 0, .low_if = NULL, .in_soc = 1, .force_cband_input = 0, .is_dib7090e = 0, .force_crystal_mode = 1, }; static struct dib0090_config tfe7090pvr_dib0090_config[2] = { { .io.clock_khz = 12000, .io.pll_bypass = 0, .io.pll_range = 0, .io.pll_prediv = 3, .io.pll_loopdiv = 6, .io.adc_clock_ratio = 0, .io.pll_int_loop_filt = 0, .freq_offset_khz_uhf = 50, .freq_offset_khz_vhf = 70, .clkouttobamse = 1, .analog_output = 0, .wbd_vhf_offset = 0, .wbd_cband_offset = 0, .use_pwm_agc = 1, .clkoutdrive = 0, .fref_clock_ratio = 0, .wbd = dib7090_wbd_table, .ls_cfg_pad_drv = 0, .data_tx_drv = 0, .low_if = NULL, .in_soc = 1, }, { .io.clock_khz = 12000, .io.pll_bypass = 0, .io.pll_range = 0, .io.pll_prediv = 3, .io.pll_loopdiv = 6, .io.adc_clock_ratio = 0, .io.pll_int_loop_filt = 0, .freq_offset_khz_uhf = -50, .freq_offset_khz_vhf = -70, .clkouttobamse = 1, .analog_output = 0, .wbd_vhf_offset = 0, .wbd_cband_offset = 0, .use_pwm_agc = 1, .clkoutdrive = 0, .fref_clock_ratio = 0, .wbd = dib7090_wbd_table, .ls_cfg_pad_drv = 0, .data_tx_drv = 0, .low_if = NULL, .in_soc = 1, } }; static int nim7090_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(20); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, &nim7090_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &nim7090_dib7000p_config); return adap->fe_adap[0].fe == NULL ? 
-ENODEV : 0; } static int nim7090_tuner_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *st = adap->priv; struct i2c_adapter *tun_i2c = st->dib7000p_ops.get_i2c_tuner(adap->fe_adap[0].fe); nim7090_dib0090_config.reset = st->dib7000p_ops.tuner_sleep; nim7090_dib0090_config.sleep = st->dib7000p_ops.tuner_sleep; nim7090_dib0090_config.get_adc_power = st->dib7000p_ops.get_adc_power; if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &nim7090_dib0090_config) == NULL) return -ENODEV; st->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1); st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params; adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib7090_agc_startup; return 0; } static int tfe7090pvr_frontend0_attach(struct dvb_usb_adapter *adap) { struct dib0700_state *st = adap->dev->priv; struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; /* The TFE7090 requires the dib0700 to not be in master mode */ st->disable_streaming_master_mode = 1; dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(20); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); /* initialize IC 0 */ if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, &tfe7090pvr_dib7000p_config[0]) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } dib0700_set_i2c_speed(adap->dev, 340); adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x90, &tfe7090pvr_dib7000p_config[0]); if (adap->fe_adap[0].fe == NULL) return -ENODEV; state->dib7000p_ops.slave_reset(adap->fe_adap[0].fe); return 0; } static int tfe7090pvr_frontend1_attach(struct dvb_usb_adapter *adap) { struct i2c_adapter *i2c; struct dib0700_adapter_state *state = adap->priv; if (adap->dev->adapter[0].fe_adap[0].fe == NULL) { err("the master dib7090 has to be initialized first"); return -ENODEV; /* the master device has not been initialized */ } if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; i2c = state->dib7000p_ops.get_i2c_master(adap->dev->adapter[0].fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_6_7, 1); if (state->dib7000p_ops.i2c_enumeration(i2c, 1, 0x10, &tfe7090pvr_dib7000p_config[1]) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(i2c, 0x92, &tfe7090pvr_dib7000p_config[1]); dib0700_set_i2c_speed(adap->dev, 200); return adap->fe_adap[0].fe == NULL ? 
-ENODEV : 0; } static int tfe7090pvr_tuner0_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *st = adap->priv; struct i2c_adapter *tun_i2c = st->dib7000p_ops.get_i2c_tuner(adap->fe_adap[0].fe); tfe7090pvr_dib0090_config[0].reset = st->dib7000p_ops.tuner_sleep; tfe7090pvr_dib0090_config[0].sleep = st->dib7000p_ops.tuner_sleep; tfe7090pvr_dib0090_config[0].get_adc_power = st->dib7000p_ops.get_adc_power; if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &tfe7090pvr_dib0090_config[0]) == NULL) return -ENODEV; st->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1); st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params; adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib7090_agc_startup; return 0; } static int tfe7090pvr_tuner1_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *st = adap->priv; struct i2c_adapter *tun_i2c = st->dib7000p_ops.get_i2c_tuner(adap->fe_adap[0].fe); tfe7090pvr_dib0090_config[1].reset = st->dib7000p_ops.tuner_sleep; tfe7090pvr_dib0090_config[1].sleep = st->dib7000p_ops.tuner_sleep; tfe7090pvr_dib0090_config[1].get_adc_power = st->dib7000p_ops.get_adc_power; if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &tfe7090pvr_dib0090_config[1]) == NULL) return -ENODEV; st->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1); st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params; adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib7090_agc_startup; return 0; } static int tfe7790p_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_state *st = adap->dev->priv; struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; /* The TFE7790P requires the dib0700 to not be in master mode */ st->disable_streaming_master_mode = 1; dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(20); dib0700_ctrl_clock(adap->dev, 72, 1); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(20); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, &tfe7790p_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &tfe7790p_dib7000p_config); return adap->fe_adap[0].fe == NULL ? 
-ENODEV : 0; } static int tfe7790p_tuner_attach(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *st = adap->priv; struct i2c_adapter *tun_i2c = st->dib7000p_ops.get_i2c_tuner(adap->fe_adap[0].fe); tfe7790p_dib0090_config.reset = st->dib7000p_ops.tuner_sleep; tfe7790p_dib0090_config.sleep = st->dib7000p_ops.tuner_sleep; tfe7790p_dib0090_config.get_adc_power = st->dib7000p_ops.get_adc_power; if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &tfe7790p_dib0090_config) == NULL) return -ENODEV; st->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1); st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params; adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib7090_agc_startup; return 0; } /* STK7070PD */ static struct dib7000p_config stk7070pd_dib7000p_config[2] = { { .output_mpeg2_in_188_bytes = 1, .agc_config_count = 1, .agc = &dib7070_agc_config, .bw = &dib7070_bw_config_12_mhz, .tuner_is_baseband = 1, .spur_protect = 1, .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, .hostbus_diversity = 1, }, { .output_mpeg2_in_188_bytes = 1, .agc_config_count = 1, .agc = &dib7070_agc_config, .bw = &dib7070_bw_config_12_mhz, .tuner_is_baseband = 1, .spur_protect = 1, .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, .hostbus_diversity = 1, } }; static void stk7070pd_init(struct dvb_usb_device *dev) { dib0700_set_gpio(dev, GPIO6, GPIO_OUT, 1); msleep(10); dib0700_set_gpio(dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 0); dib0700_ctrl_clock(dev, 72, 1); msleep(10); dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 1); } static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; stk7070pd_init(adap->dev); msleep(10); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 2, 18, stk7070pd_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &stk7070pd_dib7000p_config[0]); return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } static int stk7070pd_frontend_attach1(struct dvb_usb_adapter *adap) { struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x82, &stk7070pd_dib7000p_config[1]); return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } static int novatd_read_status_override(struct dvb_frontend *fe, enum fe_status *stat) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dvb_usb_device *dev = adap->dev; struct dib0700_state *state = dev->priv; int ret; ret = state->read_status(fe, stat); if (!ret) dib0700_set_gpio(dev, adap->id == 0 ? GPIO1 : GPIO0, GPIO_OUT, !!(*stat & FE_HAS_LOCK)); return ret; } static int novatd_sleep_override(struct dvb_frontend* fe) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dvb_usb_device *dev = adap->dev; struct dib0700_state *state = dev->priv; /* turn off LED */ dib0700_set_gpio(dev, adap->id == 0 ? 
GPIO1 : GPIO0, GPIO_OUT, 0); return state->sleep(fe); } /* * novatd_frontend_attach - Nova-TD specific attach * * Nova-TD has GPIO0, 1 and 2 for LEDs. So do not fiddle with them except for * information purposes. */ static int novatd_frontend_attach(struct dvb_usb_adapter *adap) { struct dvb_usb_device *dev = adap->dev; struct dib0700_state *st = dev->priv; struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; if (adap->id == 0) { stk7070pd_init(dev); /* turn the power LED on, the other two off (just in case) */ dib0700_set_gpio(dev, GPIO0, GPIO_OUT, 0); dib0700_set_gpio(dev, GPIO1, GPIO_OUT, 0); dib0700_set_gpio(dev, GPIO2, GPIO_OUT, 1); if (state->dib7000p_ops.i2c_enumeration(&dev->i2c_adap, 2, 18, stk7070pd_dib7000p_config) != 0) { err("%s: state->dib7000p_ops.i2c_enumeration failed. Cannot continue\n", __func__); dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } } adap->fe_adap[0].fe = state->dib7000p_ops.init(&dev->i2c_adap, adap->id == 0 ? 0x80 : 0x82, &stk7070pd_dib7000p_config[adap->id]); if (adap->fe_adap[0].fe == NULL) return -ENODEV; st->read_status = adap->fe_adap[0].fe->ops.read_status; adap->fe_adap[0].fe->ops.read_status = novatd_read_status_override; st->sleep = adap->fe_adap[0].fe->ops.sleep; adap->fe_adap[0].fe->ops.sleep = novatd_sleep_override; return 0; } /* S5H1411 */ static struct s5h1411_config pinnacle_801e_config = { .output_mode = S5H1411_PARALLEL_OUTPUT, .gpio = S5H1411_GPIO_OFF, .mpeg_timing = S5H1411_MPEGTIMING_NONCONTINUOUS_NONINVERTING_CLOCK, .qam_if = S5H1411_IF_44000, .vsb_if = S5H1411_IF_44000, .inversion = S5H1411_INVERSION_OFF, .status_mode = S5H1411_DEMODLOCKING }; /* Pinnacle PCTV HD Pro 801e GPIOs map: GPIO0 - currently unknown GPIO1 - xc5000 tuner reset GPIO2 - CX25843 sleep GPIO3 - currently unknown GPIO4 - currently unknown GPIO6 - currently unknown GPIO7 - currently unknown GPIO9 - currently unknown GPIO10 - CX25843 reset */ static int s5h1411_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_state *st = adap->dev->priv; /* Make use of the new i2c functions from FW 1.20 */ st->fw_use_new_i2c_api = 1; /* The s5h1411 requires the dib0700 to not be in master mode */ st->disable_streaming_master_mode = 1; /* All msleep values taken from Windows USB trace */ dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 0); dib0700_set_gpio(adap->dev, GPIO3, GPIO_OUT, 0); dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(400); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(60); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(30); dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); dib0700_set_gpio(adap->dev, GPIO2, GPIO_OUT, 0); msleep(30); /* Put the CX25843 to sleep for now since we're in digital mode */ dib0700_set_gpio(adap->dev, GPIO2, GPIO_OUT, 1); /* GPIOs are initialized, do the attach */ adap->fe_adap[0].fe = dvb_attach(s5h1411_attach, &pinnacle_801e_config, &adap->dev->i2c_adap); return adap->fe_adap[0].fe == NULL ? 
-ENODEV : 0; } static int dib0700_xc5000_tuner_callback(void *priv, int component, int command, int arg) { struct dvb_usb_adapter *adap = priv; if (command == XC5000_TUNER_RESET) { /* Reset the tuner */ dib0700_set_gpio(adap->dev, GPIO1, GPIO_OUT, 0); msleep(10); dib0700_set_gpio(adap->dev, GPIO1, GPIO_OUT, 1); msleep(10); } else { err("xc5000: unknown tuner callback command: %d\n", command); return -EINVAL; } return 0; } static struct xc5000_config s5h1411_xc5000_tunerconfig = { .i2c_address = 0x64, .if_khz = 5380, }; static int xc5000_tuner_attach(struct dvb_usb_adapter *adap) { /* FIXME: generalize & move to common area */ adap->fe_adap[0].fe->callback = dib0700_xc5000_tuner_callback; return dvb_attach(xc5000_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap, &s5h1411_xc5000_tunerconfig) == NULL ? -ENODEV : 0; } static int dib0700_xc4000_tuner_callback(void *priv, int component, int command, int arg) { struct dvb_usb_adapter *adap = priv; struct dib0700_adapter_state *state = adap->priv; if (command == XC4000_TUNER_RESET) { /* Reset the tuner */ state->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 0); msleep(10); state->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1); } else { err("xc4000: unknown tuner callback command: %d\n", command); return -EINVAL; } return 0; } static struct dibx000_agc_config stk7700p_7000p_xc4000_agc_config = { .band_caps = BAND_UHF | BAND_VHF, .setup = 0x64, .inv_gain = 0x02c8, .time_stabiliz = 0x15, .alpha_level = 0x00, .thlock = 0x76, .wbd_inv = 0x01, .wbd_ref = 0x0b33, .wbd_sel = 0x00, .wbd_alpha = 0x02, .agc1_max = 0x00, .agc1_min = 0x00, .agc2_max = 0x9b26, .agc2_min = 0x26ca, .agc1_pt1 = 0x00, .agc1_pt2 = 0x00, .agc1_pt3 = 0x00, .agc1_slope1 = 0x00, .agc1_slope2 = 0x00, .agc2_pt1 = 0x00, .agc2_pt2 = 0x80, .agc2_slope1 = 0x1d, .agc2_slope2 = 0x1d, .alpha_mant = 0x11, .alpha_exp = 0x1b, .beta_mant = 0x17, .beta_exp = 0x33, .perform_agc_softsplit = 0x00, }; static struct dibx000_bandwidth_config stk7700p_xc4000_pll_config = { .internal = 60000, .sampling = 30000, .pll_prediv = 1, .pll_ratio = 8, .pll_range = 3, .pll_reset = 1, .pll_bypass = 0, .enable_refdiv = 0, .bypclk_div = 0, .IO_CLK_en_core = 1, .ADClkSrc = 1, .modulo = 0, .sad_cfg = (3 << 14) | (1 << 12) | 524, /* sad_cfg: refsel, sel, freq_15k */ .ifreq = 39370534, .timf = 20452225, .xtal_hz = 30000000 }; /* FIXME: none of these inputs are validated yet */ static struct dib7000p_config pctv_340e_config = { .output_mpeg2_in_188_bytes = 1, .agc_config_count = 1, .agc = &stk7700p_7000p_xc4000_agc_config, .bw = &stk7700p_xc4000_pll_config, .gpio_dir = DIB7000M_GPIO_DEFAULT_DIRECTIONS, .gpio_val = DIB7000M_GPIO_DEFAULT_VALUES, .gpio_pwm_pos = DIB7000M_GPIO_DEFAULT_PWM_POS, }; /* PCTV 340e GPIOs map: dib0700: GPIO2 - CX25843 sleep GPIO3 - CS5340 reset GPIO5 - IRD GPIO6 - Power Supply GPIO8 - LNA (1=off 0=on) GPIO10 - CX25843 reset dib7000: GPIO8 - xc4000 reset */ static int pctv340e_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_state *st = adap->dev->priv; struct dib0700_adapter_state *state = adap->priv; if (!dvb_attach(dib7000p_attach, &state->dib7000p_ops)) return -ENODEV; /* Power Supply on */ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); msleep(50); dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(100); /* Allow power supply to settle before probing */ /* cx25843 reset */ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(1); /* cx25843 datasheet says 350us required */ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); /* LNA off for now */
dib0700_set_gpio(adap->dev, GPIO8, GPIO_OUT, 1); /* Put the CX25843 to sleep for now since we're in digital mode */ dib0700_set_gpio(adap->dev, GPIO2, GPIO_OUT, 1); /* FIXME: not verified yet */ dib0700_ctrl_clock(adap->dev, 72, 1); msleep(500); if (state->dib7000p_ops.dib7000pc_detection(&adap->dev->i2c_adap) == 0) { /* Demodulator not found for some reason? */ dvb_detach(state->dib7000p_ops.set_wbd_ref); return -ENODEV; } adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x12, &pctv_340e_config); st->is_dib7000pc = 1; return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } static struct xc4000_config dib7000p_xc4000_tunerconfig = { .i2c_address = 0x61, .default_pm = 1, .dvb_amplitude = 0, .set_smoothedcvbs = 0, .if_khz = 5400 }; static int xc4000_tuner_attach(struct dvb_usb_adapter *adap) { struct i2c_adapter *tun_i2c; struct dib0700_adapter_state *state = adap->priv; /* The xc4000 is not on the main i2c bus */ tun_i2c = state->dib7000p_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1); if (tun_i2c == NULL) { printk(KERN_ERR "Could not reach tuner i2c bus\n"); return 0; } /* Setup the reset callback */ adap->fe_adap[0].fe->callback = dib0700_xc4000_tuner_callback; return dvb_attach(xc4000_attach, adap->fe_adap[0].fe, tun_i2c, &dib7000p_xc4000_tunerconfig) == NULL ? -ENODEV : 0; } static struct lgdt3305_config hcw_lgdt3305_config = { .i2c_addr = 0x0e, .mpeg_mode = LGDT3305_MPEG_PARALLEL, .tpclk_edge = LGDT3305_TPCLK_FALLING_EDGE, .tpvalid_polarity = LGDT3305_TP_VALID_LOW, .deny_i2c_rptr = 0, .spectral_inversion = 1, .qam_if_khz = 6000, .vsb_if_khz = 6000, .usref_8vsb = 0x0500, }; static struct mxl5007t_config hcw_mxl5007t_config = { .xtal_freq_hz = MxL_XTAL_25_MHZ, .if_freq_hz = MxL_IF_6_MHZ, .invert_if = 1, }; /* TIGER-ATSC map: GPIO0 - LNA_CTR (H: LNA power enabled, L: LNA power disabled) GPIO1 - ANT_SEL (H: VPA, L: MCX) GPIO4 - SCL2 GPIO6 - EN_TUNER GPIO7 - SDA2 GPIO10 - DEM_RST MXL is behind LG's i2c repeater. LG is on SCL2/SDA2 gpios on the DIB */ static int lgdt3305_frontend_attach(struct dvb_usb_adapter *adap) { struct dib0700_state *st = adap->dev->priv; /* Make use of the new i2c functions from FW 1.20 */ st->fw_use_new_i2c_api = 1; st->disable_streaming_master_mode = 1; /* fe power enable */ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); msleep(30); dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(30); /* demod reset */ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(30); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(30); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(30); adap->fe_adap[0].fe = dvb_attach(lgdt3305_attach, &hcw_lgdt3305_config, &adap->dev->i2c_adap); return adap->fe_adap[0].fe == NULL ? -ENODEV : 0; } static int mxl5007t_tuner_attach(struct dvb_usb_adapter *adap) { return dvb_attach(mxl5007t_attach, adap->fe_adap[0].fe, &adap->dev->i2c_adap, 0x60, &hcw_mxl5007t_config) == NULL ? 
-ENODEV : 0; } static int xbox_one_attach(struct dvb_usb_adapter *adap) { struct dib0700_state *st = adap->dev->priv; struct i2c_client *client_demod, *client_tuner; struct dvb_usb_device *d = adap->dev; struct mn88472_config mn88472_config = { }; struct tda18250_config tda18250_config; struct i2c_board_info info; st->fw_use_new_i2c_api = 1; st->disable_streaming_master_mode = 1; /* fe power enable */ dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); msleep(30); dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); msleep(30); /* demod reset */ dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(30); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); msleep(30); dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); msleep(30); /* attach demod */ mn88472_config.fe = &adap->fe_adap[0].fe; mn88472_config.i2c_wr_max = 22; mn88472_config.xtal = 20500000; mn88472_config.ts_mode = PARALLEL_TS_MODE; mn88472_config.ts_clock = FIXED_TS_CLOCK; memset(&info, 0, sizeof(struct i2c_board_info)); strscpy(info.type, "mn88472", I2C_NAME_SIZE); info.addr = 0x18; info.platform_data = &mn88472_config; request_module(info.type); client_demod = i2c_new_client_device(&d->i2c_adap, &info); if (!i2c_client_has_driver(client_demod)) goto fail_demod_device; if (!try_module_get(client_demod->dev.driver->owner)) goto fail_demod_module; st->i2c_client_demod = client_demod; adap->fe_adap[0].fe = mn88472_config.get_dvb_frontend(client_demod); /* attach tuner */ memset(&tda18250_config, 0, sizeof(tda18250_config)); tda18250_config.if_dvbt_6 = 3950; tda18250_config.if_dvbt_7 = 4450; tda18250_config.if_dvbt_8 = 4950; tda18250_config.if_dvbc_6 = 4950; tda18250_config.if_dvbc_8 = 4950; tda18250_config.if_atsc = 4079; tda18250_config.loopthrough = true; tda18250_config.xtal_freq = TDA18250_XTAL_FREQ_27MHZ; tda18250_config.fe = adap->fe_adap[0].fe; memset(&info, 0, sizeof(struct i2c_board_info)); strscpy(info.type, "tda18250", I2C_NAME_SIZE); info.addr = 0x60; info.platform_data = &tda18250_config; request_module(info.type); client_tuner = i2c_new_client_device(&adap->dev->i2c_adap, &info); if (!i2c_client_has_driver(client_tuner)) goto fail_tuner_device; if (!try_module_get(client_tuner->dev.driver->owner)) goto fail_tuner_module; st->i2c_client_tuner = client_tuner; return 0; fail_tuner_module: i2c_unregister_device(client_tuner); fail_tuner_device: module_put(client_demod->dev.driver->owner); fail_demod_module: i2c_unregister_device(client_demod); fail_demod_device: return -ENODEV; } /* DVB-USB and USB stuff follows */ enum { DIBCOM_STK7700P, DIBCOM_STK7700P_PC, HAUPPAUGE_NOVA_T_500, HAUPPAUGE_NOVA_T_500_2, HAUPPAUGE_NOVA_T_STICK, AVERMEDIA_VOLAR, COMPRO_VIDEOMATE_U500, UNIWILL_STK7700P, LEADTEK_WINFAST_DTV_DONGLE_STK7700P, HAUPPAUGE_NOVA_T_STICK_2, AVERMEDIA_VOLAR_2, PINNACLE_PCTV2000E, TERRATEC_CINERGY_DT_XS_DIVERSITY, HAUPPAUGE_NOVA_TD_STICK, DIBCOM_STK7700D, DIBCOM_STK7070P, PINNACLE_PCTV_DVB_T_FLASH, DIBCOM_STK7070PD, PINNACLE_PCTV_DUAL_DIVERSITY_DVB_T, COMPRO_VIDEOMATE_U500_PC, AVERMEDIA_EXPRESS, GIGABYTE_U7000, ULTIMA_ARTEC_T14BR, ASUS_U3000, ASUS_U3100, HAUPPAUGE_NOVA_T_STICK_3, HAUPPAUGE_MYTV_T, TERRATEC_CINERGY_HT_USB_XE, PINNACLE_EXPRESSCARD_320CX, PINNACLE_PCTV72E, PINNACLE_PCTV73E, YUAN_EC372S, TERRATEC_CINERGY_HT_EXPRESS, TERRATEC_CINERGY_T_XXS, LEADTEK_WINFAST_DTV_DONGLE_STK7700P_2, HAUPPAUGE_NOVA_TD_STICK_52009, HAUPPAUGE_NOVA_T_500_3, GIGABYTE_U8000, YUAN_STK7700PH, ASUS_U3000H, PINNACLE_PCTV801E, PINNACLE_PCTV801E_SE, TERRATEC_CINERGY_T_EXPRESS, TERRATEC_CINERGY_DT_XS_DIVERSITY_2, SONY_PLAYTV, YUAN_PD378S, 
HAUPPAUGE_TIGER_ATSC, HAUPPAUGE_TIGER_ATSC_B210, YUAN_MC770, ELGATO_EYETV_DTT, ELGATO_EYETV_DTT_Dlx, LEADTEK_WINFAST_DTV_DONGLE_H, TERRATEC_T3, TERRATEC_T5, YUAN_STK7700D, YUAN_STK7700D_2, PINNACLE_PCTV73A, PCTV_PINNACLE_PCTV73ESE, PCTV_PINNACLE_PCTV282E, DIBCOM_STK7770P, TERRATEC_CINERGY_T_XXS_2, DIBCOM_STK807XPVR, DIBCOM_STK807XP, PIXELVIEW_SBTVD, EVOLUTEPC_TVWAY_PLUS, PINNACLE_PCTV73ESE, PINNACLE_PCTV282E, DIBCOM_STK8096GP, ELGATO_EYETV_DIVERSITY, DIBCOM_NIM9090M, DIBCOM_NIM8096MD, DIBCOM_NIM9090MD, DIBCOM_NIM7090, DIBCOM_TFE7090PVR, TECHNISAT_AIRSTAR_TELESTICK_2, MEDION_CREATIX_CTX1921, PINNACLE_PCTV340E, PINNACLE_PCTV340E_SE, DIBCOM_TFE7790P, DIBCOM_TFE8096P, ELGATO_EYETV_DTT_2, PCTV_2002E, PCTV_2002E_SE, PCTV_DIBCOM_STK8096PVR, DIBCOM_STK8096PVR, HAMA_DVBT_HYBRID, MICROSOFT_XBOX_ONE_TUNER, }; struct usb_device_id dib0700_usb_id_table[] = { DVB_USB_DEV(DIBCOM, DIBCOM_STK7700P), DVB_USB_DEV(DIBCOM, DIBCOM_STK7700P_PC), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_NOVA_T_500), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_NOVA_T_500_2), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_NOVA_T_STICK), DVB_USB_DEV(AVERMEDIA, AVERMEDIA_VOLAR), DVB_USB_DEV(COMPRO, COMPRO_VIDEOMATE_U500), DVB_USB_DEV(UNIWILL, UNIWILL_STK7700P), DVB_USB_DEV(LEADTEK, LEADTEK_WINFAST_DTV_DONGLE_STK7700P), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_NOVA_T_STICK_2), DVB_USB_DEV(AVERMEDIA, AVERMEDIA_VOLAR_2), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV2000E), DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_DT_XS_DIVERSITY), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_NOVA_TD_STICK), DVB_USB_DEV(DIBCOM, DIBCOM_STK7700D), DVB_USB_DEV(DIBCOM, DIBCOM_STK7070P), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV_DVB_T_FLASH), DVB_USB_DEV(DIBCOM, DIBCOM_STK7070PD), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV_DUAL_DIVERSITY_DVB_T), DVB_USB_DEV(COMPRO, COMPRO_VIDEOMATE_U500_PC), DVB_USB_DEV(AVERMEDIA, AVERMEDIA_EXPRESS), DVB_USB_DEV(GIGABYTE, GIGABYTE_U7000), DVB_USB_DEV(ULTIMA_ELECTRONIC, ULTIMA_ARTEC_T14BR), DVB_USB_DEV(ASUS, ASUS_U3000), DVB_USB_DEV(ASUS, ASUS_U3100), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_NOVA_T_STICK_3), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_MYTV_T), DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_HT_USB_XE), DVB_USB_DEV(PINNACLE, PINNACLE_EXPRESSCARD_320CX), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV72E), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV73E), DVB_USB_DEV(YUAN, YUAN_EC372S), DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_HT_EXPRESS), DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_T_XXS), DVB_USB_DEV(LEADTEK, LEADTEK_WINFAST_DTV_DONGLE_STK7700P_2), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_NOVA_TD_STICK_52009), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_NOVA_T_500_3), DVB_USB_DEV(GIGABYTE, GIGABYTE_U8000), DVB_USB_DEV(YUAN, YUAN_STK7700PH), DVB_USB_DEV(ASUS, ASUS_U3000H), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV801E), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV801E_SE), DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_T_EXPRESS), DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_DT_XS_DIVERSITY_2), DVB_USB_DEV(SONY, SONY_PLAYTV), DVB_USB_DEV(YUAN, YUAN_PD378S), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_TIGER_ATSC), DVB_USB_DEV(HAUPPAUGE, HAUPPAUGE_TIGER_ATSC_B210), DVB_USB_DEV(YUAN, YUAN_MC770), DVB_USB_DEV(ELGATO, ELGATO_EYETV_DTT), DVB_USB_DEV(ELGATO, ELGATO_EYETV_DTT_Dlx), DVB_USB_DEV(LEADTEK, LEADTEK_WINFAST_DTV_DONGLE_H), DVB_USB_DEV(TERRATEC, TERRATEC_T3), DVB_USB_DEV(TERRATEC, TERRATEC_T5), DVB_USB_DEV(YUAN, YUAN_STK7700D), DVB_USB_DEV(YUAN, YUAN_STK7700D_2), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV73A), DVB_USB_DEV(PCTV, PCTV_PINNACLE_PCTV73ESE), DVB_USB_DEV(PCTV, PCTV_PINNACLE_PCTV282E), DVB_USB_DEV(DIBCOM, DIBCOM_STK7770P), DVB_USB_DEV(TERRATEC, TERRATEC_CINERGY_T_XXS_2), DVB_USB_DEV(DIBCOM, 
DIBCOM_STK807XPVR), DVB_USB_DEV(DIBCOM, DIBCOM_STK807XP), DVB_USB_DEV_VER(PIXELVIEW, PIXELVIEW_SBTVD, 0x000, 0x3f00), DVB_USB_DEV(EVOLUTEPC, EVOLUTEPC_TVWAY_PLUS), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV73ESE), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV282E), DVB_USB_DEV(DIBCOM, DIBCOM_STK8096GP), DVB_USB_DEV(ELGATO, ELGATO_EYETV_DIVERSITY), DVB_USB_DEV(DIBCOM, DIBCOM_NIM9090M), DVB_USB_DEV(DIBCOM, DIBCOM_NIM8096MD), DVB_USB_DEV(DIBCOM, DIBCOM_NIM9090MD), DVB_USB_DEV(DIBCOM, DIBCOM_NIM7090), DVB_USB_DEV(DIBCOM, DIBCOM_TFE7090PVR), DVB_USB_DEV(TECHNISAT, TECHNISAT_AIRSTAR_TELESTICK_2), DVB_USB_DEV(MEDION, MEDION_CREATIX_CTX1921), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV340E), DVB_USB_DEV(PINNACLE, PINNACLE_PCTV340E_SE), DVB_USB_DEV(DIBCOM, DIBCOM_TFE7790P), DVB_USB_DEV(DIBCOM, DIBCOM_TFE8096P), DVB_USB_DEV(ELGATO, ELGATO_EYETV_DTT_2), DVB_USB_DEV(PCTV, PCTV_2002E), DVB_USB_DEV(PCTV, PCTV_2002E_SE), DVB_USB_DEV(PCTV, PCTV_DIBCOM_STK8096PVR), DVB_USB_DEV(DIBCOM, DIBCOM_STK8096PVR), DVB_USB_DEV(HAMA, HAMA_DVBT_HYBRID), DVB_USB_DEV(MICROSOFT, MICROSOFT_XBOX_ONE_TUNER), { } }; MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table); #define DIB0700_DEFAULT_DEVICE_PROPERTIES \ .caps = DVB_USB_IS_AN_I2C_ADAPTER, \ .usb_ctrl = DEVICE_SPECIFIC, \ .firmware = "dvb-usb-dib0700-1.20.fw", \ .download_firmware = dib0700_download_firmware, \ .no_reconnect = 1, \ .size_of_priv = sizeof(struct dib0700_state), \ .i2c_algo = &dib0700_i2c_algo, \ .identify_state = dib0700_identify_state #define DIB0700_DEFAULT_STREAMING_CONFIG(ep) \ .streaming_ctrl = dib0700_streaming_ctrl, \ .stream = { \ .type = USB_BULK, \ .count = 4, \ .endpoint = ep, \ .u = { \ .bulk = { \ .buffersize = 39480, \ } \ } \ } #define DIB0700_NUM_FRONTENDS(n) \ .num_frontends = n, \ .size_of_priv = sizeof(struct dib0700_adapter_state) struct dvb_usb_device_properties dib0700_devices[] = { { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk7700p_pid_filter, .pid_filter_ctrl = stk7700p_pid_filter_ctrl, .frontend_attach = stk7700p_frontend_attach, .tuner_attach = stk7700p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 8, .devices = { { "DiBcom STK7700P reference design", { &dib0700_usb_id_table[DIBCOM_STK7700P], &dib0700_usb_id_table[DIBCOM_STK7700P_PC] }, { NULL }, }, { "Hauppauge Nova-T Stick", { &dib0700_usb_id_table[HAUPPAUGE_NOVA_T_STICK], &dib0700_usb_id_table[HAUPPAUGE_NOVA_T_STICK_2], NULL }, { NULL }, }, { "AVerMedia AVerTV DVB-T Volar", { &dib0700_usb_id_table[AVERMEDIA_VOLAR], &dib0700_usb_id_table[AVERMEDIA_VOLAR_2] }, { NULL }, }, { "Compro Videomate U500", { &dib0700_usb_id_table[COMPRO_VIDEOMATE_U500], &dib0700_usb_id_table[COMPRO_VIDEOMATE_U500_PC] }, { NULL }, }, { "Uniwill STK7700P based (Hama and others)", { &dib0700_usb_id_table[UNIWILL_STK7700P], NULL }, { NULL }, }, { "Leadtek Winfast DTV Dongle (STK7700P based)", { &dib0700_usb_id_table[LEADTEK_WINFAST_DTV_DONGLE_STK7700P], &dib0700_usb_id_table[LEADTEK_WINFAST_DTV_DONGLE_STK7700P_2] }, { NULL }, }, { "AVerMedia AVerTV DVB-T Express", { &dib0700_usb_id_table[AVERMEDIA_EXPRESS] }, { NULL }, }, { "Gigabyte U7000", { &dib0700_usb_id_table[GIGABYTE_U7000], NULL }, { NULL }, } }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, 
.change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 2, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .frontend_attach = bristol_frontend_attach, .tuner_attach = bristol_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, { DIB0700_NUM_FRONTENDS(1), .fe = {{ .frontend_attach = bristol_frontend_attach, .tuner_attach = bristol_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x03), }}, } }, .num_device_descs = 1, .devices = { { "Hauppauge Nova-T 500 Dual DVB-T", { &dib0700_usb_id_table[HAUPPAUGE_NOVA_T_500], &dib0700_usb_id_table[HAUPPAUGE_NOVA_T_500_2], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 2, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7700d_frontend_attach, .tuner_attach = stk7700d_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7700d_frontend_attach, .tuner_attach = stk7700d_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x03), }}, } }, .num_device_descs = 5, .devices = { { "Pinnacle PCTV 2000e", { &dib0700_usb_id_table[PINNACLE_PCTV2000E], NULL }, { NULL }, }, { "Terratec Cinergy DT XS Diversity", { &dib0700_usb_id_table[TERRATEC_CINERGY_DT_XS_DIVERSITY], NULL }, { NULL }, }, { "Hauppauge Nova-TD Stick/Elgato Eye-TV Diversity", { &dib0700_usb_id_table[HAUPPAUGE_NOVA_TD_STICK], NULL }, { NULL }, }, { "DiBcom STK7700D reference design", { &dib0700_usb_id_table[DIBCOM_STK7700D], NULL }, { NULL }, }, { "YUAN High-Tech DiBcom STK7700D", { &dib0700_usb_id_table[YUAN_STK7700D_2], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7700P2_frontend_attach, .tuner_attach = stk7700d_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 3, .devices = { { "ASUS My Cinema U3000 Mini DVBT Tuner", { &dib0700_usb_id_table[ASUS_U3000], NULL }, { NULL }, }, { "Yuan EC372S", { &dib0700_usb_id_table[YUAN_EC372S], NULL }, { NULL }, }, { "Terratec Cinergy T Express", { &dib0700_usb_id_table[TERRATEC_CINERGY_T_EXPRESS], NULL }, { NULL }, } }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { 
DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7070p_frontend_attach, .tuner_attach = dib7070p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 12, .devices = { { "DiBcom STK7070P reference design", { &dib0700_usb_id_table[DIBCOM_STK7070P], NULL }, { NULL }, }, { "Pinnacle PCTV DVB-T Flash Stick", { &dib0700_usb_id_table[PINNACLE_PCTV_DVB_T_FLASH], NULL }, { NULL }, }, { "Artec T14BR DVB-T", { &dib0700_usb_id_table[ULTIMA_ARTEC_T14BR], NULL }, { NULL }, }, { "ASUS My Cinema U3100 Mini DVBT Tuner", { &dib0700_usb_id_table[ASUS_U3100], NULL }, { NULL }, }, { "Hauppauge Nova-T Stick", { &dib0700_usb_id_table[HAUPPAUGE_NOVA_T_STICK_3], NULL }, { NULL }, }, { "Hauppauge Nova-T MyTV.t", { &dib0700_usb_id_table[HAUPPAUGE_MYTV_T], NULL }, { NULL }, }, { "Pinnacle PCTV 72e", { &dib0700_usb_id_table[PINNACLE_PCTV72E], NULL }, { NULL }, }, { "Pinnacle PCTV 73e", { &dib0700_usb_id_table[PINNACLE_PCTV73E], NULL }, { NULL }, }, { "Elgato EyeTV DTT", { &dib0700_usb_id_table[ELGATO_EYETV_DTT], NULL }, { NULL }, }, { "Yuan PD378S", { &dib0700_usb_id_table[YUAN_PD378S], NULL }, { NULL }, }, { "Elgato EyeTV Dtt Dlx PD378S", { &dib0700_usb_id_table[ELGATO_EYETV_DTT_Dlx], NULL }, { NULL }, }, { "Elgato EyeTV DTT rev. 2", { &dib0700_usb_id_table[ELGATO_EYETV_DTT_2], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7070p_frontend_attach, .tuner_attach = dib7070p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 3, .devices = { { "Pinnacle PCTV 73A", { &dib0700_usb_id_table[PINNACLE_PCTV73A], NULL }, { NULL }, }, { "Pinnacle PCTV 73e SE", { &dib0700_usb_id_table[PCTV_PINNACLE_PCTV73ESE], &dib0700_usb_id_table[PINNACLE_PCTV73ESE], NULL }, { NULL }, }, { "Pinnacle PCTV 282e", { &dib0700_usb_id_table[PCTV_PINNACLE_PCTV282E], &dib0700_usb_id_table[PINNACLE_PCTV282E], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 2, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = novatd_frontend_attach, .tuner_attach = dib7070p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, 
.pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = novatd_frontend_attach, .tuner_attach = dib7070p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x03), }}, } }, .num_device_descs = 3, .devices = { { "Hauppauge Nova-TD Stick (52009)", { &dib0700_usb_id_table[HAUPPAUGE_NOVA_TD_STICK_52009], NULL }, { NULL }, }, { "PCTV 2002e", { &dib0700_usb_id_table[PCTV_2002E], NULL }, { NULL }, }, { "PCTV 2002e SE", { &dib0700_usb_id_table[PCTV_2002E_SE], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 2, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7070pd_frontend_attach0, .tuner_attach = dib7070p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7070pd_frontend_attach1, .tuner_attach = dib7070p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x03), }}, } }, .num_device_descs = 5, .devices = { { "DiBcom STK7070PD reference design", { &dib0700_usb_id_table[DIBCOM_STK7070PD], NULL }, { NULL }, }, { "Pinnacle PCTV Dual DVB-T Diversity Stick", { &dib0700_usb_id_table[PINNACLE_PCTV_DUAL_DIVERSITY_DVB_T], NULL }, { NULL }, }, { "Hauppauge Nova-TD-500 (84xxx)", { &dib0700_usb_id_table[HAUPPAUGE_NOVA_T_500_3], NULL }, { NULL }, }, { "Terratec Cinergy DT USB XS Diversity/ T5", { &dib0700_usb_id_table[TERRATEC_CINERGY_DT_XS_DIVERSITY_2], &dib0700_usb_id_table[TERRATEC_T5], NULL}, { NULL }, }, { "Sony PlayTV", { &dib0700_usb_id_table[SONY_PLAYTV], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 2, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7070pd_frontend_attach0, .tuner_attach = dib7070p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7070pd_frontend_attach1, .tuner_attach = dib7070p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x03), }}, } }, .num_device_descs = 1, .devices = { { "Elgato EyeTV Diversity", { &dib0700_usb_id_table[ELGATO_EYETV_DIVERSITY], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_NEC_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, 
.allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7700ph_frontend_attach, .tuner_attach = stk7700ph_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 10, .devices = { { "Terratec Cinergy HT USB XE", { &dib0700_usb_id_table[TERRATEC_CINERGY_HT_USB_XE], NULL }, { NULL }, }, { "Pinnacle Expresscard 320cx", { &dib0700_usb_id_table[PINNACLE_EXPRESSCARD_320CX], NULL }, { NULL }, }, { "Terratec Cinergy HT Express", { &dib0700_usb_id_table[TERRATEC_CINERGY_HT_EXPRESS], NULL }, { NULL }, }, { "Gigabyte U8000-RH", { &dib0700_usb_id_table[GIGABYTE_U8000], NULL }, { NULL }, }, { "YUAN High-Tech STK7700PH", { &dib0700_usb_id_table[YUAN_STK7700PH], NULL }, { NULL }, }, { "Asus My Cinema-U3000Hybrid", { &dib0700_usb_id_table[ASUS_U3000H], NULL }, { NULL }, }, { "YUAN High-Tech MC770", { &dib0700_usb_id_table[YUAN_MC770], NULL }, { NULL }, }, { "Leadtek WinFast DTV Dongle H", { &dib0700_usb_id_table[LEADTEK_WINFAST_DTV_DONGLE_H], NULL }, { NULL }, }, { "YUAN High-Tech STK7700D", { &dib0700_usb_id_table[YUAN_STK7700D], NULL }, { NULL }, }, { "Hama DVB-T Hybrid USB Stick", { &dib0700_usb_id_table[HAMA_DVBT_HYBRID], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .frontend_attach = s5h1411_frontend_attach, .tuner_attach = xc5000_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 2, .devices = { { "Pinnacle PCTV HD Pro USB Stick", { &dib0700_usb_id_table[PINNACLE_PCTV801E], NULL }, { NULL }, }, { "Pinnacle PCTV HD USB Stick", { &dib0700_usb_id_table[PINNACLE_PCTV801E_SE], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .frontend_attach = lgdt3305_frontend_attach, .tuner_attach = mxl5007t_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 2, .devices = { { "Hauppauge ATSC MiniCard (B200)", { &dib0700_usb_id_table[HAUPPAUGE_TIGER_ATSC], NULL }, { NULL }, }, { "Hauppauge ATSC MiniCard (B210)", { &dib0700_usb_id_table[HAUPPAUGE_TIGER_ATSC_B210], NULL }, { NULL }, }, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = stk7770p_frontend_attach, .tuner_attach = dib7770p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 4,
.devices = { { "DiBcom STK7770P reference design", { &dib0700_usb_id_table[DIBCOM_STK7770P], NULL }, { NULL }, }, { "Terratec Cinergy T USB XXS (HD)/ T3", { &dib0700_usb_id_table[TERRATEC_CINERGY_T_XXS], &dib0700_usb_id_table[TERRATEC_T3], &dib0700_usb_id_table[TERRATEC_CINERGY_T_XXS_2], NULL}, { NULL }, }, { "TechniSat AirStar TeleStick 2", { &dib0700_usb_id_table[TECHNISAT_AIRSTAR_TELESTICK_2], NULL }, { NULL }, }, { "Medion CTX1921 DVB-T USB", { &dib0700_usb_id_table[MEDION_CREATIX_CTX1921], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk80xx_pid_filter, .pid_filter_ctrl = stk80xx_pid_filter_ctrl, .frontend_attach = stk807x_frontend_attach, .tuner_attach = dib807x_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 3, .devices = { { "DiBcom STK807xP reference design", { &dib0700_usb_id_table[DIBCOM_STK807XP], NULL }, { NULL }, }, { "Prolink Pixelview SBTVD", { &dib0700_usb_id_table[PIXELVIEW_SBTVD], NULL }, { NULL }, }, { "EvolutePC TVWay+", { &dib0700_usb_id_table[EVOLUTEPC_TVWAY_PLUS], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_NEC_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 2, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk80xx_pid_filter, .pid_filter_ctrl = stk80xx_pid_filter_ctrl, .frontend_attach = stk807xpvr_frontend_attach0, .tuner_attach = dib807x_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk80xx_pid_filter, .pid_filter_ctrl = stk80xx_pid_filter_ctrl, .frontend_attach = stk807xpvr_frontend_attach1, .tuner_attach = dib807x_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x03), }}, }, }, .num_device_descs = 1, .devices = { { "DiBcom STK807xPVR reference design", { &dib0700_usb_id_table[DIBCOM_STK807XPVR], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk80xx_pid_filter, .pid_filter_ctrl = stk80xx_pid_filter_ctrl, .frontend_attach = stk809x_frontend_attach, .tuner_attach = dib809x_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 1, .devices = { { "DiBcom STK8096GP reference design", { 
&dib0700_usb_id_table[DIBCOM_STK8096GP], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = dib90x0_pid_filter, .pid_filter_ctrl = dib90x0_pid_filter_ctrl, .frontend_attach = stk9090m_frontend_attach, .tuner_attach = dib9090_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 1, .devices = { { "DiBcom STK9090M reference design", { &dib0700_usb_id_table[DIBCOM_NIM9090M], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk80xx_pid_filter, .pid_filter_ctrl = stk80xx_pid_filter_ctrl, .frontend_attach = nim8096md_frontend_attach, .tuner_attach = nim8096md_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 1, .devices = { { "DiBcom NIM8096MD reference design", { &dib0700_usb_id_table[DIBCOM_NIM8096MD], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = dib90x0_pid_filter, .pid_filter_ctrl = dib90x0_pid_filter_ctrl, .frontend_attach = nim9090md_frontend_attach, .tuner_attach = nim9090md_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 1, .devices = { { "DiBcom NIM9090MD reference design", { &dib0700_usb_id_table[DIBCOM_NIM9090MD], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = nim7090_frontend_attach, .tuner_attach = nim7090_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 1, .devices = { { "DiBcom NIM7090 reference design", { &dib0700_usb_id_table[DIBCOM_NIM7090], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, 
.module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 2, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = tfe7090pvr_frontend0_attach, .tuner_attach = tfe7090pvr_tuner0_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x03), }}, }, { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = tfe7090pvr_frontend1_attach, .tuner_attach = tfe7090pvr_tuner1_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 1, .devices = { { "DiBcom TFE7090PVR reference design", { &dib0700_usb_id_table[DIBCOM_TFE7090PVR], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .frontend_attach = pctv340e_frontend_attach, .tuner_attach = xc4000_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), }}, }, }, .num_device_descs = 2, .devices = { { "Pinnacle PCTV 340e HD Pro USB Stick", { &dib0700_usb_id_table[PINNACLE_PCTV340E], NULL }, { NULL }, }, { "Pinnacle PCTV Hybrid Stick Solo", { &dib0700_usb_id_table[PINNACLE_PCTV340E_SE], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, .frontend_attach = tfe7790p_frontend_attach, .tuner_attach = tfe7790p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x03), } }, }, }, .num_device_descs = 1, .devices = { { "DiBcom TFE7790P reference design", { &dib0700_usb_id_table[DIBCOM_TFE7790P], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk80xx_pid_filter, .pid_filter_ctrl = stk80xx_pid_filter_ctrl, .frontend_attach = tfe8096p_frontend_attach, .tuner_attach = tfe8096p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), } }, }, }, .num_device_descs = 1, .devices = { { "DiBcom TFE8096P reference design", { 
&dib0700_usb_id_table[DIBCOM_TFE8096P], NULL }, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 2, .adapter = { { .num_frontends = 1, .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk80xx_pid_filter, .pid_filter_ctrl = stk80xx_pid_filter_ctrl, .frontend_attach = stk809x_frontend_attach, .tuner_attach = dib809x_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), } }, .size_of_priv = sizeof(struct dib0700_adapter_state), }, { .num_frontends = 1, .fe = { { .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter = stk80xx_pid_filter, .pid_filter_ctrl = stk80xx_pid_filter_ctrl, .frontend_attach = stk809x_frontend1_attach, .tuner_attach = dib809x_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x03), } }, .size_of_priv = sizeof(struct dib0700_adapter_state), }, }, .num_device_descs = 1, .devices = { { "DiBcom STK8096-PVR reference design", { &dib0700_usb_id_table[PCTV_DIBCOM_STK8096PVR], &dib0700_usb_id_table[DIBCOM_STK8096PVR], NULL}, { NULL }, }, }, .rc.core = { .rc_interval = DEFAULT_RC_INTERVAL, .rc_codes = RC_MAP_DIB0700_RC5_TABLE, .module_name = "dib0700", .rc_query = dib0700_rc_query_old_firmware, .allowed_protos = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_NEC, .change_protocol = dib0700_change_protocol, }, }, { DIB0700_DEFAULT_DEVICE_PROPERTIES, .num_adapters = 1, .adapter = { { DIB0700_NUM_FRONTENDS(1), .fe = {{ .frontend_attach = xbox_one_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x82), } }, }, }, .num_device_descs = 1, .devices = { { "Microsoft Xbox One Digital TV Tuner", { &dib0700_usb_id_table[MICROSOFT_XBOX_ONE_TUNER], NULL }, { NULL }, }, }, }, }; int dib0700_device_count = ARRAY_SIZE(dib0700_devices);
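/*
 * Illustrative sketch, not part of the driver: supporting one more
 * dib0700-based product touches the three tables above. The names below
 * (MYVENDOR, MYVENDOR_MYSTICK) are hypothetical placeholders.
 *
 * 1. Append an identifier to the device enum:
 *
 *        MYVENDOR_MYSTICK,
 *
 * 2. Add the matching USB id to dib0700_usb_id_table[], before the
 *    terminating { } entry:
 *
 *        DVB_USB_DEV(MYVENDOR, MYVENDOR_MYSTICK),
 *
 * 3. Reference it from dib0700_devices[]: either append a device_desc to
 *    an existing properties entry whose frontend_attach/tuner_attach
 *    callbacks already match the hardware (and bump its
 *    .num_device_descs), or add a new entry built from
 *    DIB0700_DEFAULT_DEVICE_PROPERTIES:
 *
 *        { "MyVendor MyStick DVB-T",
 *          { &dib0700_usb_id_table[MYVENDOR_MYSTICK], NULL },
 *          { NULL },
 *        },
 *
 * dib0700_device_count picks the new entry up automatically through
 * ARRAY_SIZE(), so no count has to be maintained by hand.
 */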
// SPDX-License-Identifier: GPL-2.0-or-later /* * af_alg: User-space algorithm interface * * This file provides the user-space API for algorithms.
* * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> */ #include <linux/atomic.h> #include <crypto/if_alg.h> #include <linux/crypto.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/key.h> #include <linux/key-type.h> #include <linux/list.h> #include <linux/module.h> #include <linux/net.h> #include <linux/rwsem.h> #include <linux/sched.h> #include <linux/sched/signal.h> #include <linux/security.h> #include <linux/string.h> #include <keys/user-type.h> #include <keys/trusted-type.h> #include <keys/encrypted-type.h> struct alg_type_list { const struct af_alg_type *type; struct list_head list; }; static struct proto alg_proto = { .name = "ALG", .owner = THIS_MODULE, .obj_size = sizeof(struct alg_sock), }; static LIST_HEAD(alg_types); static DECLARE_RWSEM(alg_types_sem); static const struct af_alg_type *alg_get_type(const char *name) { const struct af_alg_type *type = ERR_PTR(-ENOENT); struct alg_type_list *node; down_read(&alg_types_sem); list_for_each_entry(node, &alg_types, list) { if (strcmp(node->type->name, name)) continue; if (try_module_get(node->type->owner)) type = node->type; break; } up_read(&alg_types_sem); return type; } int af_alg_register_type(const struct af_alg_type *type) { struct alg_type_list *node; int err = -EEXIST; down_write(&alg_types_sem); list_for_each_entry(node, &alg_types, list) { if (!strcmp(node->type->name, type->name)) goto unlock; } node = kmalloc(sizeof(*node), GFP_KERNEL); err = -ENOMEM; if (!node) goto unlock; type->ops->owner = THIS_MODULE; if (type->ops_nokey) type->ops_nokey->owner = THIS_MODULE; node->type = type; list_add(&node->list, &alg_types); err = 0; unlock: up_write(&alg_types_sem); return err; } EXPORT_SYMBOL_GPL(af_alg_register_type); int af_alg_unregister_type(const struct af_alg_type *type) { struct alg_type_list *node; int err = -ENOENT; down_write(&alg_types_sem); list_for_each_entry(node, &alg_types, list) { if (strcmp(node->type->name, type->name)) continue; list_del(&node->list); kfree(node); err = 0; break; } up_write(&alg_types_sem); return err; } EXPORT_SYMBOL_GPL(af_alg_unregister_type); static void alg_do_release(const struct af_alg_type *type, void *private) { if (!type) return; type->release(private); module_put(type->owner); } int af_alg_release(struct socket *sock) { if (sock->sk) { sock_put(sock->sk); sock->sk = NULL; } return 0; } EXPORT_SYMBOL_GPL(af_alg_release); void af_alg_release_parent(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); unsigned int nokey = atomic_read(&ask->nokey_refcnt); sk = ask->parent; ask = alg_sk(sk); if (nokey) atomic_dec(&ask->nokey_refcnt); if (atomic_dec_and_test(&ask->refcnt)) sock_put(sk); } EXPORT_SYMBOL_GPL(af_alg_release_parent); static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY; struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct sockaddr_alg_new *sa = (void *)uaddr; const struct af_alg_type *type; void *private; int err; if (sock->state == SS_CONNECTED) return -EINVAL; BUILD_BUG_ON(offsetof(struct sockaddr_alg_new, salg_name) != offsetof(struct sockaddr_alg, salg_name)); BUILD_BUG_ON(offsetof(struct sockaddr_alg, salg_name) != sizeof(*sa)); if (addr_len < sizeof(*sa) + 1) return -EINVAL; /* If caller uses non-allowed flag, return error. 
*/ if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed)) return -EINVAL; sa->salg_type[sizeof(sa->salg_type) - 1] = 0; sa->salg_name[addr_len - sizeof(*sa) - 1] = 0; type = alg_get_type(sa->salg_type); if (PTR_ERR(type) == -ENOENT) { request_module("algif-%s", sa->salg_type); type = alg_get_type(sa->salg_type); } if (IS_ERR(type)) return PTR_ERR(type); private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask); if (IS_ERR(private)) { module_put(type->owner); return PTR_ERR(private); } err = -EBUSY; lock_sock(sk); if (atomic_read(&ask->refcnt)) goto unlock; swap(ask->type, type); swap(ask->private, private); err = 0; unlock: release_sock(sk); alg_do_release(type, private); return err; } static int alg_setkey(struct sock *sk, sockptr_t ukey, unsigned int keylen) { struct alg_sock *ask = alg_sk(sk); const struct af_alg_type *type = ask->type; u8 *key; int err; key = sock_kmalloc(sk, keylen, GFP_KERNEL); if (!key) return -ENOMEM; err = -EFAULT; if (copy_from_sockptr(key, ukey, keylen)) goto out; err = type->setkey(ask->private, key, keylen); out: sock_kzfree_s(sk, key, keylen); return err; } #ifdef CONFIG_KEYS static const u8 *key_data_ptr_user(const struct key *key, unsigned int *datalen) { const struct user_key_payload *ukp; ukp = user_key_payload_locked(key); if (IS_ERR_OR_NULL(ukp)) return ERR_PTR(-EKEYREVOKED); *datalen = key->datalen; return ukp->data; } static const u8 *key_data_ptr_encrypted(const struct key *key, unsigned int *datalen) { const struct encrypted_key_payload *ekp; ekp = dereference_key_locked(key); if (IS_ERR_OR_NULL(ekp)) return ERR_PTR(-EKEYREVOKED); *datalen = ekp->decrypted_datalen; return ekp->decrypted_data; } static const u8 *key_data_ptr_trusted(const struct key *key, unsigned int *datalen) { const struct trusted_key_payload *tkp; tkp = dereference_key_locked(key); if (IS_ERR_OR_NULL(tkp)) return ERR_PTR(-EKEYREVOKED); *datalen = tkp->key_len; return tkp->key; } static struct key *lookup_key(key_serial_t serial) { key_ref_t key_ref; key_ref = lookup_user_key(serial, 0, KEY_NEED_SEARCH); if (IS_ERR(key_ref)) return ERR_CAST(key_ref); return key_ref_to_ptr(key_ref); } static int alg_setkey_by_key_serial(struct alg_sock *ask, sockptr_t optval, unsigned int optlen) { const struct af_alg_type *type = ask->type; u8 *key_data = NULL; unsigned int key_datalen; key_serial_t serial; struct key *key; const u8 *ret; int err; if (optlen != sizeof(serial)) return -EINVAL; if (copy_from_sockptr(&serial, optval, optlen)) return -EFAULT; key = lookup_key(serial); if (IS_ERR(key)) return PTR_ERR(key); down_read(&key->sem); ret = ERR_PTR(-ENOPROTOOPT); if (!strcmp(key->type->name, "user") || !strcmp(key->type->name, "logon")) { ret = key_data_ptr_user(key, &key_datalen); } else if (IS_REACHABLE(CONFIG_ENCRYPTED_KEYS) && !strcmp(key->type->name, "encrypted")) { ret = key_data_ptr_encrypted(key, &key_datalen); } else if (IS_REACHABLE(CONFIG_TRUSTED_KEYS) && !strcmp(key->type->name, "trusted")) { ret = key_data_ptr_trusted(key, &key_datalen); } if (IS_ERR(ret)) { up_read(&key->sem); key_put(key); return PTR_ERR(ret); } key_data = sock_kmalloc(&ask->sk, key_datalen, GFP_KERNEL); if (!key_data) { up_read(&key->sem); key_put(key); return -ENOMEM; } memcpy(key_data, ret, key_datalen); up_read(&key->sem); key_put(key); err = type->setkey(ask->private, key_data, key_datalen); sock_kzfree_s(&ask->sk, key_data, key_datalen); return err; } #else static inline int alg_setkey_by_key_serial(struct alg_sock *ask, sockptr_t optval, unsigned int optlen) { return -ENOPROTOOPT; } #endif 
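/*
 * A minimal user-space sketch of the bind()/accept() path above (an
 * illustration, assuming a sha256 implementation is available; error
 * handling elided). Keyed algorithms additionally call
 * setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, keylen) before accept(),
 * which lands in alg_setkey() above.
 *
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "hash",	  // resolved by alg_get_type()
 *		.salg_name   = "sha256",  // passed to type->bind()
 *	};
 *	unsigned char digest[32];
 *
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));  // alg_bind()
 *	int opfd = accept(tfmfd, NULL, 0);		  // af_alg_accept()
 *	write(opfd, "abc", 3);
 *	read(opfd, digest, sizeof(digest));		  // SHA-256("abc")
 */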
static int alg_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); const struct af_alg_type *type; int err = -EBUSY; lock_sock(sk); if (atomic_read(&ask->refcnt) != atomic_read(&ask->nokey_refcnt)) goto unlock; type = ask->type; err = -ENOPROTOOPT; if (level != SOL_ALG || !type) goto unlock; switch (optname) { case ALG_SET_KEY: case ALG_SET_KEY_BY_KEY_SERIAL: if (sock->state == SS_CONNECTED) goto unlock; if (!type->setkey) goto unlock; if (optname == ALG_SET_KEY_BY_KEY_SERIAL) err = alg_setkey_by_key_serial(ask, optval, optlen); else err = alg_setkey(sk, optval, optlen); break; case ALG_SET_AEAD_AUTHSIZE: if (sock->state == SS_CONNECTED) goto unlock; if (!type->setauthsize) goto unlock; err = type->setauthsize(ask->private, optlen); break; case ALG_SET_DRBG_ENTROPY: if (sock->state == SS_CONNECTED) goto unlock; if (!type->setentropy) goto unlock; err = type->setentropy(ask->private, optval, optlen); } unlock: release_sock(sk); return err; } int af_alg_accept(struct sock *sk, struct socket *newsock, struct proto_accept_arg *arg) { struct alg_sock *ask = alg_sk(sk); const struct af_alg_type *type; struct sock *sk2; unsigned int nokey; int err; lock_sock(sk); type = ask->type; err = -EINVAL; if (!type) goto unlock; sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, arg->kern); err = -ENOMEM; if (!sk2) goto unlock; sock_init_data(newsock, sk2); security_sock_graft(sk2, newsock); security_sk_clone(sk, sk2); /* * newsock->ops assigned here to allow type->accept call to override * them when required. */ newsock->ops = type->ops; err = type->accept(ask->private, sk2); nokey = err == -ENOKEY; if (nokey && type->accept_nokey) err = type->accept_nokey(ask->private, sk2); if (err) goto unlock; if (atomic_inc_return_relaxed(&ask->refcnt) == 1) sock_hold(sk); if (nokey) { atomic_inc(&ask->nokey_refcnt); atomic_set(&alg_sk(sk2)->nokey_refcnt, 1); } alg_sk(sk2)->parent = sk; alg_sk(sk2)->type = type; newsock->state = SS_CONNECTED; if (nokey) newsock->ops = type->ops_nokey; err = 0; unlock: release_sock(sk); return err; } EXPORT_SYMBOL_GPL(af_alg_accept); static int alg_accept(struct socket *sock, struct socket *newsock, struct proto_accept_arg *arg) { return af_alg_accept(sock->sk, newsock, arg); } static const struct proto_ops alg_proto_ops = { .family = PF_ALG, .owner = THIS_MODULE, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .getname = sock_no_getname, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .mmap = sock_no_mmap, .sendmsg = sock_no_sendmsg, .recvmsg = sock_no_recvmsg, .bind = alg_bind, .release = af_alg_release, .setsockopt = alg_setsockopt, .accept = alg_accept, }; static void alg_sock_destruct(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); alg_do_release(ask->type, ask->private); } static int alg_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; int err; if (sock->type != SOCK_SEQPACKET) return -ESOCKTNOSUPPORT; if (protocol != 0) return -EPROTONOSUPPORT; err = -ENOMEM; sk = sk_alloc(net, PF_ALG, GFP_KERNEL, &alg_proto, kern); if (!sk) goto out; sock->ops = &alg_proto_ops; sock_init_data(sock, sk); sk->sk_destruct = alg_sock_destruct; return 0; out: return err; } static const struct net_proto_family alg_family = { .family = PF_ALG, .create = alg_create, .owner = THIS_MODULE, }; static void af_alg_link_sg(struct af_alg_sgl *sgl_prev, struct af_alg_sgl *sgl_new) { 
sg_unmark_end(sgl_prev->sgt.sgl + sgl_prev->sgt.nents - 1); sg_chain(sgl_prev->sgt.sgl, sgl_prev->sgt.nents + 1, sgl_new->sgt.sgl); } void af_alg_free_sg(struct af_alg_sgl *sgl) { int i; if (sgl->sgt.sgl) { if (sgl->need_unpin) for (i = 0; i < sgl->sgt.nents; i++) unpin_user_page(sg_page(&sgl->sgt.sgl[i])); if (sgl->sgt.sgl != sgl->sgl) kvfree(sgl->sgt.sgl); sgl->sgt.sgl = NULL; } } EXPORT_SYMBOL_GPL(af_alg_free_sg); static int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con) { struct cmsghdr *cmsg; for_each_cmsghdr(cmsg, msg) { if (!CMSG_OK(msg, cmsg)) return -EINVAL; if (cmsg->cmsg_level != SOL_ALG) continue; switch (cmsg->cmsg_type) { case ALG_SET_IV: if (cmsg->cmsg_len < CMSG_LEN(sizeof(*con->iv))) return -EINVAL; con->iv = (void *)CMSG_DATA(cmsg); if (cmsg->cmsg_len < CMSG_LEN(con->iv->ivlen + sizeof(*con->iv))) return -EINVAL; break; case ALG_SET_OP: if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32))) return -EINVAL; con->op = *(u32 *)CMSG_DATA(cmsg); break; case ALG_SET_AEAD_ASSOCLEN: if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32))) return -EINVAL; con->aead_assoclen = *(u32 *)CMSG_DATA(cmsg); break; default: return -EINVAL; } } return 0; } /** * af_alg_alloc_tsgl - allocate the TX SGL * * @sk: socket of connection to user space * Return: 0 upon success, < 0 upon error */ static int af_alg_alloc_tsgl(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct af_alg_ctx *ctx = ask->private; struct af_alg_tsgl *sgl; struct scatterlist *sg = NULL; sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list); if (!list_empty(&ctx->tsgl_list)) sg = sgl->sg; if (!sg || sgl->cur >= MAX_SGL_ENTS) { sgl = sock_kmalloc(sk, struct_size(sgl, sg, (MAX_SGL_ENTS + 1)), GFP_KERNEL); if (!sgl) return -ENOMEM; sg_init_table(sgl->sg, MAX_SGL_ENTS + 1); sgl->cur = 0; if (sg) sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg); list_add_tail(&sgl->list, &ctx->tsgl_list); } return 0; } /** * af_alg_count_tsgl - Count number of TX SG entries * * The counting starts from the beginning of the SGL to @bytes. If * an @offset is provided, the counting of the SG entries starts at the @offset. * * @sk: socket of connection to user space * @bytes: Count the number of SG entries holding given number of bytes. * @offset: Start the counting of SG entries from the given offset. * Return: Number of TX SG entries found given the constraints */ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset) { const struct alg_sock *ask = alg_sk(sk); const struct af_alg_ctx *ctx = ask->private; const struct af_alg_tsgl *sgl; unsigned int i; unsigned int sgl_count = 0; if (!bytes) return 0; list_for_each_entry(sgl, &ctx->tsgl_list, list) { const struct scatterlist *sg = sgl->sg; for (i = 0; i < sgl->cur; i++) { size_t bytes_count; /* Skip offset */ if (offset >= sg[i].length) { offset -= sg[i].length; bytes -= sg[i].length; continue; } bytes_count = sg[i].length - offset; offset = 0; sgl_count++; /* If we have seen requested number of bytes, stop */ if (bytes_count >= bytes) return sgl_count; bytes -= bytes_count; } } return sgl_count; } EXPORT_SYMBOL_GPL(af_alg_count_tsgl); /** * af_alg_pull_tsgl - Release the specified buffers from TX SGL * * If @dst is non-null, reassign the pages to @dst. The caller must release * the pages. If @dst_offset is given only reassign the pages to @dst starting * at the @dst_offset (byte). The caller must ensure that @dst is large * enough (e.g. by using af_alg_count_tsgl with the same offset). 
* * @sk: socket of connection to user space * @used: Number of bytes to pull from TX SGL * @dst: If non-NULL, buffer is reassigned to dst SGL instead of releasing. The * caller must release the buffers in dst. * @dst_offset: Reassign the TX SGL from given offset. All buffers before * reaching the offset is released. */ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst, size_t dst_offset) { struct alg_sock *ask = alg_sk(sk); struct af_alg_ctx *ctx = ask->private; struct af_alg_tsgl *sgl; struct scatterlist *sg; unsigned int i, j = 0; while (!list_empty(&ctx->tsgl_list)) { sgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, list); sg = sgl->sg; for (i = 0; i < sgl->cur; i++) { size_t plen = min_t(size_t, used, sg[i].length); struct page *page = sg_page(sg + i); if (!page) continue; /* * Assumption: caller created af_alg_count_tsgl(len) * SG entries in dst. */ if (dst) { if (dst_offset >= plen) { /* discard page before offset */ dst_offset -= plen; } else { /* reassign page to dst after offset */ get_page(page); sg_set_page(dst + j, page, plen - dst_offset, sg[i].offset + dst_offset); dst_offset = 0; j++; } } sg[i].length -= plen; sg[i].offset += plen; used -= plen; ctx->used -= plen; if (sg[i].length) return; put_page(page); sg_assign_page(sg + i, NULL); } list_del(&sgl->list); sock_kfree_s(sk, sgl, struct_size(sgl, sg, MAX_SGL_ENTS + 1)); } if (!ctx->used) ctx->merge = 0; ctx->init = ctx->more; } EXPORT_SYMBOL_GPL(af_alg_pull_tsgl); /** * af_alg_free_areq_sgls - Release TX and RX SGLs of the request * * @areq: Request holding the TX and RX SGL */ static void af_alg_free_areq_sgls(struct af_alg_async_req *areq) { struct sock *sk = areq->sk; struct alg_sock *ask = alg_sk(sk); struct af_alg_ctx *ctx = ask->private; struct af_alg_rsgl *rsgl, *tmp; struct scatterlist *tsgl; struct scatterlist *sg; unsigned int i; list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) { atomic_sub(rsgl->sg_num_bytes, &ctx->rcvused); af_alg_free_sg(&rsgl->sgl); list_del(&rsgl->list); if (rsgl != &areq->first_rsgl) sock_kfree_s(sk, rsgl, sizeof(*rsgl)); } tsgl = areq->tsgl; if (tsgl) { for_each_sg(tsgl, sg, areq->tsgl_entries, i) { if (!sg_page(sg)) continue; put_page(sg_page(sg)); } sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl)); } } /** * af_alg_wait_for_wmem - wait for availability of writable memory * * @sk: socket of connection to user space * @flags: If MSG_DONTWAIT is set, then only report if function would sleep * Return: 0 when writable memory is available, < 0 upon error */ static int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags) { DEFINE_WAIT_FUNC(wait, woken_wake_function); int err = -ERESTARTSYS; long timeout; if (flags & MSG_DONTWAIT) return -EAGAIN; sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); add_wait_queue(sk_sleep(sk), &wait); for (;;) { if (signal_pending(current)) break; timeout = MAX_SCHEDULE_TIMEOUT; if (sk_wait_event(sk, &timeout, af_alg_writable(sk), &wait)) { err = 0; break; } } remove_wait_queue(sk_sleep(sk), &wait); return err; } /** * af_alg_wmem_wakeup - wakeup caller when writable memory is available * * @sk: socket of connection to user space */ void af_alg_wmem_wakeup(struct sock *sk) { struct socket_wq *wq; if (!af_alg_writable(sk)) return; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLRDNORM | EPOLLRDBAND); sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN); rcu_read_unlock(); } EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup); /** * af_alg_wait_for_data 
- wait for availability of TX data * * @sk: socket of connection to user space * @flags: If MSG_DONTWAIT is set, then only report if function would sleep * @min: Set to minimum request size if partial requests are allowed. * Return: 0 when writable memory is available, < 0 upon error */ int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct alg_sock *ask = alg_sk(sk); struct af_alg_ctx *ctx = ask->private; long timeout; int err = -ERESTARTSYS; if (flags & MSG_DONTWAIT) return -EAGAIN; sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); add_wait_queue(sk_sleep(sk), &wait); for (;;) { if (signal_pending(current)) break; timeout = MAX_SCHEDULE_TIMEOUT; if (sk_wait_event(sk, &timeout, ctx->init && (!ctx->more || (min && ctx->used >= min)), &wait)) { err = 0; break; } } remove_wait_queue(sk_sleep(sk), &wait); sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); return err; } EXPORT_SYMBOL_GPL(af_alg_wait_for_data); /** * af_alg_data_wakeup - wakeup caller when new data can be sent to kernel * * @sk: socket of connection to user space */ static void af_alg_data_wakeup(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct af_alg_ctx *ctx = ask->private; struct socket_wq *wq; if (!ctx->used) return; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (skwq_has_sleeper(wq)) wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | EPOLLRDNORM | EPOLLRDBAND); sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT); rcu_read_unlock(); } /** * af_alg_sendmsg - implementation of sendmsg system call handler * * The sendmsg system call handler obtains the user data and stores it * in ctx->tsgl_list. This implies allocation of the required numbers of * struct af_alg_tsgl. * * In addition, the ctx is filled with the information sent via CMSG. 
* * @sock: socket of connection to user space * @msg: message from user space * @size: size of message from user space * @ivsize: the size of the IV for the cipher operation to verify that the * user-space-provided IV has the right size * Return: the number of copied data upon success, < 0 upon error */ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size, unsigned int ivsize) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct af_alg_ctx *ctx = ask->private; struct af_alg_tsgl *sgl; struct af_alg_control con = {}; long copied = 0; bool enc = false; bool init = false; int err = 0; if (msg->msg_controllen) { err = af_alg_cmsg_send(msg, &con); if (err) return err; init = true; switch (con.op) { case ALG_OP_ENCRYPT: enc = true; break; case ALG_OP_DECRYPT: enc = false; break; default: return -EINVAL; } if (con.iv && con.iv->ivlen != ivsize) return -EINVAL; } lock_sock(sk); if (ctx->init && !ctx->more) { if (ctx->used) { err = -EINVAL; goto unlock; } pr_info_once( "%s sent an empty control message without MSG_MORE.\n", current->comm); } ctx->init = true; if (init) { ctx->enc = enc; if (con.iv) memcpy(ctx->iv, con.iv->iv, ivsize); ctx->aead_assoclen = con.aead_assoclen; } while (size) { struct scatterlist *sg; size_t len = size; ssize_t plen; /* use the existing memory in an allocated page */ if (ctx->merge && !(msg->msg_flags & MSG_SPLICE_PAGES)) { sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list); sg = sgl->sg + sgl->cur - 1; len = min_t(size_t, len, PAGE_SIZE - sg->offset - sg->length); err = memcpy_from_msg(page_address(sg_page(sg)) + sg->offset + sg->length, msg, len); if (err) goto unlock; sg->length += len; ctx->merge = (sg->offset + sg->length) & (PAGE_SIZE - 1); ctx->used += len; copied += len; size -= len; continue; } if (!af_alg_writable(sk)) { err = af_alg_wait_for_wmem(sk, msg->msg_flags); if (err) goto unlock; } /* allocate a new page */ len = min_t(unsigned long, len, af_alg_sndbuf(sk)); err = af_alg_alloc_tsgl(sk); if (err) goto unlock; sgl = list_entry(ctx->tsgl_list.prev, struct af_alg_tsgl, list); sg = sgl->sg; if (sgl->cur) sg_unmark_end(sg + sgl->cur - 1); if (msg->msg_flags & MSG_SPLICE_PAGES) { struct sg_table sgtable = { .sgl = sg, .nents = sgl->cur, .orig_nents = sgl->cur, }; plen = extract_iter_to_sg(&msg->msg_iter, len, &sgtable, MAX_SGL_ENTS - sgl->cur, 0); if (plen < 0) { err = plen; goto unlock; } for (; sgl->cur < sgtable.nents; sgl->cur++) get_page(sg_page(&sg[sgl->cur])); len -= plen; ctx->used += plen; copied += plen; size -= plen; ctx->merge = 0; } else { do { struct page *pg; unsigned int i = sgl->cur; plen = min_t(size_t, len, PAGE_SIZE); pg = alloc_page(GFP_KERNEL); if (!pg) { err = -ENOMEM; goto unlock; } sg_assign_page(sg + i, pg); err = memcpy_from_msg( page_address(sg_page(sg + i)), msg, plen); if (err) { __free_page(sg_page(sg + i)); sg_assign_page(sg + i, NULL); goto unlock; } sg[i].length = plen; len -= plen; ctx->used += plen; copied += plen; size -= plen; sgl->cur++; } while (len && sgl->cur < MAX_SGL_ENTS); ctx->merge = plen & (PAGE_SIZE - 1); } if (!size) sg_mark_end(sg + sgl->cur - 1); } err = 0; ctx->more = msg->msg_flags & MSG_MORE; unlock: af_alg_data_wakeup(sk); release_sock(sk); return copied ?: err; } EXPORT_SYMBOL_GPL(af_alg_sendmsg); /** * af_alg_free_resources - release resources required for crypto request * @areq: Request holding the TX and RX SGL */ void af_alg_free_resources(struct af_alg_async_req *areq) { struct sock *sk = areq->sk; struct af_alg_ctx *ctx; 
af_alg_free_areq_sgls(areq); sock_kfree_s(sk, areq, areq->areqlen); ctx = alg_sk(sk)->private; ctx->inflight = false; } EXPORT_SYMBOL_GPL(af_alg_free_resources); /** * af_alg_async_cb - AIO callback handler * @data: async request completion data * @err: if non-zero, error result to be returned via ki_complete(); * otherwise return the AIO output length via ki_complete(). * * This handler cleans up the struct af_alg_async_req upon completion of the * AIO operation. * * The number of bytes to be generated with the AIO operation must be set * in areq->outlen before the AIO callback handler is invoked. */ void af_alg_async_cb(void *data, int err) { struct af_alg_async_req *areq = data; struct sock *sk = areq->sk; struct kiocb *iocb = areq->iocb; unsigned int resultlen; /* Buffer size written by crypto operation. */ resultlen = areq->outlen; af_alg_free_resources(areq); sock_put(sk); iocb->ki_complete(iocb, err ? err : (int)resultlen); } EXPORT_SYMBOL_GPL(af_alg_async_cb); /** * af_alg_poll - poll system call handler * @file: file pointer * @sock: socket to poll * @wait: poll_table */ __poll_t af_alg_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct af_alg_ctx *ctx = ask->private; __poll_t mask; sock_poll_wait(file, sock, wait); mask = 0; if (!ctx->more || ctx->used) mask |= EPOLLIN | EPOLLRDNORM; if (af_alg_writable(sk)) mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; return mask; } EXPORT_SYMBOL_GPL(af_alg_poll); /** * af_alg_alloc_areq - allocate struct af_alg_async_req * * @sk: socket of connection to user space * @areqlen: size of struct af_alg_async_req + crypto_*_reqsize * Return: allocated data structure or ERR_PTR upon error */ struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk, unsigned int areqlen) { struct af_alg_ctx *ctx = alg_sk(sk)->private; struct af_alg_async_req *areq; /* Only one AIO request can be in flight. 
*/ if (ctx->inflight) return ERR_PTR(-EBUSY); areq = sock_kmalloc(sk, areqlen, GFP_KERNEL); if (unlikely(!areq)) return ERR_PTR(-ENOMEM); ctx->inflight = true; areq->areqlen = areqlen; areq->sk = sk; areq->first_rsgl.sgl.sgt.sgl = areq->first_rsgl.sgl.sgl; areq->last_rsgl = NULL; INIT_LIST_HEAD(&areq->rsgl_list); areq->tsgl = NULL; areq->tsgl_entries = 0; return areq; } EXPORT_SYMBOL_GPL(af_alg_alloc_areq); /** * af_alg_get_rsgl - create the RX SGL for the output data from the crypto * operation * * @sk: socket of connection to user space * @msg: user space message * @flags: flags used to invoke recvmsg with * @areq: instance of the cryptographic request that will hold the RX SGL * @maxsize: maximum number of bytes to be pulled from user space * @outlen: number of bytes in the RX SGL * Return: 0 on success, < 0 upon error */ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags, struct af_alg_async_req *areq, size_t maxsize, size_t *outlen) { struct alg_sock *ask = alg_sk(sk); struct af_alg_ctx *ctx = ask->private; size_t len = 0; while (maxsize > len && msg_data_left(msg)) { struct af_alg_rsgl *rsgl; ssize_t err; size_t seglen; /* limit the amount of readable buffers */ if (!af_alg_readable(sk)) break; seglen = min_t(size_t, (maxsize - len), msg_data_left(msg)); if (list_empty(&areq->rsgl_list)) { rsgl = &areq->first_rsgl; } else { rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL); if (unlikely(!rsgl)) return -ENOMEM; } rsgl->sgl.need_unpin = iov_iter_extract_will_pin(&msg->msg_iter); rsgl->sgl.sgt.sgl = rsgl->sgl.sgl; rsgl->sgl.sgt.nents = 0; rsgl->sgl.sgt.orig_nents = 0; list_add_tail(&rsgl->list, &areq->rsgl_list); sg_init_table(rsgl->sgl.sgt.sgl, ALG_MAX_PAGES); err = extract_iter_to_sg(&msg->msg_iter, seglen, &rsgl->sgl.sgt, ALG_MAX_PAGES, 0); if (err < 0) { rsgl->sg_num_bytes = 0; return err; } sg_mark_end(rsgl->sgl.sgt.sgl + rsgl->sgl.sgt.nents - 1); /* chain the new scatterlist with previous one */ if (areq->last_rsgl) af_alg_link_sg(&areq->last_rsgl->sgl, &rsgl->sgl); areq->last_rsgl = rsgl; len += err; atomic_add(err, &ctx->rcvused); rsgl->sg_num_bytes = err; } *outlen = len; return 0; } EXPORT_SYMBOL_GPL(af_alg_get_rsgl); static int __init af_alg_init(void) { int err = proto_register(&alg_proto, 0); if (err) goto out; err = sock_register(&alg_family); if (err != 0) goto out_unregister_proto; out: return err; out_unregister_proto: proto_unregister(&alg_proto); goto out; } static void __exit af_alg_exit(void) { sock_unregister(PF_ALG); proto_unregister(&alg_proto); } module_init(af_alg_init); module_exit(af_alg_exit); MODULE_DESCRIPTION("Crypto userspace interface"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(AF_ALG); |
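The sendmsg()/cmsg plumbing above (af_alg_cmsg_send() parsing SOL_ALG control messages, af_alg_sendmsg() queueing the data into ctx->tsgl_list) is driven from user space roughly as in the sketch below. This is an illustration, not part of the file: it assumes a cbc(aes) implementation is present, that SOL_ALG may need defining by hand on older userspace headers, and it elides all error handling.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

/* Encrypt one 16-byte block with cbc(aes); error handling elided. */
static void encrypt_block(const unsigned char key[16],
			  const unsigned char iv[16],
			  const unsigned char in[16], unsigned char out[16])
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "cbc(aes)",
	};
	/* Space for ALG_SET_OP (u32) plus ALG_SET_IV (af_alg_iv + 16). */
	char cbuf[CMSG_SPACE(sizeof(__u32)) +
		  CMSG_SPACE(sizeof(struct af_alg_iv) + 16)] = { 0 };
	struct iovec iov = { .iov_base = (void *)in, .iov_len = 16 };
	struct msghdr msg = {
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
		.msg_iov = &iov, .msg_iovlen = 1,
	};
	struct af_alg_iv *alg_iv;
	struct cmsghdr *cmsg;
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16); /* alg_setkey() */
	opfd = accept(tfmfd, NULL, 0);

	cmsg = CMSG_FIRSTHDR(&msg);			/* ALG_SET_OP */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);			/* ALG_SET_IV */
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct af_alg_iv) + 16);
	alg_iv = (struct af_alg_iv *)CMSG_DATA(cmsg);
	alg_iv->ivlen = 16;	/* af_alg_sendmsg() checks this == ivsize */
	memcpy(alg_iv->iv, iv, 16);

	sendmsg(opfd, &msg, 0);		/* parsed by af_alg_cmsg_send() */
	read(opfd, out, 16);		/* ciphertext */

	close(opfd);
	close(tfmfd);
}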
// SPDX-License-Identifier: GPL-2.0
/*
 * CPU <-> hardware queue mapping helpers
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/group_cpus.h>

#include "blk.h"
#include "blk-mq.h"

void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{
	const struct cpumask *masks;
	unsigned int queue, cpu;

	masks = group_cpus_evenly(qmap->nr_queues);
	if (!masks) {
		for_each_possible_cpu(cpu)
			qmap->mq_map[cpu] = qmap->queue_offset;
		return;
	}

	for (queue = 0; queue < qmap->nr_queues; queue++) {
		for_each_cpu(cpu, &masks[queue])
			qmap->mq_map[cpu] = qmap->queue_offset + queue;
	}
	kfree(masks);
}
EXPORT_SYMBOL_GPL(blk_mq_map_queues);

/**
 * blk_mq_hw_queue_to_node - Look up the memory node for a hardware queue index
 * @qmap: CPU to hardware queue map.
 * @index: hardware queue index.
 *
 * We have no quick way of doing reverse lookups. This is only used at
 * queue init time, so runtime isn't important.
 */
int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
{
	int i;

	for_each_possible_cpu(i) {
		if (index == qmap->mq_map[i])
			return cpu_to_node(i);
	}

	return NUMA_NO_NODE;
}
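For intuition about the mapping this produces: group_cpus_evenly() spreads the possible CPUs over nr_queues groups, so the resulting mq_map[] is a near-uniform partition of CPUs to hardware queues. The stand-alone sketch below is a toy model of that shape only; the real implementation is additionally NUMA- and sibling-aware. For 8 CPUs over 3 queues it yields a 3/3/2 split.

#include <stdio.h>

/* Toy stand-in for group_cpus_evenly()/blk_mq_map_queues(): spread
 * nr_cpus over nr_queues contiguously and as evenly as possible. */
static void toy_map_queues(unsigned int *mq_map, unsigned int nr_cpus,
			   unsigned int nr_queues)
{
	unsigned int cpu = 0;

	for (unsigned int q = 0; q < nr_queues; q++) {
		/* the first (nr_cpus % nr_queues) groups get one extra CPU */
		unsigned int len = nr_cpus / nr_queues +
				   (q < nr_cpus % nr_queues);

		while (len--)
			mq_map[cpu++] = q;
	}
}

int main(void)
{
	unsigned int map[8];

	toy_map_queues(map, 8, 3);
	for (int i = 0; i < 8; i++)
		printf("cpu%d -> hwq%u\n", i, map[i]);	/* 3-3-2 split */
	return 0;
}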
// SPDX-License-Identifier: GPL-2.0 /* * mm/pgtable-generic.c * * Generic pgtable methods declared in linux/pgtable.h * * Copyright (C) 2010 Linus Torvalds */ #include <linux/pagemap.h> #include <linux/hugetlb.h> #include <linux/pgtable.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/mm_inline.h> #include <asm/pgalloc.h> #include <asm/tlb.h> /* * If a p?d_bad entry is found while walking page tables, report * the error, before resetting entry to p?d_none. Usually (but * very seldom) called out from the p?d_none_or_clear_bad macros. */ void pgd_clear_bad(pgd_t *pgd) { pgd_ERROR(*pgd); pgd_clear(pgd); } #ifndef __PAGETABLE_P4D_FOLDED void p4d_clear_bad(p4d_t *p4d) { p4d_ERROR(*p4d); p4d_clear(p4d); } #endif #ifndef __PAGETABLE_PUD_FOLDED void pud_clear_bad(pud_t *pud) { pud_ERROR(*pud); pud_clear(pud); } #endif /* * Note that the pmd variant below can't be stub'ed out just as for p4d/pud * above. pmd folding is special and typically pmd_* macros refer to upper * level even when folded */ void pmd_clear_bad(pmd_t *pmd) { pmd_ERROR(*pmd); pmd_clear(pmd); } #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS /* * Only sets the access flags (dirty, accessed), as well as write * permission. Furthermore, we know it always gets set to a "more * permissive" setting, which allows most architectures to optimize * this. We return whether the PTE actually changed, which in turn * instructs the caller to do things like update_mmu_cache.
This * used to be done in the caller, but sparc needs minor faults to * force that call on sun4c so we changed this macro slightly */ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t entry, int dirty) { int changed = !pte_same(ptep_get(ptep), entry); if (changed) { set_pte_at(vma->vm_mm, address, ptep, entry); flush_tlb_fix_spurious_fault(vma, address, ptep); } return changed; } #endif #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH int ptep_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { int young; young = ptep_test_and_clear_young(vma, address, ptep); if (young) flush_tlb_page(vma, address); return young; } #endif #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { struct mm_struct *mm = (vma)->vm_mm; pte_t pte; pte = ptep_get_and_clear(mm, address, ptep); if (pte_accessible(mm, pte)) flush_tlb_page(vma, address); return pte; } #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE #ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp, pmd_t entry, int dirty) { int changed = !pmd_same(*pmdp, entry); VM_BUG_ON(address & ~HPAGE_PMD_MASK); if (changed) { set_pmd_at(vma->vm_mm, address, pmdp, entry); flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); } return changed; } #endif #ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { int young; VM_BUG_ON(address & ~HPAGE_PMD_MASK); young = pmdp_test_and_clear_young(vma, address, pmdp); if (young) flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); return young; } #endif #ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { pmd_t pmd; VM_BUG_ON(address & ~HPAGE_PMD_MASK); VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp)); pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); return pmd; } #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address, pud_t *pudp) { pud_t pud; VM_BUG_ON(address & ~HPAGE_PUD_MASK); VM_BUG_ON(!pud_trans_huge(*pudp) && !pud_devmap(*pudp)); pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp); flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE); return pud; } #endif #endif #ifndef __HAVE_ARCH_PGTABLE_DEPOSIT void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, pgtable_t pgtable) { assert_spin_locked(pmd_lockptr(mm, pmdp)); /* FIFO */ if (!pmd_huge_pte(mm, pmdp)) INIT_LIST_HEAD(&pgtable->lru); else list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru); pmd_huge_pte(mm, pmdp) = pgtable; } #endif #ifndef __HAVE_ARCH_PGTABLE_WITHDRAW /* no "address" argument so destroys page coloring of some arch */ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) { pgtable_t pgtable; assert_spin_locked(pmd_lockptr(mm, pmdp)); /* FIFO */ pgtable = pmd_huge_pte(mm, pmdp); pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru, struct page, lru); if (pmd_huge_pte(mm, pmdp)) list_del(&pgtable->lru); return pgtable; } #endif #ifndef __HAVE_ARCH_PMDP_INVALIDATE pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { VM_WARN_ON_ONCE(!pmd_present(*pmdp)); pmd_t old = pmdp_establish(vma, address, pmdp, 
pmd_mkinvalid(*pmdp)); flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); return old; } #endif #ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { VM_WARN_ON_ONCE(!pmd_present(*pmdp)); return pmdp_invalidate(vma, address, pmdp); } #endif #ifndef pmdp_collapse_flush pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) { /* * pmd and hugepage pte format are same. So we could * use the same function. */ pmd_t pmd; VM_BUG_ON(address & ~HPAGE_PMD_MASK); VM_BUG_ON(pmd_trans_huge(*pmdp)); pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); /* collapse entails shooting down ptes not pmd */ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); return pmd; } #endif /* arch define pte_free_defer in asm/pgalloc.h for its own implementation */ #ifndef pte_free_defer static void pte_free_now(struct rcu_head *head) { struct page *page; page = container_of(head, struct page, rcu_head); pte_free(NULL /* mm not passed and not used */, (pgtable_t)page); } void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable) { struct page *page; page = pgtable; call_rcu(&page->rcu_head, pte_free_now); } #endif /* pte_free_defer */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #if defined(CONFIG_GUP_GET_PXX_LOW_HIGH) && \ (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RCU)) /* * See the comment above ptep_get_lockless() in include/linux/pgtable.h: * the barriers in pmdp_get_lockless() cannot guarantee that the value in * pmd_high actually belongs with the value in pmd_low; but holding interrupts * off blocks the TLB flush between present updates, which guarantees that a * successful __pte_offset_map() points to a page from matched halves. */ static unsigned long pmdp_get_lockless_start(void) { unsigned long irqflags; local_irq_save(irqflags); return irqflags; } static void pmdp_get_lockless_end(unsigned long irqflags) { local_irq_restore(irqflags); } #else static unsigned long pmdp_get_lockless_start(void) { return 0; } static void pmdp_get_lockless_end(unsigned long irqflags) { } #endif pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp) { unsigned long irqflags; pmd_t pmdval; rcu_read_lock(); irqflags = pmdp_get_lockless_start(); pmdval = pmdp_get_lockless(pmd); pmdp_get_lockless_end(irqflags); if (pmdvalp) *pmdvalp = pmdval; if (unlikely(pmd_none(pmdval) || is_pmd_migration_entry(pmdval))) goto nomap; if (unlikely(pmd_trans_huge(pmdval) || pmd_devmap(pmdval))) goto nomap; if (unlikely(pmd_bad(pmdval))) { pmd_clear_bad(pmd); goto nomap; } return __pte_map(&pmdval, addr); nomap: rcu_read_unlock(); return NULL; } pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, spinlock_t **ptlp) { pmd_t pmdval; pte_t *pte; pte = __pte_offset_map(pmd, addr, &pmdval); if (likely(pte)) *ptlp = pte_lockptr(mm, &pmdval); return pte; } /* * pte_offset_map_lock(mm, pmd, addr, ptlp), and its internal implementation * __pte_offset_map_lock() below, is usually called with the pmd pointer for * addr, reached by walking down the mm's pgd, p4d, pud for addr: either while * holding mmap_lock or vma lock for read or for write; or in truncate or rmap * context, while holding file's i_mmap_lock or anon_vma lock for read (or for * write). In a few cases, it may be used with pmd pointing to a pmd_t already * copied to or constructed on the stack. 
* * When successful, it returns the pte pointer for addr, with its page table * kmapped if necessary (when CONFIG_HIGHPTE), and locked against concurrent * modification by software, with a pointer to that spinlock in ptlp (in some * configs mm->page_table_lock, in SPLIT_PTLOCK configs a spinlock in table's * struct page). pte_unmap_unlock(pte, ptl) to unlock and unmap afterwards. * * But it is unsuccessful, returning NULL with *ptlp unchanged, if there is no * page table at *pmd: if, for example, the page table has just been removed, * or replaced by the huge pmd of a THP. (When successful, *pmd is rechecked * after acquiring the ptlock, and retried internally if it changed: so that a * page table can be safely removed or replaced by THP while holding its lock.) * * pte_offset_map(pmd, addr), and its internal helper __pte_offset_map() above, * just returns the pte pointer for addr, its page table kmapped if necessary; * or NULL if there is no page table at *pmd. It does not attempt to lock the * page table, so cannot normally be used when the page table is to be updated, * or when entries read must be stable. But it does take rcu_read_lock(): so * that even when page table is racily removed, it remains a valid though empty * and disconnected table. Until pte_unmap(pte) unmaps and rcu_read_unlock()s * afterwards. * * pte_offset_map_nolock(mm, pmd, addr, ptlp), above, is like pte_offset_map(); * but when successful, it also outputs a pointer to the spinlock in ptlp - as * pte_offset_map_lock() does, but in this case without locking it. This helps * the caller to avoid a later pte_lockptr(mm, *pmd), which might by that time * act on a changed *pmd: pte_offset_map_nolock() provides the correct spinlock * pointer for the page table that it returns. In principle, the caller should * recheck *pmd once the lock is taken; in practice, no callsite needs that - * either the mmap_lock for write, or pte_same() check on contents, is enough. * * Note that free_pgtables(), used after unmapping detached vmas, or when * exiting the whole mm, does not take page table lock before freeing a page * table, and may not use RCU at all: "outsiders" like khugepaged should avoid * pte_offset_map() and co once the vma is detached from mm or mm_users is zero. */ pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, spinlock_t **ptlp) { spinlock_t *ptl; pmd_t pmdval; pte_t *pte; again: pte = __pte_offset_map(pmd, addr, &pmdval); if (unlikely(!pte)) return pte; ptl = pte_lockptr(mm, &pmdval); spin_lock(ptl); if (likely(pmd_same(pmdval, pmdp_get_lockless(pmd)))) { *ptlp = ptl; return pte; } pte_unmap_unlock(pte, ptl); goto again; } |
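The contract documented in the comment above reduces to a fixed caller pattern. A sketch of a typical page-table walker follows; walk_one_pte() is a hypothetical illustration, not a kernel function:

/*
 * NULL from pte_offset_map_lock() means "no page table here, nothing to
 * do"; the ptl returned through ptlp is the spinlock guarding the table
 * that was actually mapped, already rechecked against *pmd internally.
 */
static void walk_one_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return;		/* table removed, or replaced by huge pmd */

	if (pte_present(ptep_get(pte))) {
		/* ... operate on the entry while holding ptl ... */
	}

	pte_unmap_unlock(pte, ptl);	/* unlock; kunmap if CONFIG_HIGHPTE */
}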
// SPDX-License-Identifier: GPL-2.0-only /* * ebt_among * * Authors: * Grzegorz Borowiak <grzes@gnu.univ.gda.pl> * * August, 2003 * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/ip.h> #include <linux/if_arp.h> #include <linux/module.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_bridge/ebtables.h> #include <linux/netfilter_bridge/ebt_among.h> static bool ebt_mac_wormhash_contains(const struct ebt_mac_wormhash *wh, const char *mac, __be32 ip) { /* You may be puzzled as to how this code works. * Some tricks were used, refer to * include/linux/netfilter_bridge/ebt_among.h * as there you can find a solution of this mystery. */ const struct ebt_mac_wormhash_tuple *p; int start, limit, i; uint32_t cmp[2] = { 0, 0 }; int key = ((const unsigned char *)mac)[5]; ether_addr_copy(((char *) cmp) + 2, mac); start = wh->table[key]; limit = wh->table[key + 1]; if (ip) { for (i = start; i < limit; i++) { p = &wh->pool[i]; if (cmp[1] == p->cmp[1] && cmp[0] == p->cmp[0]) if (p->ip == 0 || p->ip == ip) return true; } } else { for (i = start; i < limit; i++) { p = &wh->pool[i]; if (cmp[1] == p->cmp[1] && cmp[0] == p->cmp[0]) if (p->ip == 0) return true; } } return false; } static int ebt_mac_wormhash_check_integrity(const struct ebt_mac_wormhash *wh) { int i; for (i = 0; i < 256; i++) { if (wh->table[i] > wh->table[i + 1]) return -0x100 - i; if (wh->table[i] < 0) return -0x200 - i; if (wh->table[i] > wh->poolsize) return -0x300 - i; } if (wh->table[256] > wh->poolsize) return -0xc00; return 0; } static int get_ip_dst(const struct sk_buff *skb, __be32 *addr) { if (eth_hdr(skb)->h_proto == htons(ETH_P_IP)) { const struct iphdr *ih; struct iphdr _iph; ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph); if (ih == NULL) return -1; *addr = ih->daddr; } else if (eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) { const struct arphdr *ah; struct arphdr _arph; const __be32 *bp; __be32 buf; ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph); if (ah == NULL || ah->ar_pln != sizeof(__be32) || ah->ar_hln != ETH_ALEN) return -1; bp = skb_header_pointer(skb, sizeof(struct arphdr) + 2 * ETH_ALEN + sizeof(__be32), sizeof(__be32), &buf); if (bp == NULL) return -1; *addr = *bp; } return 0; } static int get_ip_src(const struct sk_buff *skb, __be32 *addr) { if (eth_hdr(skb)->h_proto == htons(ETH_P_IP)) { const struct iphdr *ih; struct iphdr _iph; ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph); if (ih == NULL) return -1; *addr =
ih->saddr; } else if (eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) { const struct arphdr *ah; struct arphdr _arph; const __be32 *bp; __be32 buf; ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph); if (ah == NULL || ah->ar_pln != sizeof(__be32) || ah->ar_hln != ETH_ALEN) return -1; bp = skb_header_pointer(skb, sizeof(struct arphdr) + ETH_ALEN, sizeof(__be32), &buf); if (bp == NULL) return -1; *addr = *bp; } return 0; } static bool ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct ebt_among_info *info = par->matchinfo; const char *dmac, *smac; const struct ebt_mac_wormhash *wh_dst, *wh_src; __be32 dip = 0, sip = 0; wh_dst = ebt_among_wh_dst(info); wh_src = ebt_among_wh_src(info); if (wh_src) { smac = eth_hdr(skb)->h_source; if (get_ip_src(skb, &sip)) return false; if (!(info->bitmask & EBT_AMONG_SRC_NEG)) { /* we match only if it contains */ if (!ebt_mac_wormhash_contains(wh_src, smac, sip)) return false; } else { /* we match only if it DOES NOT contain */ if (ebt_mac_wormhash_contains(wh_src, smac, sip)) return false; } } if (wh_dst) { dmac = eth_hdr(skb)->h_dest; if (get_ip_dst(skb, &dip)) return false; if (!(info->bitmask & EBT_AMONG_DST_NEG)) { /* we match only if it contains */ if (!ebt_mac_wormhash_contains(wh_dst, dmac, dip)) return false; } else { /* we match only if it DOES NOT contain */ if (ebt_mac_wormhash_contains(wh_dst, dmac, dip)) return false; } } return true; } static bool poolsize_invalid(const struct ebt_mac_wormhash *w) { return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple)); } static bool wormhash_offset_invalid(int off, unsigned int len) { if (off == 0) /* not present */ return false; if (off < (int)sizeof(struct ebt_among_info) || off % __alignof__(struct ebt_mac_wormhash)) return true; off += sizeof(struct ebt_mac_wormhash); return off > len; } static bool wormhash_sizes_valid(const struct ebt_mac_wormhash *wh, int a, int b) { if (a == 0) a = sizeof(struct ebt_among_info); return ebt_mac_wormhash_size(wh) + a == b; } static int ebt_among_mt_check(const struct xt_mtchk_param *par) { const struct ebt_among_info *info = par->matchinfo; const struct ebt_entry_match *em = container_of(par->matchinfo, const struct ebt_entry_match, data); unsigned int expected_length = sizeof(struct ebt_among_info); const struct ebt_mac_wormhash *wh_dst, *wh_src; int err; if (expected_length > em->match_size) return -EINVAL; if (wormhash_offset_invalid(info->wh_dst_ofs, em->match_size) || wormhash_offset_invalid(info->wh_src_ofs, em->match_size)) return -EINVAL; wh_dst = ebt_among_wh_dst(info); if (poolsize_invalid(wh_dst)) return -EINVAL; expected_length += ebt_mac_wormhash_size(wh_dst); if (expected_length > em->match_size) return -EINVAL; wh_src = ebt_among_wh_src(info); if (poolsize_invalid(wh_src)) return -EINVAL; if (info->wh_src_ofs < info->wh_dst_ofs) { if (!wormhash_sizes_valid(wh_src, info->wh_src_ofs, info->wh_dst_ofs)) return -EINVAL; } else { if (!wormhash_sizes_valid(wh_dst, info->wh_dst_ofs, info->wh_src_ofs)) return -EINVAL; } expected_length += ebt_mac_wormhash_size(wh_src); if (em->match_size != EBT_ALIGN(expected_length)) { pr_err_ratelimited("wrong size: %d against expected %d, rounded to %zd\n", em->match_size, expected_length, EBT_ALIGN(expected_length)); return -EINVAL; } if (wh_dst && (err = ebt_mac_wormhash_check_integrity(wh_dst))) { pr_err_ratelimited("dst integrity fail: %x\n", -err); return -EINVAL; } if (wh_src && (err = ebt_mac_wormhash_check_integrity(wh_src))) { pr_err_ratelimited("src integrity 
fail: %x\n", -err); return -EINVAL; } return 0; } static struct xt_match ebt_among_mt_reg __read_mostly = { .name = "among", .revision = 0, .family = NFPROTO_BRIDGE, .match = ebt_among_mt, .checkentry = ebt_among_mt_check, .matchsize = -1, /* special case */ .me = THIS_MODULE, }; static int __init ebt_among_init(void) { return xt_register_match(&ebt_among_mt_reg); } static void __exit ebt_among_fini(void) { xt_unregister_match(&ebt_among_mt_reg); } module_init(ebt_among_init); module_exit(ebt_among_fini); MODULE_DESCRIPTION("Ebtables: Combined MAC/IP address list matching"); MODULE_LICENSE("GPL"); |
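The wormhash lookup in ebt_mac_wormhash_contains() above is easier to see in isolation: the pool is sorted by the last byte of the MAC address, and the 257-entry table stores cumulative indices so that table[b]..table[b+1] bounds exactly the slice of pool entries whose MAC ends in byte b. A stand-alone toy model follows; the types and names are illustrative, not the kernel's packed structures:

#include <stdint.h>
#include <stdbool.h>
#include <string.h>

struct toy_tuple {
	uint8_t mac[6];
	uint32_t ip;	/* 0 means "any IP" */
};

struct toy_wormhash {
	int table[257];          /* cumulative counts per last-byte value */
	struct toy_tuple pool[]; /* sorted by mac[5] */
};

static bool toy_contains(const struct toy_wormhash *wh,
			 const uint8_t mac[6], uint32_t ip)
{
	int key = mac[5];	/* same key the kernel code uses */

	for (int i = wh->table[key]; i < wh->table[key + 1]; i++) {
		const struct toy_tuple *p = &wh->pool[i];

		if (!memcmp(p->mac, mac, 6) && (p->ip == 0 || p->ip == ip))
			return true;
	}
	return false;
}

In user space this match is typically configured through ebtables' --among-dst/--among-src options, which take lists of mac[=ip] pairs and build the wormhash that ebt_among_mt_check() validates above.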
// SPDX-License-Identifier: GPL-2.0 #include "bcachefs.h" #include "btree_update_interior.h" #include "buckets.h" #include "error.h" #include "journal_io.h" #include "replicas.h" #include "sb-clean.h" #include "super-io.h" /* * BCH_SB_FIELD_clean: * * Btree roots, and a few other things, are recovered from the journal after an * unclean shutdown - but after a clean shutdown, to avoid having to read the * journal, we can store them in the superblock. * * bch_sb_field_clean simply contains a list of journal entries, stored exactly * as they would be in the journal: */ int bch2_sb_clean_validate_late(struct bch_fs *c, struct bch_sb_field_clean *clean, int write) { struct jset_entry *entry; int ret; for (entry = clean->start; entry < (struct jset_entry *) vstruct_end(&clean->field); entry = vstruct_next(entry)) { if (vstruct_end(entry) > vstruct_end(&clean->field)) { bch_err(c, "journal entry (u64s %u) overran end of superblock clean section (u64s %u) by %zu", le16_to_cpu(entry->u64s), le32_to_cpu(clean->field.u64s), (u64 *) vstruct_end(entry) - (u64 *) vstruct_end(&clean->field)); bch2_sb_error_count(c, BCH_FSCK_ERR_sb_clean_entry_overrun); return -BCH_ERR_fsck_repair_unimplemented; } ret = bch2_journal_entry_validate(c, NULL, entry, le16_to_cpu(c->disk_sb.sb->version), BCH_SB_BIG_ENDIAN(c->disk_sb.sb), write); if (ret) return ret; } return 0; } static struct bkey_i *btree_root_find(struct bch_fs *c, struct bch_sb_field_clean *clean, struct jset *j, enum btree_id id, unsigned *level) { struct bkey_i *k; struct jset_entry *entry, *start, *end; if (clean) { start = clean->start; end = vstruct_end(&clean->field); } else { start = j->start; end = vstruct_last(j); } for (entry = start; entry < end; entry = vstruct_next(entry)) if (entry->type == BCH_JSET_ENTRY_btree_root && entry->btree_id == id) goto found; return NULL; found: if (!entry->u64s) return ERR_PTR(-EINVAL); k = entry->start; *level = entry->level; return k; } int bch2_verify_superblock_clean(struct bch_fs *c, struct bch_sb_field_clean **cleanp, struct jset *j) { unsigned i; struct bch_sb_field_clean *clean = *cleanp; struct printbuf buf1 = PRINTBUF; struct printbuf buf2 = PRINTBUF; int ret = 0; if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c, sb_clean_journal_seq_mismatch, "superblock journal
seq (%llu) doesn't match journal (%llu) after clean shutdown", le64_to_cpu(clean->journal_seq), le64_to_cpu(j->seq))) { kfree(clean); *cleanp = NULL; return 0; } for (i = 0; i < BTREE_ID_NR; i++) { struct bkey_i *k1, *k2; unsigned l1 = 0, l2 = 0; k1 = btree_root_find(c, clean, NULL, i, &l1); k2 = btree_root_find(c, NULL, j, i, &l2); if (!k1 && !k2) continue; printbuf_reset(&buf1); printbuf_reset(&buf2); if (k1) bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(k1)); else prt_printf(&buf1, "(none)"); if (k2) bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(k2)); else prt_printf(&buf2, "(none)"); mustfix_fsck_err_on(!k1 || !k2 || IS_ERR(k1) || IS_ERR(k2) || k1->k.u64s != k2->k.u64s || memcmp(k1, k2, bkey_bytes(&k1->k)) || l1 != l2, c, sb_clean_btree_root_mismatch, "superblock btree root %u doesn't match journal after clean shutdown\n" "sb: l=%u %s\n" "journal: l=%u %s\n", i, l1, buf1.buf, l2, buf2.buf); } fsck_err: printbuf_exit(&buf2); printbuf_exit(&buf1); return ret; } struct bch_sb_field_clean *bch2_read_superblock_clean(struct bch_fs *c) { struct bch_sb_field_clean *clean, *sb_clean; int ret; mutex_lock(&c->sb_lock); sb_clean = bch2_sb_field_get(c->disk_sb.sb, clean); if (fsck_err_on(!sb_clean, c, sb_clean_missing, "superblock marked clean but clean section not present")) { SET_BCH_SB_CLEAN(c->disk_sb.sb, false); c->sb.clean = false; mutex_unlock(&c->sb_lock); return NULL; } clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field), GFP_KERNEL); if (!clean) { mutex_unlock(&c->sb_lock); return ERR_PTR(-BCH_ERR_ENOMEM_read_superblock_clean); } ret = bch2_sb_clean_validate_late(c, clean, READ); if (ret) { mutex_unlock(&c->sb_lock); return ERR_PTR(ret); } mutex_unlock(&c->sb_lock); return clean; fsck_err: mutex_unlock(&c->sb_lock); return ERR_PTR(ret); } void bch2_journal_super_entries_add_common(struct bch_fs *c, struct jset_entry **end, u64 journal_seq) { { struct jset_entry_usage *u = container_of(jset_entry_init(end, sizeof(*u)), struct jset_entry_usage, entry); u->entry.type = BCH_JSET_ENTRY_usage; u->entry.btree_id = BCH_FS_USAGE_key_version; u->v = cpu_to_le64(atomic64_read(&c->key_version)); } for (unsigned i = 0; i < 2; i++) { struct jset_entry_clock *clock = container_of(jset_entry_init(end, sizeof(*clock)), struct jset_entry_clock, entry); clock->entry.type = BCH_JSET_ENTRY_clock; clock->rw = i; clock->time = cpu_to_le64(atomic64_read(&c->io_clock[i].now)); } } static int bch2_sb_clean_validate(struct bch_sb *sb, struct bch_sb_field *f, enum bch_validate_flags flags, struct printbuf *err) { struct bch_sb_field_clean *clean = field_to_type(f, clean); if (vstruct_bytes(&clean->field) < sizeof(*clean)) { prt_printf(err, "wrong size (got %zu should be %zu)", vstruct_bytes(&clean->field), sizeof(*clean)); return -BCH_ERR_invalid_sb_clean; } for (struct jset_entry *entry = clean->start; entry != vstruct_end(&clean->field); entry = vstruct_next(entry)) { if ((void *) vstruct_next(entry) > vstruct_end(&clean->field)) { prt_str(err, "entry type "); bch2_prt_jset_entry_type(err, entry->type); prt_str(err, " overruns end of section"); return -BCH_ERR_invalid_sb_clean; } } return 0; } static void bch2_sb_clean_to_text(struct printbuf *out, struct bch_sb *sb, struct bch_sb_field *f) { struct bch_sb_field_clean *clean = field_to_type(f, clean); struct jset_entry *entry; prt_printf(out, "flags: %x\n", le32_to_cpu(clean->flags)); prt_printf(out, "journal_seq: %llu\n", le64_to_cpu(clean->journal_seq)); for (entry = clean->start; entry != vstruct_end(&clean->field); entry = vstruct_next(entry)) { if 
((void *) vstruct_next(entry) > vstruct_end(&clean->field)) break; if (entry->type == BCH_JSET_ENTRY_btree_keys && !entry->u64s) continue; bch2_journal_entry_to_text(out, NULL, entry); prt_newline(out); } } const struct bch_sb_field_ops bch_sb_field_ops_clean = { .validate = bch2_sb_clean_validate, .to_text = bch2_sb_clean_to_text, }; int bch2_fs_mark_dirty(struct bch_fs *c) { int ret; /* * Unconditionally write superblock, to verify it hasn't changed before * we go rw: */ mutex_lock(&c->sb_lock); SET_BCH_SB_CLEAN(c->disk_sb.sb, false); c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALWAYS); ret = bch2_write_super(c); mutex_unlock(&c->sb_lock); return ret; } void bch2_fs_mark_clean(struct bch_fs *c) { struct bch_sb_field_clean *sb_clean; struct jset_entry *entry; unsigned u64s; int ret; mutex_lock(&c->sb_lock); if (BCH_SB_CLEAN(c->disk_sb.sb)) goto out; SET_BCH_SB_CLEAN(c->disk_sb.sb, true); c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info); c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_metadata); c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_extents_above_btree_updates)); c->disk_sb.sb->features[0] &= cpu_to_le64(~(1ULL << BCH_FEATURE_btree_updates_journalled)); u64s = sizeof(*sb_clean) / sizeof(u64) + c->journal.entry_u64s_reserved; sb_clean = bch2_sb_field_resize(&c->disk_sb, clean, u64s); if (!sb_clean) { bch_err(c, "error resizing superblock while setting filesystem clean"); goto out; } sb_clean->flags = 0; sb_clean->journal_seq = cpu_to_le64(atomic64_read(&c->journal.seq)); /* Trying to catch outstanding bug: */ BUG_ON(le64_to_cpu(sb_clean->journal_seq) > S64_MAX); entry = sb_clean->start; bch2_journal_super_entries_add_common(c, &entry, 0); entry = bch2_btree_roots_to_journal_entries(c, entry, 0); BUG_ON((void *) entry > vstruct_end(&sb_clean->field)); memset(entry, 0, vstruct_end(&sb_clean->field) - (void *) entry); /* * this should be in the write path, and we should be validating every * superblock section: */ ret = bch2_sb_clean_validate_late(c, sb_clean, WRITE); if (ret) { bch_err(c, "error writing marking filesystem clean: validate error"); goto out; } bch2_journal_pos_from_member_info_set(c); bch2_write_super(c); out: mutex_unlock(&c->sb_lock); } |
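The jset_entry walks used throughout this file (from clean->start, stepping with vstruct_next() until vstruct_end()) all follow one pattern: every entry declares its payload size in 64-bit words, and the iterator advances by header plus payload. A stand-alone toy model of that walk follows; the types are illustrative, not bcachefs's real vstruct macros:

#include <stdint.h>
#include <stdio.h>

struct toy_entry {
	uint16_t u64s;		/* payload size in u64 units */
	uint16_t type;
	uint32_t pad;
	uint64_t data[];	/* u64s words follow the header */
};

static struct toy_entry *toy_next(struct toy_entry *e)
{
	/* skip the 8-byte header, then u64s payload words */
	return (struct toy_entry *)((uint64_t *)(e + 1) + e->u64s);
}

static void toy_walk(struct toy_entry *start, void *end)
{
	for (struct toy_entry *e = start; (void *)e < end; e = toy_next(e)) {
		/* validate before trusting u64s, as the code above does */
		if ((void *)toy_next(e) > end) {
			fprintf(stderr, "entry overruns section\n");
			return;
		}
		printf("entry type %u, %u words\n", e->type, e->u64s);
	}
}

int main(void)
{
	uint64_t buf[5] = { 0 };
	struct toy_entry *e = (struct toy_entry *)buf;

	e->u64s = 1; e->type = 7;	/* occupies buf[0..1] */
	e = toy_next(e);
	e->u64s = 2; e->type = 9;	/* occupies buf[2..4] */

	toy_walk((struct toy_entry *)buf, buf + 5);
	return 0;
}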
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/processor.h>

static inline int phys_addr_valid(resource_size_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	return !(addr >> boot_cpu_data.x86_phys_bits);
#else
	return 1;
#endif
}
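The check simply rejects any address with bits set at or above boot_cpu_data.x86_phys_bits: on a CPU reporting 46 physical address bits, 1ULL << 46 is the first invalid address. A stand-alone model (the 46-bit figure is for illustration only):

#include <stdio.h>

static int model_phys_addr_valid(unsigned long long addr, int phys_bits)
{
	/* valid iff no bits at or above phys_bits are set */
	return !(addr >> phys_bits);
}

int main(void)
{
	printf("%d\n", model_phys_addr_valid((1ULL << 46) - 1, 46)); /* 1 */
	printf("%d\n", model_phys_addr_valid(1ULL << 46, 46));       /* 0 */
	return 0;
}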
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2014 Jozsef Kadlecsik <kadlec@netfilter.org> */ /* Kernel module implementing an IP set type: the hash:mac type */ #include <linux/jhash.h> #include <linux/module.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/if_ether.h> #include <net/netlink.h> #include <linux/netfilter.h> #include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set_hash.h> #define IPSET_TYPE_REV_MIN 0 #define IPSET_TYPE_REV_MAX 1 /* bucketsize, initval support */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>"); IP_SET_MODULE_DESC("hash:mac", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX); MODULE_ALIAS("ip_set_hash:mac"); /* Type specific function prefix */ #define HTYPE hash_mac /* Member elements */ struct hash_mac4_elem { /* Zero valued IP addresses cannot be stored */ union { unsigned char ether[ETH_ALEN]; __be32 foo[2]; }; }; /* Common functions */ static bool hash_mac4_data_equal(const struct hash_mac4_elem *e1, const struct hash_mac4_elem *e2, u32 *multi) { return ether_addr_equal(e1->ether, e2->ether); } static bool hash_mac4_data_list(struct sk_buff *skb, const struct hash_mac4_elem *e) { if (nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN, e->ether)) goto nla_put_failure; return false; nla_put_failure: return true; } static void hash_mac4_data_next(struct hash_mac4_elem *next, const struct hash_mac4_elem *e) { } #define MTYPE hash_mac4 #define HOST_MASK 32 #define IP_SET_EMIT_CREATE #define IP_SET_PROTO_UNDEF #include "ip_set_hash_gen.h" static int hash_mac4_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, struct ip_set_adt_opt *opt) { ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_mac4_elem e = { { .foo[0] = 0, .foo[1] = 0 } }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (skb_mac_header(skb) < skb->head || (skb_mac_header(skb) + ETH_HLEN) > skb->data) return -EINVAL; if (opt->flags & IPSET_DIM_ONE_SRC) ether_addr_copy(e.ether, eth_hdr(skb)->h_source); else ether_addr_copy(e.ether, eth_hdr(skb)->h_dest); if (is_zero_ether_addr(e.ether)) return -EINVAL; return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); } static int hash_mac4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_mac4_elem e = { { .foo[0] = 0, .foo[1] = 0 } }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); int ret; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); if (unlikely(!tb[IPSET_ATTR_ETHER] || nla_len(tb[IPSET_ATTR_ETHER]) != ETH_ALEN)) return -IPSET_ERR_PROTOCOL; ret = ip_set_get_extensions(set, tb, &ext); if (ret) return ret; ether_addr_copy(e.ether, nla_data(tb[IPSET_ATTR_ETHER])); if (is_zero_ether_addr(e.ether)) return -IPSET_ERR_HASH_ELEM; return adtfn(set, &e, &ext, &ext, flags); } static struct ip_set_type
hash_mac_type __read_mostly = { .name = "hash:mac", .protocol = IPSET_PROTOCOL, .features = IPSET_TYPE_MAC, .dimension = IPSET_DIM_ONE, .family = NFPROTO_UNSPEC, .revision_min = IPSET_TYPE_REV_MIN, .revision_max = IPSET_TYPE_REV_MAX, .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE, .create = hash_mac_create, .create_policy = { [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, [IPSET_ATTR_INITVAL] = { .type = NLA_U32 }, [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 }, [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, }, .adt_policy = { [IPSET_ATTR_ETHER] = { .type = NLA_BINARY, .len = ETH_ALEN }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING, .len = IPSET_MAX_COMMENT_SIZE }, [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; static int __init hash_mac_init(void) { return ip_set_type_register(&hash_mac_type); } static void __exit hash_mac_fini(void) { rcu_barrier(); ip_set_type_unregister(&hash_mac_type); } module_init(hash_mac_init); module_exit(hash_mac_fini);
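The uadt path above accepts a userspace element only after three checks: the IPSET_ATTR_ETHER attribute is present, its payload is exactly ETH_ALEN (6) bytes, and the address is not all-zero. A standalone sketch of that validation logic; mac_attr_ok is an illustrative name, not a kernel function:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#define ETH_ALEN 6

/* Mirrors the hash:mac uadt checks: present, exactly 6 bytes, not all-zero. */
static bool mac_attr_ok(const unsigned char *attr, size_t attr_len)
{
	static const unsigned char zero[ETH_ALEN];

	if (attr == NULL || attr_len != ETH_ALEN)  /* missing or mis-sized */
		return false;
	return memcmp(attr, zero, ETH_ALEN) != 0;  /* zero MACs are rejected */
}

int main(void)
{
	unsigned char mac[ETH_ALEN] = { 0x02, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	return mac_attr_ok(mac, sizeof(mac)) ? 0 : 1;
}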
// SPDX-License-Identifier: GPL-2.0-or-later /* * X.25 Packet Layer release 002 * * This is ALPHA test software. This code may break your machine, randomly fail to work with new * releases, misbehave and/or generally screw up. It might even work. * * This code REQUIRES 2.1.15 or higher * * History * X.25 001 Jonathan Naylor Started coding. * 2000-09-04 Henner Eisen Prevent freeing a dangling skb. */ #define pr_fmt(fmt) "X25: " fmt #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/sock.h> #include <linux/if_arp.h> #include <net/x25.h> #include <net/x25device.h> static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb) { struct sock *sk; unsigned short frametype; unsigned int lci; if (!pskb_may_pull(skb, X25_STD_MIN_LEN)) return 0; frametype = skb->data[2]; lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); /* * An LCI of zero is always for us, and it's always a link control * frame. */ if (lci == 0) { x25_link_control(skb, nb, frametype); return 0; } /* * Find an existing socket. */ if ((sk = x25_find_socket(lci, nb)) != NULL) { int queued = 1; skb_reset_transport_header(skb); bh_lock_sock(sk); if (!sock_owned_by_user(sk)) { queued = x25_process_rx_frame(sk, skb); } else { queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)); } bh_unlock_sock(sk); sock_put(sk); return queued; } /* * Is it a Call Request? If so, process it. */ if (frametype == X25_CALL_REQUEST) return x25_rx_call_request(skb, nb, lci); /* * It's not a Call Request, nor is it a control frame. * Can we forward it? */ if (x25_forward_data(lci, nb, skb)) { if (frametype == X25_CLEAR_CONFIRMATION) { x25_clear_forward_by_lci(lci); } kfree_skb(skb); return 1; } /* x25_transmit_clear_request(nb, lci, 0x0D); */ if (frametype != X25_CLEAR_CONFIRMATION) pr_debug("x25_receive_data(): unknown frame type %2x\n", frametype); return 0; } int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev) { struct sk_buff *nskb; struct x25_neigh *nb; if (!net_eq(dev_net(dev), &init_net)) goto drop; nskb = skb_copy(skb, GFP_ATOMIC); if (!nskb) goto drop; kfree_skb(skb); skb = nskb; /* * Packet received from unrecognised device, throw it away.
*/ nb = x25_get_neigh(dev); if (!nb) { pr_debug("unknown neighbour - %s\n", dev->name); goto drop; } if (!pskb_may_pull(skb, 1)) { x25_neigh_put(nb); goto drop; } switch (skb->data[0]) { case X25_IFACE_DATA: skb_pull(skb, 1); if (x25_receive_data(skb, nb)) { x25_neigh_put(nb); goto out; } break; case X25_IFACE_CONNECT: x25_link_established(nb); break; case X25_IFACE_DISCONNECT: x25_link_terminated(nb); break; } x25_neigh_put(nb); drop: kfree_skb(skb); out: return 0; } void x25_establish_link(struct x25_neigh *nb) { struct sk_buff *skb; unsigned char *ptr; switch (nb->dev->type) { case ARPHRD_X25: if ((skb = alloc_skb(1, GFP_ATOMIC)) == NULL) { pr_err("x25_dev: out of memory\n"); return; } ptr = skb_put(skb, 1); *ptr = X25_IFACE_CONNECT; break; default: return; } skb->protocol = htons(ETH_P_X25); skb->dev = nb->dev; dev_queue_xmit(skb); } void x25_terminate_link(struct x25_neigh *nb) { struct sk_buff *skb; unsigned char *ptr; if (nb->dev->type != ARPHRD_X25) return; skb = alloc_skb(1, GFP_ATOMIC); if (!skb) { pr_err("x25_dev: out of memory\n"); return; } ptr = skb_put(skb, 1); *ptr = X25_IFACE_DISCONNECT; skb->protocol = htons(ETH_P_X25); skb->dev = nb->dev; dev_queue_xmit(skb); } void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb) { unsigned char *dptr; skb_reset_network_header(skb); switch (nb->dev->type) { case ARPHRD_X25: dptr = skb_push(skb, 1); *dptr = X25_IFACE_DATA; break; default: kfree_skb(skb); return; } skb->protocol = htons(ETH_P_X25); skb->dev = nb->dev; dev_queue_xmit(skb); }
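x25_receive_data() builds the 12-bit logical channel identifier from the low nibble of the first header byte (the logical channel group) and the whole second byte (the logical channel number). A standalone sketch of that extraction with a worked value; the function name is illustrative:

#include <stdio.h>

/* Extract the 12-bit LCI as x25_receive_data() does:
 * the low nibble of byte 0 becomes bits 8-11, byte 1 becomes bits 0-7. */
static unsigned int x25_lci_sketch(const unsigned char *data)
{
	return ((data[0] << 8) & 0xF00) + (data[1] & 0x0FF);
}

int main(void)
{
	unsigned char hdr[] = { 0x1A, 0xBC };	/* GFI 0x1, LCG 0xA, LCN 0xBC */
	printf("lci = 0x%03X\n", x25_lci_sketch(hdr));	/* prints lci = 0xABC */
	return 0;
}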
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_MMZONE_H #define _LINUX_MMZONE_H #ifndef __ASSEMBLY__ #ifndef __GENERATING_BOUNDS_H #include <linux/spinlock.h> #include <linux/list.h> #include <linux/list_nulls.h> #include <linux/wait.h> #include <linux/bitops.h> #include <linux/cache.h> #include <linux/threads.h> #include <linux/numa.h> #include <linux/init.h> #include <linux/seqlock.h> #include <linux/nodemask.h> #include <linux/pageblock-flags.h> #include <linux/page-flags-layout.h> #include <linux/atomic.h> #include <linux/mm_types.h> #include <linux/page-flags.h> #include <linux/local_lock.h> #include <linux/zswap.h> #include <asm/page.h> /* Free memory management - zoned buddy allocator. */ #ifndef CONFIG_ARCH_FORCE_MAX_ORDER #define MAX_PAGE_ORDER 10 #else #define MAX_PAGE_ORDER CONFIG_ARCH_FORCE_MAX_ORDER #endif #define MAX_ORDER_NR_PAGES (1 << MAX_PAGE_ORDER) #define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES) #define NR_PAGE_ORDERS (MAX_PAGE_ORDER + 1) /* * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed * costly to service. That is between allocation orders which should * coalesce naturally under reasonable reclaim pressure and those which * will not.
*/ #define PAGE_ALLOC_COSTLY_ORDER 3 enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_PCPTYPES, /* the number of types on the pcp lists */ MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES, #ifdef CONFIG_CMA /* * MIGRATE_CMA migration type is designed to mimic the way * ZONE_MOVABLE works. Only movable pages can be allocated * from MIGRATE_CMA pageblocks and the page allocator never * implicitly changes the migration type of a MIGRATE_CMA pageblock. * * The way to use it is to change the migratetype of a range of * pageblocks to MIGRATE_CMA, which can be done by the * __free_pageblock_cma() function. */ MIGRATE_CMA, #endif #ifdef CONFIG_MEMORY_ISOLATION MIGRATE_ISOLATE, /* can't allocate from here */ #endif MIGRATE_TYPES }; /* In mm/page_alloc.c; keep in sync also with show_migration_types() there */ extern const char * const migratetype_names[MIGRATE_TYPES]; #ifdef CONFIG_CMA # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) # define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA) # define is_migrate_cma_folio(folio, pfn) (MIGRATE_CMA == \ get_pfnblock_flags_mask(&folio->page, pfn, MIGRATETYPE_MASK)) #else # define is_migrate_cma(migratetype) false # define is_migrate_cma_page(_page) false # define is_migrate_cma_folio(folio, pfn) false #endif static inline bool is_migrate_movable(int mt) { return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE; } /* * Check whether a migratetype can be merged with another migratetype. * * It is only mergeable when it can fall back to other migratetypes for * allocation. See fallbacks[MIGRATE_TYPES][3] in page_alloc.c. */ static inline bool migratetype_is_mergeable(int mt) { return mt < MIGRATE_PCPTYPES; } #define for_each_migratetype_order(order, type) \ for (order = 0; order < NR_PAGE_ORDERS; order++) \ for (type = 0; type < MIGRATE_TYPES; type++) extern int page_group_by_mobility_disabled; #define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1) #define get_pageblock_migratetype(page) \ get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK) #define folio_migratetype(folio) \ get_pfnblock_flags_mask(&folio->page, folio_pfn(folio), \ MIGRATETYPE_MASK) struct free_area { struct list_head free_list[MIGRATE_TYPES]; unsigned long nr_free; }; struct pglist_data; #ifdef CONFIG_NUMA enum numa_stat_item { NUMA_HIT, /* allocated in intended node */ NUMA_MISS, /* allocated in a non-intended node */ NUMA_FOREIGN, /* was intended here, hit elsewhere */ NUMA_INTERLEAVE_HIT, /* interleaver preferred this zone */ NUMA_LOCAL, /* allocation from local node */ NUMA_OTHER, /* allocation from other node */ NR_VM_NUMA_EVENT_ITEMS }; #else #define NR_VM_NUMA_EVENT_ITEMS 0 #endif enum zone_stat_item { /* First 128 byte cacheline (assuming 64 bit words) */ NR_FREE_PAGES, NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */ NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE, NR_ZONE_ACTIVE_ANON, NR_ZONE_INACTIVE_FILE, NR_ZONE_ACTIVE_FILE, NR_ZONE_UNEVICTABLE, NR_ZONE_WRITE_PENDING, /* Count of dirty, writeback and unstable pages */ NR_MLOCK, /* mlock()ed pages found and moved off LRU */ /* Second 128 byte cacheline */ NR_BOUNCE, #if IS_ENABLED(CONFIG_ZSMALLOC) NR_ZSPAGES, /* allocated in zsmalloc */ #endif NR_FREE_CMA_PAGES, #ifdef CONFIG_UNACCEPTED_MEMORY NR_UNACCEPTED, #endif NR_VM_ZONE_STAT_ITEMS }; enum node_stat_item { NR_LRU_BASE, NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */ NR_ACTIVE_ANON, /* " " " " " */ NR_INACTIVE_FILE, /* " " " " " */ NR_ACTIVE_FILE, /* " " " " " */
NR_UNEVICTABLE, /* " " " " " */ NR_SLAB_RECLAIMABLE_B, NR_SLAB_UNRECLAIMABLE_B, NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */ NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */ WORKINGSET_NODES, WORKINGSET_REFAULT_BASE, WORKINGSET_REFAULT_ANON = WORKINGSET_REFAULT_BASE, WORKINGSET_REFAULT_FILE, WORKINGSET_ACTIVATE_BASE, WORKINGSET_ACTIVATE_ANON = WORKINGSET_ACTIVATE_BASE, WORKINGSET_ACTIVATE_FILE, WORKINGSET_RESTORE_BASE, WORKINGSET_RESTORE_ANON = WORKINGSET_RESTORE_BASE, WORKINGSET_RESTORE_FILE, WORKINGSET_NODERECLAIM, NR_ANON_MAPPED, /* Mapped anonymous pages */ NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. only modified from process context */ NR_FILE_PAGES, NR_FILE_DIRTY, NR_WRITEBACK, NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ NR_SHMEM, /* shmem pages (includes tmpfs/GEM pages) */ NR_SHMEM_THPS, NR_SHMEM_PMDMAPPED, NR_FILE_THPS, NR_FILE_PMDMAPPED, NR_ANON_THPS, NR_VMSCAN_WRITE, NR_VMSCAN_IMMEDIATE, /* Prioritise for reclaim when writeback ends */ NR_DIRTIED, /* page dirtyings since bootup */ NR_WRITTEN, /* page writings since bootup */ NR_THROTTLED_WRITTEN, /* NR_WRITTEN while reclaim throttled */ NR_KERNEL_MISC_RECLAIMABLE, /* reclaimable non-slab kernel pages */ NR_FOLL_PIN_ACQUIRED, /* via: pin_user_page(), gup flag: FOLL_PIN */ NR_FOLL_PIN_RELEASED, /* pages returned via unpin_user_page() */ NR_KERNEL_STACK_KB, /* measured in KiB */ #if IS_ENABLED(CONFIG_SHADOW_CALL_STACK) NR_KERNEL_SCS_KB, /* measured in KiB */ #endif NR_PAGETABLE, /* used for pagetables */ NR_SECONDARY_PAGETABLE, /* secondary pagetables, KVM & IOMMU */ #ifdef CONFIG_IOMMU_SUPPORT NR_IOMMU_PAGES, /* # of pages allocated by IOMMU */ #endif #ifdef CONFIG_SWAP NR_SWAPCACHE, #endif #ifdef CONFIG_NUMA_BALANCING PGPROMOTE_SUCCESS, /* promoted successfully */ PGPROMOTE_CANDIDATE, /* candidate pages to promote */ #endif /* PGDEMOTE_*: pages demoted */ PGDEMOTE_KSWAPD, PGDEMOTE_DIRECT, PGDEMOTE_KHUGEPAGED, NR_VM_NODE_STAT_ITEMS }; /* * Returns true if the item should be printed in THPs (/proc/vmstat * currently prints number of anon, file and shmem THPs. But the item * is charged in pages). */ static __always_inline bool vmstat_item_print_in_thp(enum node_stat_item item) { if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) return false; return item == NR_ANON_THPS || item == NR_FILE_THPS || item == NR_SHMEM_THPS || item == NR_SHMEM_PMDMAPPED || item == NR_FILE_PMDMAPPED; } /* * Returns true if the value is measured in bytes (most vmstat values are * measured in pages). This defines the API part, the internal representation * might be different. */ static __always_inline bool vmstat_item_in_bytes(int idx) { /* * Global and per-node slab counters track slab pages. * It's expected that changes are multiples of PAGE_SIZE. * Internally values are stored in pages. * * Per-memcg and per-lruvec counters track memory, consumed * by individual slab objects. These counters are actually * byte-precise. */ return (idx == NR_SLAB_RECLAIMABLE_B || idx == NR_SLAB_UNRECLAIMABLE_B); } /* * We do arithmetic on the LRU lists in various places in the code, * so it is important to keep the active lists LRU_ACTIVE higher in * the array than the corresponding inactive lists, and to keep * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
* * This has to be kept in sync with the statistics in zone_stat_item * above and the descriptions in vmstat_text in mm/vmstat.c */ #define LRU_BASE 0 #define LRU_ACTIVE 1 #define LRU_FILE 2 enum lru_list { LRU_INACTIVE_ANON = LRU_BASE, LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE, LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE, LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE, LRU_UNEVICTABLE, NR_LRU_LISTS }; enum vmscan_throttle_state { VMSCAN_THROTTLE_WRITEBACK, VMSCAN_THROTTLE_ISOLATED, VMSCAN_THROTTLE_NOPROGRESS, VMSCAN_THROTTLE_CONGESTED, NR_VMSCAN_THROTTLE, }; #define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++) #define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++) static inline bool is_file_lru(enum lru_list lru) { return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE); } static inline bool is_active_lru(enum lru_list lru) { return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE); } #define WORKINGSET_ANON 0 #define WORKINGSET_FILE 1 #define ANON_AND_FILE 2 enum lruvec_flags { /* * An lruvec has many dirty pages backed by a congested BDI: * 1. LRUVEC_CGROUP_CONGESTED is set by cgroup-level reclaim. * It can be cleared by cgroup reclaim or kswapd. * 2. LRUVEC_NODE_CONGESTED is set by kswapd node-level reclaim. * It can only be cleared by kswapd. * * Essentially, kswapd can unthrottle an lruvec throttled by cgroup * reclaim, but not vice versa. This only applies to the root cgroup. * The goal is to prevent cgroup reclaim on the root cgroup (e.g. * memory.reclaim) from unthrottling an unbalanced node (that was throttled * by kswapd). */ LRUVEC_CGROUP_CONGESTED, LRUVEC_NODE_CONGESTED, }; #endif /* !__GENERATING_BOUNDS_H */ /* * Evictable pages are divided into multiple generations. The youngest and the * oldest generation numbers, max_seq and min_seq, are monotonically increasing. * They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An * offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the * corresponding generation. The gen counter in folio->flags stores gen+1 while * a page is on one of lrugen->folios[]. Otherwise it stores 0. * * A page is added to the youngest generation on faulting. The aging needs to * check the accessed bit at least twice before handing this page over to the * eviction. The first check takes care of the accessed bit set on the initial * fault; the second check makes sure this page hasn't been used since then. * This process, AKA second chance, requires a minimum of two generations, * hence MIN_NR_GENS. And to maintain ABI compatibility with the active/inactive * LRU, e.g., /proc/vmstat, these two generations are considered active; the * rest of generations, if they exist, are considered inactive. See * lru_gen_is_active(). * * PG_active is always cleared while a page is on one of lrugen->folios[] so * that the aging need not worry about it. And it's set again when a page * considered active is isolated for non-reclaiming purposes, e.g., migration. * See lru_gen_add_folio() and lru_gen_del_folio(). * * MAX_NR_GENS is set to 4 so that the multi-gen LRU can support twice the * number of categories of the active/inactive LRU when keeping track of * accesses through page tables. This requires order_base_2(MAX_NR_GENS+1) bits * in folio->flags. */ #define MIN_NR_GENS 2U #define MAX_NR_GENS 4U /* * Each generation is divided into multiple tiers. A page accessed N times * through file descriptors is in tier order_base_2(N).
A page in the first tier * (N=0,1) is marked by PG_referenced unless it was faulted in through page * tables or read ahead. A page in any other tier (N>1) is marked by * PG_referenced and PG_workingset. This implies a minimum of two tiers is * supported without using additional bits in folio->flags. * * In contrast to moving across generations which requires the LRU lock, moving * across tiers only involves atomic operations on folio->flags and therefore * has a negligible cost in the buffered access path. In the eviction path, * comparisons of refaulted/(evicted+protected) from the first tier and the * rest infer whether pages accessed multiple times through file descriptors * are statistically hot and thus worth protecting. * * MAX_NR_TIERS is set to 4 so that the multi-gen LRU can support twice the * number of categories of the active/inactive LRU when keeping track of * accesses through file descriptors. This uses MAX_NR_TIERS-2 spare bits in * folio->flags. */ #define MAX_NR_TIERS 4U #ifndef __GENERATING_BOUNDS_H struct lruvec; struct page_vma_mapped_walk; #define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF) #define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF) #ifdef CONFIG_LRU_GEN enum { LRU_GEN_ANON, LRU_GEN_FILE, }; enum { LRU_GEN_CORE, LRU_GEN_MM_WALK, LRU_GEN_NONLEAF_YOUNG, NR_LRU_GEN_CAPS }; #define MIN_LRU_BATCH BITS_PER_LONG #define MAX_LRU_BATCH (MIN_LRU_BATCH * 64) /* whether to keep historical stats from evicted generations */ #ifdef CONFIG_LRU_GEN_STATS #define NR_HIST_GENS MAX_NR_GENS #else #define NR_HIST_GENS 1U #endif /* * The youngest generation number is stored in max_seq for both anon and file * types as they are aged on an equal footing. The oldest generation numbers are * stored in min_seq[] separately for anon and file types as clean file pages * can be evicted regardless of swap constraints. * * Normally anon and file min_seq are in sync. But if swapping is constrained, * e.g., out of swap space, file min_seq is allowed to advance and leave anon * min_seq behind. * * The number of pages in each generation is eventually consistent and therefore * can be transiently negative when reset_batch_size() is pending. 
*/ struct lru_gen_folio { /* the aging increments the youngest generation number */ unsigned long max_seq; /* the eviction increments the oldest generation numbers */ unsigned long min_seq[ANON_AND_FILE]; /* the birth time of each generation in jiffies */ unsigned long timestamps[MAX_NR_GENS]; /* the multi-gen LRU lists, lazily sorted on eviction */ struct list_head folios[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES]; /* the multi-gen LRU sizes, eventually consistent */ long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES]; /* the exponential moving average of refaulted */ unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS]; /* the exponential moving average of evicted+protected */ unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS]; /* the first tier doesn't need protection, hence the minus one */ unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS - 1]; /* can be modified without holding the LRU lock */ atomic_long_t evicted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS]; atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS]; /* whether the multi-gen LRU is enabled */ bool enabled; /* the memcg generation this lru_gen_folio belongs to */ u8 gen; /* the list segment this lru_gen_folio belongs to */ u8 seg; /* per-node lru_gen_folio list for global reclaim */ struct hlist_nulls_node list; }; enum { MM_LEAF_TOTAL, /* total leaf entries */ MM_LEAF_OLD, /* old leaf entries */ MM_LEAF_YOUNG, /* young leaf entries */ MM_NONLEAF_TOTAL, /* total non-leaf entries */ MM_NONLEAF_FOUND, /* non-leaf entries found in Bloom filters */ MM_NONLEAF_ADDED, /* non-leaf entries added to Bloom filters */ NR_MM_STATS }; /* double-buffering Bloom filters */ #define NR_BLOOM_FILTERS 2 struct lru_gen_mm_state { /* synced with max_seq after each iteration */ unsigned long seq; /* where the current iteration continues after */ struct list_head *head; /* where the last iteration ended before */ struct list_head *tail; /* Bloom filters flip after each iteration */ unsigned long *filters[NR_BLOOM_FILTERS]; /* the mm stats for debugging */ unsigned long stats[NR_HIST_GENS][NR_MM_STATS]; }; struct lru_gen_mm_walk { /* the lruvec under reclaim */ struct lruvec *lruvec; /* max_seq from lru_gen_folio: can be out of date */ unsigned long seq; /* the next address within an mm to scan */ unsigned long next_addr; /* to batch promoted pages */ int nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES]; /* to batch the mm stats */ int mm_stats[NR_MM_STATS]; /* total batched items */ int batched; bool can_swap; bool force_scan; }; /* * For each node, memcgs are divided into two generations: the old and the * young. For each generation, memcgs are randomly sharded into multiple bins * to improve scalability. For each bin, the hlist_nulls is virtually divided * into three segments: the head, the tail and the default. * * An onlining memcg is added to the tail of a random bin in the old generation. * The eviction starts at the head of a random bin in the old generation. The * per-node memcg generation counter, whose remainder (mod MEMCG_NR_GENS) indexes * the old generation, is incremented when all its bins become empty. * * There are four operations: * 1. MEMCG_LRU_HEAD, which moves a memcg to the head of a random bin in its * current generation (old or young) and updates its "seg" to "head"; * 2. MEMCG_LRU_TAIL, which moves a memcg to the tail of a random bin in its * current generation (old or young) and updates its "seg" to "tail"; * 3.
MEMCG_LRU_OLD, which moves a memcg to the head of a random bin in the old * generation, updates its "gen" to "old" and resets its "seg" to "default"; * 4. MEMCG_LRU_YOUNG, which moves a memcg to the tail of a random bin in the * young generation, updates its "gen" to "young" and resets its "seg" to * "default". * * The events that trigger the above operations are: * 1. Exceeding the soft limit, which triggers MEMCG_LRU_HEAD; * 2. The first attempt to reclaim a memcg below low, which triggers * MEMCG_LRU_TAIL; * 3. The first attempt to reclaim a memcg offlined or below reclaimable size * threshold, which triggers MEMCG_LRU_TAIL; * 4. The second attempt to reclaim a memcg offlined or below reclaimable size * threshold, which triggers MEMCG_LRU_YOUNG; * 5. Attempting to reclaim a memcg below min, which triggers MEMCG_LRU_YOUNG; * 6. Finishing the aging on the eviction path, which triggers MEMCG_LRU_YOUNG; * 7. Offlining a memcg, which triggers MEMCG_LRU_OLD. * * Notes: * 1. The memcg LRU only applies to global reclaim, and the round-robin incrementing * of their max_seq counters ensures the eventual fairness to all eligible * memcgs. For memcg reclaim, it still relies on mem_cgroup_iter(). * 2. There are only two valid generations: old (seq) and young (seq+1). * MEMCG_NR_GENS is set to three so that when reading the generation counter * locklessly, a stale value (seq-1) does not wrap around to young. */ #define MEMCG_NR_GENS 3 #define MEMCG_NR_BINS 8 struct lru_gen_memcg { /* the per-node memcg generation counter */ unsigned long seq; /* each memcg has one lru_gen_folio per node */ unsigned long nr_memcgs[MEMCG_NR_GENS]; /* per-node lru_gen_folio list for global reclaim */ struct hlist_nulls_head fifo[MEMCG_NR_GENS][MEMCG_NR_BINS]; /* protects the above */ spinlock_t lock; }; void lru_gen_init_pgdat(struct pglist_data *pgdat); void lru_gen_init_lruvec(struct lruvec *lruvec); void lru_gen_look_around(struct page_vma_mapped_walk *pvmw); void lru_gen_init_memcg(struct mem_cgroup *memcg); void lru_gen_exit_memcg(struct mem_cgroup *memcg); void lru_gen_online_memcg(struct mem_cgroup *memcg); void lru_gen_offline_memcg(struct mem_cgroup *memcg); void lru_gen_release_memcg(struct mem_cgroup *memcg); void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid); #else /* !CONFIG_LRU_GEN */ static inline void lru_gen_init_pgdat(struct pglist_data *pgdat) { } static inline void lru_gen_init_lruvec(struct lruvec *lruvec) { } static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw) { } static inline void lru_gen_init_memcg(struct mem_cgroup *memcg) { } static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg) { } static inline void lru_gen_online_memcg(struct mem_cgroup *memcg) { } static inline void lru_gen_offline_memcg(struct mem_cgroup *memcg) { } static inline void lru_gen_release_memcg(struct mem_cgroup *memcg) { } static inline void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid) { } #endif /* CONFIG_LRU_GEN */ struct lruvec { struct list_head lists[NR_LRU_LISTS]; /* per lruvec lru_lock for memcg */ spinlock_t lru_lock; /* * These track the cost of reclaiming one LRU - file or anon - * over the other. As the observed cost of reclaiming one LRU * increases, the reclaim scan balance tips toward the other.
*/ unsigned long anon_cost; unsigned long file_cost; /* Non-resident age, driven by LRU movement */ atomic_long_t nonresident_age; /* Refaults at the time of last reclaim cycle */ unsigned long refaults[ANON_AND_FILE]; /* Various lruvec state flags (enum lruvec_flags) */ unsigned long flags; #ifdef CONFIG_LRU_GEN /* evictable pages divided into generations */ struct lru_gen_folio lrugen; #ifdef CONFIG_LRU_GEN_WALKS_MMU /* to concurrently iterate lru_gen_mm_list */ struct lru_gen_mm_state mm_state; #endif #endif /* CONFIG_LRU_GEN */ #ifdef CONFIG_MEMCG struct pglist_data *pgdat; #endif struct zswap_lruvec_state zswap_lruvec_state; }; /* Isolate for asynchronous migration */ #define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4) /* Isolate unevictable pages */ #define ISOLATE_UNEVICTABLE ((__force isolate_mode_t)0x8) /* LRU Isolation modes. */ typedef unsigned __bitwise isolate_mode_t; enum zone_watermarks { WMARK_MIN, WMARK_LOW, WMARK_HIGH, WMARK_PROMO, NR_WMARK }; /* * One per migratetype for each PAGE_ALLOC_COSTLY_ORDER. Two additional lists * are added for THP. One PCP list is used by GFP_MOVABLE, and the other PCP list * is used by GFP_UNMOVABLE and GFP_RECLAIMABLE. */ #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define NR_PCP_THP 2 #else #define NR_PCP_THP 0 #endif #define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1)) #define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP) #define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost) #define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost) #define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost) #define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost) /* * Flags used in pcp->flags field. * * PCPF_PREV_FREE_HIGH_ORDER: a high-order page was freed in the * previous page freeing; used to avoid draining the PCP for an * occasional high-order page freeing. * * PCPF_FREE_HIGH_BATCH: preserve "pcp->batch" pages in the PCP before * draining it for consecutive high-order page freeing without * allocation, if the CPU's data cache slice is large enough; reduces * zone lock contention and keeps cache-hot pages reusable. */ #define PCPF_PREV_FREE_HIGH_ORDER BIT(0) #define PCPF_FREE_HIGH_BATCH BIT(1) struct per_cpu_pages { spinlock_t lock; /* Protects lists field */ int count; /* number of pages in the list */ int high; /* high watermark, emptying needed */ int high_min; /* min high watermark */ int high_max; /* max high watermark */ int batch; /* chunk size for buddy add/remove */ u8 flags; /* protected by pcp->lock */ u8 alloc_factor; /* batch scaling factor during allocate */ #ifdef CONFIG_NUMA u8 expire; /* When 0, remote pagesets are drained */ #endif short free_count; /* consecutive free count */ /* Lists of pages, one per migrate type stored on the pcp-lists */ struct list_head lists[NR_PCP_LISTS]; } ____cacheline_aligned_in_smp; struct per_cpu_zonestat { #ifdef CONFIG_SMP s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; s8 stat_threshold; #endif #ifdef CONFIG_NUMA /* * Low priority inaccurate counters that are only folded * on demand. Use a large type to avoid the overhead of * folding during refresh_cpu_vm_stats. */ unsigned long vm_numa_event[NR_VM_NUMA_EVENT_ITEMS]; #endif }; struct per_cpu_nodestat { s8 stat_threshold; s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS]; }; #endif /* !__GENERATING_BOUNDS.H */ enum zone_type { /* * ZONE_DMA and ZONE_DMA32 are used when there are peripherals not able * to DMA to all of the addressable memory (ZONE_NORMAL).
* On architectures where this area covers the whole 32 bit address * space ZONE_DMA32 is used. ZONE_DMA is left for the ones with smaller * DMA addressing constraints. This distinction is important as a 32bit * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit * platforms may need both zones as they support peripherals with * different DMA addressing limitations. */ #ifdef CONFIG_ZONE_DMA ZONE_DMA, #endif #ifdef CONFIG_ZONE_DMA32 ZONE_DMA32, #endif /* * Normal addressable memory is in ZONE_NORMAL. DMA operations can be * performed on pages in ZONE_NORMAL if the DMA devices support * transfers to all addressable memory. */ ZONE_NORMAL, #ifdef CONFIG_HIGHMEM /* * A memory area that is only addressable by the kernel through * mapping portions into its own address space. This is for example * used by i386 to allow the kernel to address the memory beyond * 900MB. The kernel will set up special mappings (page * table entries on i386) for each page that the kernel needs to * access. */ ZONE_HIGHMEM, #endif /* * ZONE_MOVABLE is similar to ZONE_NORMAL, except that it contains * movable pages with few exceptional cases described below. Main use * cases for ZONE_MOVABLE are to make memory offlining/unplug more * likely to succeed, and to locally limit unmovable allocations - e.g., * to increase the number of THP/huge pages. Notable special cases are: * * 1. Pinned pages: (long-term) pinning of movable pages might * essentially turn such pages unmovable. Therefore, we do not allow * pinning long-term pages in ZONE_MOVABLE. When pages are pinned and * faulted, they come from the right zone right away. However, it is * still possible that the address space already has pages in * ZONE_MOVABLE at the time when pages are pinned (i.e. the user has * touched that memory before pinning). In such a case we migrate them * to a different zone. When migration fails - pinning fails. * 2. memblock allocations: kernelcore/movablecore setups might create * situations where ZONE_MOVABLE contains unmovable allocations * after boot. Memory offlining and allocations fail early. * 3. Memory holes: kernelcore/movablecore setups might create very rare * situations where ZONE_MOVABLE contains memory holes after boot, * for example, if we have sections that are only partially * populated. Memory offlining and allocations fail early. * 4. PG_hwpoison pages: while poisoned pages can be skipped during * memory offlining, such pages cannot be allocated. * 5. Unmovable PG_offline pages: in paravirtualized environments, * hotplugged memory blocks might only partially be managed by the * buddy (e.g., via XEN-balloon, Hyper-V balloon, virtio-mem). The * parts not managed by the buddy are unmovable PG_offline pages. In * some cases (virtio-mem), such pages can be skipped during * memory offlining, however, they cannot be moved/allocated. These * techniques might use alloc_contig_range() to hide previously * exposed pages from the buddy again (e.g., to implement some sort * of memory unplug in virtio-mem). * 6. ZERO_PAGE(0): kernelcore/movablecore setups might create * situations where ZERO_PAGE(0), which is allocated differently * on different platforms, may end up in a movable zone. ZERO_PAGE(0) * cannot be migrated. * 7. Memory-hotplug: when using memmap_on_memory and onlining the * memory to the MOVABLE zone, the vmemmap pages are also placed in * such a zone. Such pages cannot really be moved around as they are * self-stored in the range, but they are treated as movable when * the range they describe is about to be offlined.
* * In general, no unmovable allocations that degrade memory offlining * should end up in ZONE_MOVABLE. Allocators (like alloc_contig_range()) * have to expect that migrating pages in ZONE_MOVABLE can fail (even * if has_unmovable_pages() states that there are no unmovable pages, * there can be false negatives). */ ZONE_MOVABLE, #ifdef CONFIG_ZONE_DEVICE ZONE_DEVICE, #endif __MAX_NR_ZONES }; #ifndef __GENERATING_BOUNDS_H #define ASYNC_AND_SYNC 2 struct zone { /* Read-mostly fields */ /* zone watermarks, access with *_wmark_pages(zone) macros */ unsigned long _watermark[NR_WMARK]; unsigned long watermark_boost; unsigned long nr_reserved_highatomic; /* * We don't know if the memory that we're going to allocate will be * freeable and/or released eventually, so to avoid totally * wasting several GB of ram we must reserve some of the lower zone * memory (otherwise we risk running OOM on the lower zones despite * there being tons of freeable ram on the higher zones). This array is * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl * changes. */ long lowmem_reserve[MAX_NR_ZONES]; #ifdef CONFIG_NUMA int node; #endif struct pglist_data *zone_pgdat; struct per_cpu_pages __percpu *per_cpu_pageset; struct per_cpu_zonestat __percpu *per_cpu_zonestats; /* * the high and batch values are copied to individual pagesets for * faster access */ int pageset_high_min; int pageset_high_max; int pageset_batch; #ifndef CONFIG_SPARSEMEM /* * Flags for a pageblock_nr_pages block. See pageblock-flags.h. * In SPARSEMEM, this map is stored in struct mem_section */ unsigned long *pageblock_flags; #endif /* CONFIG_SPARSEMEM */ /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */ unsigned long zone_start_pfn; /* * spanned_pages is the total pages spanned by the zone, including * holes, which is calculated as: * spanned_pages = zone_end_pfn - zone_start_pfn; * * present_pages is physical pages existing within the zone, which * is calculated as: * present_pages = spanned_pages - absent_pages(pages in holes); * * present_early_pages is present pages existing within the zone * located on memory available since early boot, excluding hotplugged * memory. * * managed_pages is present pages managed by the buddy system, which * is calculated as (reserved_pages includes pages allocated by the * bootmem allocator): * managed_pages = present_pages - reserved_pages; * * cma_pages is the number of present pages assigned for CMA use * (MIGRATE_CMA). * * So present_pages may be used by memory hotplug or memory power * management logic to figure out unmanaged pages by checking * (present_pages - managed_pages). And managed_pages should be used * by the page allocator and vm scanner to calculate all kinds of watermarks * and thresholds. * * Locking rules: * * zone_start_pfn and spanned_pages are protected by span_seqlock. * It is a seqlock because it has to be read outside of zone->lock, * and it is done in the main allocator path. But, it is written * quite infrequently. * * The span_seq lock is declared along with zone->lock because it is * frequently read in proximity to zone->lock. It's good to * give them a chance of being in the same cacheline. * * Write access to present_pages at runtime should be protected by * mem_hotplug_begin/done(). Any reader who can't tolerate drift of * present_pages should use get_online_mems() to get a stable value.
*/ atomic_long_t managed_pages; unsigned long spanned_pages; unsigned long present_pages; #if defined(CONFIG_MEMORY_HOTPLUG) unsigned long present_early_pages; #endif #ifdef CONFIG_CMA unsigned long cma_pages; #endif const char *name; #ifdef CONFIG_MEMORY_ISOLATION /* * Number of isolated pageblocks. Used to solve an incorrect * freepage counting problem due to racily retrieving the * migratetype of a pageblock. Protected by zone->lock. */ unsigned long nr_isolate_pageblock; #endif #ifdef CONFIG_MEMORY_HOTPLUG /* see spanned/present_pages for more description */ seqlock_t span_seqlock; #endif int initialized; /* Write-intensive fields used from the page allocator */ CACHELINE_PADDING(_pad1_); /* free areas of different sizes */ struct free_area free_area[NR_PAGE_ORDERS]; #ifdef CONFIG_UNACCEPTED_MEMORY /* Pages to be accepted. All pages on the list are MAX_PAGE_ORDER */ struct list_head unaccepted_pages; #endif /* zone flags, see below */ unsigned long flags; /* Primarily protects free_area */ spinlock_t lock; /* Write-intensive fields used by compaction and vmstats. */ CACHELINE_PADDING(_pad2_); /* * When free pages are below this point, additional steps are taken * when reading the number of free pages to avoid per-cpu counter * drift allowing watermarks to be breached */ unsigned long percpu_drift_mark; #if defined CONFIG_COMPACTION || defined CONFIG_CMA /* pfn where compaction free scanner should start */ unsigned long compact_cached_free_pfn; /* pfn where compaction migration scanner should start */ unsigned long compact_cached_migrate_pfn[ASYNC_AND_SYNC]; unsigned long compact_init_migrate_pfn; unsigned long compact_init_free_pfn; #endif #ifdef CONFIG_COMPACTION /* * On compaction failure, 1<<compact_defer_shift compactions * are skipped before trying again. The number attempted since * last failure is tracked with compact_considered. * compact_order_failed is the minimum compaction failed order. */ unsigned int compact_considered; unsigned int compact_defer_shift; int compact_order_failed; #endif #if defined CONFIG_COMPACTION || defined CONFIG_CMA /* Set to true when the PG_migrate_skip bits should be cleared */ bool compact_blockskip_flush; #endif bool contiguous; CACHELINE_PADDING(_pad3_); /* Zone statistics */ atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS]; } ____cacheline_internodealigned_in_smp; enum pgdat_flags { PGDAT_DIRTY, /* reclaim scanning has recently found * many dirty file pages at the tail * of the LRU. */ PGDAT_WRITEBACK, /* reclaim scanning has recently found * many pages under writeback */ PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */ }; enum zone_flags { ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks. * Cleared when kswapd is woken. */ ZONE_RECLAIM_ACTIVE, /* kswapd may be scanning the zone. */ ZONE_BELOW_HIGH, /* zone is below high watermark.
*/ }; static inline unsigned long zone_managed_pages(struct zone *zone) { return (unsigned long)atomic_long_read(&zone->managed_pages); } static inline unsigned long zone_cma_pages(struct zone *zone) { #ifdef CONFIG_CMA return zone->cma_pages; #else return 0; #endif } static inline unsigned long zone_end_pfn(const struct zone *zone) { return zone->zone_start_pfn + zone->spanned_pages; } static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn) { return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone); } static inline bool zone_is_initialized(struct zone *zone) { return zone->initialized; } static inline bool zone_is_empty(struct zone *zone) { return zone->spanned_pages == 0; } #ifndef BUILD_VDSO32_64 /* * The zone field is never updated after free_area_init_core() * sets it, so none of the operations on it need to be atomic. */ /* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */ #define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH) #define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH) #define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) #define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH) #define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH) #define LRU_GEN_PGOFF (KASAN_TAG_PGOFF - LRU_GEN_WIDTH) #define LRU_REFS_PGOFF (LRU_GEN_PGOFF - LRU_REFS_WIDTH) /* * Define the bit shifts to access each section. For non-existent * sections we define the shift as 0; that plus a 0 mask ensures * the compiler will optimise away reference to them. */ #define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0)) #define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) #define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) #define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0)) #define KASAN_TAG_PGSHIFT (KASAN_TAG_PGOFF * (KASAN_TAG_WIDTH != 0)) /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */ #ifdef NODE_NOT_IN_PAGE_FLAGS #define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT) #define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF) ? \ SECTIONS_PGOFF : ZONES_PGOFF) #else #define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT) #define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF) ? \ NODES_PGOFF : ZONES_PGOFF) #endif #define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0)) #define ZONES_MASK ((1UL << ZONES_WIDTH) - 1) #define NODES_MASK ((1UL << NODES_WIDTH) - 1) #define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1) #define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1) #define KASAN_TAG_MASK ((1UL << KASAN_TAG_WIDTH) - 1) #define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1) static inline enum zone_type page_zonenum(const struct page *page) { ASSERT_EXCLUSIVE_BITS(page->flags, ZONES_MASK << ZONES_PGSHIFT); return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK; } static inline enum zone_type folio_zonenum(const struct folio *folio) { return page_zonenum(&folio->page); } #ifdef CONFIG_ZONE_DEVICE static inline bool is_zone_device_page(const struct page *page) { return page_zonenum(page) == ZONE_DEVICE; } /* * Consecutive zone device pages should not be merged into the same sgl * or bvec segment with other types of pages or if they belong to different * pgmaps. Otherwise getting the pgmap of a given segment is not possible * without scanning the entire segment. This helper returns true either if * both pages are not zone device pages or both pages are zone device pages * with the same pgmap. 
*/ static inline bool zone_device_pages_have_same_pgmap(const struct page *a, const struct page *b) { if (is_zone_device_page(a) != is_zone_device_page(b)) return false; if (!is_zone_device_page(a)) return true; return a->pgmap == b->pgmap; } extern void memmap_init_zone_device(struct zone *, unsigned long, unsigned long, struct dev_pagemap *); #else static inline bool is_zone_device_page(const struct page *page) { return false; } static inline bool zone_device_pages_have_same_pgmap(const struct page *a, const struct page *b) { return true; } #endif static inline bool folio_is_zone_device(const struct folio *folio) { return is_zone_device_page(&folio->page); } static inline bool is_zone_movable_page(const struct page *page) { return page_zonenum(page) == ZONE_MOVABLE; } static inline bool folio_is_zone_movable(const struct folio *folio) { return folio_zonenum(folio) == ZONE_MOVABLE; } #endif /* * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty * intersection with the given zone */ static inline bool zone_intersects(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages) { if (zone_is_empty(zone)) return false; if (start_pfn >= zone_end_pfn(zone) || start_pfn + nr_pages <= zone->zone_start_pfn) return false; return true; } /* * The "priority" of VM scanning is how much of the queues we will scan in one * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the * queues ("queue_length >> 12") during an aging round. */ #define DEF_PRIORITY 12 /* Maximum number of zones on a zonelist */ #define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES) enum { ZONELIST_FALLBACK, /* zonelist with fallback */ #ifdef CONFIG_NUMA /* * The NUMA zonelists are doubled because we need zonelists that * restrict the allocations to a single node for __GFP_THISNODE. */ ZONELIST_NOFALLBACK, /* zonelist without fallback (__GFP_THISNODE) */ #endif MAX_ZONELISTS }; /* * This struct contains information about a zone in a zonelist. It is stored * here to avoid dereferences into large structures and lookups of tables */ struct zoneref { struct zone *zone; /* Pointer to actual zone */ int zone_idx; /* zone_idx(zoneref->zone) */ }; /* * One allocation request operates on a zonelist. A zonelist * is a list of zones, the first one is the 'goal' of the * allocation, the other zones are fallback zones, in decreasing * priority. * * To speed the reading of the zonelist, the zonerefs contain the zone index * of the entry being read. Helper functions to access information given * a struct zoneref are * * zonelist_zone() - Return the struct zone * for an entry in _zonerefs * zonelist_zone_idx() - Return the index of the zone for an entry * zonelist_node_idx() - Return the index of the node for an entry */ struct zonelist { struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1]; }; /* * The array of struct pages for flatmem. * It must be declared for SPARSEMEM as well because there are configurations * that rely on that. */ extern struct page *mem_map; #ifdef CONFIG_TRANSPARENT_HUGEPAGE struct deferred_split { spinlock_t split_queue_lock; struct list_head split_queue; unsigned long split_queue_len; }; #endif #ifdef CONFIG_MEMORY_FAILURE /* * Per NUMA node memory failure handling statistics. */ struct memory_failure_stats { /* * Number of raw pages poisoned. * Cases not accounted: memory outside kernel control, offline page, * arch-specific memory_failure (SGX), hwpoison_filter() filtered * error events, and unpoison actions from hwpoison_unpoison. 
*/ unsigned long total; /* * Recovery results of poisoned raw pages handled by memory_failure, * in sync with mf_result. * total = ignored + failed + delayed + recovered. * total * PAGE_SIZE * #nodes = /proc/meminfo/HardwareCorrupted. */ unsigned long ignored; unsigned long failed; unsigned long delayed; unsigned long recovered; }; #endif /* * On NUMA machines, each NUMA node would have a pg_data_t to describe * its memory layout. On UMA machines there is a single pglist_data which * describes the whole memory. * * Memory statistics and page replacement data structures are maintained on a * per-zone basis. */ typedef struct pglist_data { /* * node_zones contains just the zones for THIS node. Not all of the * zones may be populated, but it is the full list. It is referenced by * this node's node_zonelists as well as other nodes' node_zonelists. */ struct zone node_zones[MAX_NR_ZONES]; /* * node_zonelists contains references to all zones in all nodes. * Generally the first zones will be references to this node's * node_zones. */ struct zonelist node_zonelists[MAX_ZONELISTS]; int nr_zones; /* number of populated zones in this node */ #ifdef CONFIG_FLATMEM /* means !SPARSEMEM */ struct page *node_mem_map; #ifdef CONFIG_PAGE_EXTENSION struct page_ext *node_page_ext; #endif #endif #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT) /* * Must be held any time you expect node_start_pfn, * node_present_pages, node_spanned_pages or nr_zones to stay constant. * Also synchronizes pgdat->first_deferred_pfn during deferred page * init. * * pgdat_resize_lock() and pgdat_resize_unlock() are provided to * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG * or CONFIG_DEFERRED_STRUCT_PAGE_INIT. * * Nests above zone->lock and zone->span_seqlock */ spinlock_t node_size_lock; #endif unsigned long node_start_pfn; unsigned long node_present_pages; /* total number of physical pages */ unsigned long node_spanned_pages; /* total size of physical page range, including holes */ int node_id; wait_queue_head_t kswapd_wait; wait_queue_head_t pfmemalloc_wait; /* workqueues for throttling reclaim for different reasons. */ wait_queue_head_t reclaim_wait[NR_VMSCAN_THROTTLE]; atomic_t nr_writeback_throttled;/* nr of writeback-throttled tasks */ unsigned long nr_reclaim_start; /* nr pages written while throttled * when throttling started. */ #ifdef CONFIG_MEMORY_HOTPLUG struct mutex kswapd_lock; #endif struct task_struct *kswapd; /* Protected by kswapd_lock */ int kswapd_order; enum zone_type kswapd_highest_zoneidx; int kswapd_failures; /* Number of 'reclaimed == 0' runs */ #ifdef CONFIG_COMPACTION int kcompactd_max_order; enum zone_type kcompactd_highest_zoneidx; wait_queue_head_t kcompactd_wait; struct task_struct *kcompactd; bool proactive_compact_trigger; #endif /* * This is a per-node reserve of pages that are not available * to userspace allocations. */ unsigned long totalreserve_pages; #ifdef CONFIG_NUMA /* * node reclaim becomes active if more unmapped pages exist. */ unsigned long min_unmapped_pages; unsigned long min_slab_pages; #endif /* CONFIG_NUMA */ /* Write-intensive fields used by page reclaim */ CACHELINE_PADDING(_pad1_); #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT /* * If memory initialisation on large machines is deferred then this * is the first PFN that needs to be initialised.
#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
		   enum zone_type highest_zoneidx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int highest_zoneidx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
		       int highest_zoneidx, unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
			    unsigned long mark, int highest_zoneidx);
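/*
 * Illustrative sketch, not part of this header: a typical watermark check
 * asks whether an order-0 allocation would keep a zone above its minimum
 * watermark, assuming the min_wmark_pages() accessor defined elsewhere in
 * this header and gfp_zone() from gfp.h; alloc_flags of 0 requests no
 * watermark relaxation:
 *
 *	int highest_zoneidx = gfp_zone(GFP_KERNEL);
 *
 *	if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
 *			      highest_zoneidx, 0))
 *		;	(the zone can still satisfy the request)
 */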
/*
 * Memory initialization context, used to differentiate memory added by
 * the platform statically or via memory hotplug interface.
 */
enum meminit_context {
	MEMINIT_EARLY,
	MEMINIT_HOTPLUG,
};

extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				      unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, __lruvec);
#endif
}

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; }
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

#ifdef CONFIG_ZONE_DEVICE
static inline bool zone_is_zone_device(struct zone *zone)
{
	return zone_idx(zone) == ZONE_DEVICE;
}
#else
static inline bool zone_is_zone_device(struct zone *zone)
{
	return false;
}
#endif

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone_managed_pages(zone);
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}

#ifdef CONFIG_NUMA
static inline int zone_to_nid(struct zone *zone)
{
	return zone->node;
}

static inline void zone_set_nid(struct zone *zone, int nid)
{
	zone->node = nid;
}
#else
static inline int zone_to_nid(struct zone *zone)
{
	return 0;
}

static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif

extern int movable_zone;

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && movable_zone == ZONE_HIGHMEM));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone: pointer to struct zone variable
 * Return: 1 for a highmem zone, 0 otherwise
 */
static inline int is_highmem(struct zone *zone)
{
	return is_highmem_idx(zone_idx(zone));
}

#ifdef CONFIG_ZONE_DMA
bool has_managed_dma(void);
#else
static inline bool has_managed_dma(void)
{
	return false;
}
#endif

#ifndef CONFIG_NUMA

extern struct pglist_data contig_page_data;
static inline struct pglist_data *NODE_DATA(int nid)
{
	return &contig_page_data;
}

#else /* CONFIG_NUMA */

#include <asm/mmzone.h>

#endif /* !CONFIG_NUMA */

extern stru