// SPDX-License-Identifier: GPL-2.0
/*
 * tracing clocks
 *
 *  Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Implements 3 trace clock variants, with differing scalability/precision
 * tradeoffs:
 *
 *  - local: CPU-local trace clock
 *  - medium: scalable global clock with some jitter
 *  - global: globally monotonic, serialized clock
 *
 * Tracer plugins will choose a default from these clocks.
 */
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/ktime.h>
#include <linux/trace_clock.h>

/*
 * trace_clock_local(): the simplest and least coherent tracing clock.
 *
 * Useful for tracing that does not cross to other CPUs nor
 * does it go through idle events.
 */
u64 notrace trace_clock_local(void)
{
	u64 clock;

	/*
	 * sched_clock() is an architecture implemented, fast, scalable,
	 * lockless clock. It is not guaranteed to be coherent across
	 * CPUs, nor across CPU idle events.
	 */
	preempt_disable_notrace();
	clock = sched_clock();
	preempt_enable_notrace();

	return clock;
}
EXPORT_SYMBOL_GPL(trace_clock_local);

/*
 * trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
 * jitter between CPUs. So it's a pretty scalable clock, but there
 * can be offsets in the trace data.
 */
u64 notrace trace_clock(void)
{
	return local_clock();
}
EXPORT_SYMBOL_GPL(trace_clock);

/*
 * trace_clock_jiffies(): Simply use jiffies as a clock counter.
 * Note that this use of jiffies_64 is not completely safe on
 * 32-bit systems. But the window is tiny, and the effect if
 * we are affected is that we will have an obviously bogus
 * timestamp on a trace event - i.e. not life threatening.
 */
u64 notrace trace_clock_jiffies(void)
{
	return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
}
EXPORT_SYMBOL_GPL(trace_clock_jiffies);

/*
 * trace_clock_global(): special globally coherent trace clock
 *
 * It has higher overhead than the other trace clocks but is still
 * an order of magnitude faster than GTOD derived hardware clocks.
 *
 * Used by plugins that need globally coherent timestamps.
 */

/* keep prev_time and lock in the same cacheline. */
static struct {
	u64 prev_time;
	arch_spinlock_t lock;
} trace_clock_struct ____cacheline_aligned_in_smp =
	{
		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
	};

u64 notrace trace_clock_global(void)
{
	unsigned long flags;
	int this_cpu;
	u64 now, prev_time;

	raw_local_irq_save(flags);

	this_cpu = raw_smp_processor_id();

	/*
	 * The global clock "guarantees" that the events are ordered
	 * between CPUs. But if two events on two different CPUS call
	 * trace_clock_global at roughly the same time, it really does
	 * not matter which one gets the earlier time. Just make sure
	 * that the same CPU will always show a monotonic clock.
	 *
	 * Use a read memory barrier to get the latest written
	 * time that was recorded.
	 */
	smp_rmb();
	prev_time = READ_ONCE(trace_clock_struct.prev_time);
	now = sched_clock_cpu(this_cpu);

	/* Make sure that now is always greater than or equal to prev_time */
	if ((s64)(now - prev_time) < 0)
		now = prev_time;

	/*
	 * If in an NMI context then don't risk lockups and simply return
	 * the current time.
	 */
	if (unlikely(in_nmi()))
		goto out;

	/* Tracing can cause strange recursion, always use a try lock */
	if (arch_spin_trylock(&trace_clock_struct.lock)) {
		/* Reread prev_time in case it was already updated */
		prev_time = READ_ONCE(trace_clock_struct.prev_time);
		if ((s64)(now - prev_time) < 0)
			now = prev_time;

		trace_clock_struct.prev_time = now;

		/* The unlock acts as the wmb for the above rmb */
		arch_spin_unlock(&trace_clock_struct.lock);
	}
 out:
	raw_local_irq_restore(flags);

	return now;
}
EXPORT_SYMBOL_GPL(trace_clock_global);

static atomic64_t trace_counter;

/*
 * trace_clock_counter(): simply an atomic counter.
 * Use the trace_counter "counter" for cases where you do not care
 * about timings, but are interested in strict ordering.
 */
u64 notrace trace_clock_counter(void)
{
	return atomic64_inc_return(&trace_counter);
}
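
The wrap-safe ordering test used twice above, (s64)(now - prev_time) < 0, is the load-bearing trick behind the per-CPU monotonic clamp in trace_clock_global(): the unsigned difference is reinterpreted as signed, so "earlier than" remains correct even if the 64-bit counter wraps. The following is a minimal, illustrative user-space sketch of that comparison only; it is not part of the kernel sources, and the helper name time_before64() is invented here for the example.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: returns nonzero if timestamp a is "before" timestamp b,
 * even across wraparound of the unsigned 64-bit counter, by reinterpreting
 * the unsigned difference as signed -- the same idea as the kernel's
 * (s64)(now - prev_time) < 0 test.
 */
static int time_before64(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	uint64_t prev = UINT64_MAX - 5;	/* counter just about to wrap */
	uint64_t now = 10;		/* counter shortly after wrapping */

	/* now is logically later than prev despite being numerically smaller */
	printf("now before prev?  %d\n", time_before64(now, prev));	/* prints 0 */
	printf("prev before now?  %d\n", time_before64(prev, now));	/* prints 1 */
	return 0;
}

The same reinterpretation is why the clamp stays correct around a wrap point, as long as the true distance between the two timestamps fits in 63 bits.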
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 */

/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <trace/events/ext4.h>
#include <kunit/static_stub.h>

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * The allocation request involves a request for multiple blocks near
 * to the goal (block) value specified.
 *
 * During initialization phase of the allocator we decide to use the
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever
 * is larger. If the size is less than sbi->s_mb_stream_request we
 * select to use the group preallocation. The default value of
 * s_mb_stream_request is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we have small files closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains a list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. If only the logical file block falls within the range of prealloc
 * space we will consume the particular prealloc space. This makes sure that
 * we have contiguous physical blocks representing the file blocks
 *
 * The important thing to be noted in case of inode prealloc space is that
 * we don't modify the values associated with the inode prealloc space except
 * pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. These are per-CPU prealloc lists, represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-cpu locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc or/and locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so that
 * we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy. The information involves
 * the block bitmap and buddy information. The information is stored in the
 * inode as:
 *
 *  {                        page                        }
 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 * blocksize) blocks. So it can have information regarding groups_per_page
 * which is blocks_per_page/2
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need.
 * The extra blocks that we get after allocation are added to the respective
 * prealloc list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
 * dependent on the cluster size; for non-bigalloc file systems, it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with -O
 * stripe=<value> option the group prealloc request is normalized to the
 * smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * If "mb_optimize_scan" mount option is set, we maintain in memory group info
 * structures in two data structures:
 *
 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
 *
 *    Locking: sbi->s_mb_largest_free_orders_locks(array of rw locks)
 *
 *    This is an array of lists where the index in the array represents the
 *    largest free order in the buddy bitmap of the participating group infos of
 *    that list. So, there are exactly MB_NUM_ORDERS(sb) (which means total
 *    number of buddy bitmap orders possible) number of lists. Group-infos are
 *    placed in appropriate lists.
 *
 * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
 *
 *    Locking: sbi->s_mb_avg_fragment_size_locks(array of rw locks)
 *
 *    This is an array of lists where in the i-th list there are groups with
 *    average fragment size >= 2^i and < 2^(i+1). The average fragment size
 *    is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
 *    Note that we don't bother with a special list for completely empty groups
 *    so we only have MB_NUM_ORDERS(sb) lists.
 *
 * When "mb_optimize_scan" mount option is set, mballoc consults the above data
 * structures to decide the order in which groups are to be traversed for
 * fulfilling an allocation request.
 *
 * At CR_POWER2_ALIGNED, we look for groups which have the largest_free_order
 * >= the order of the request. We directly look at the largest free order list
 * in the data structure (1) above where largest_free_order = order of the
 * request. If that list is empty, we look at remaining list in the increasing
 * order of largest_free_order. This allows us to perform CR_POWER2_ALIGNED
 * lookup in O(1) time.
 *
 * At CR_GOAL_LEN_FAST, we only consider groups where
 * average fragment size > request size. So, we lookup a group which has average
 * fragment size just above or equal to request size using our average fragment
 * size group lists (data structure 2) in O(1) time.
 *
 * At CR_BEST_AVAIL_LEN, we aim to optimize allocations which can't be satisfied
 * in CR_GOAL_LEN_FAST. The fact that we couldn't find a group in
 * CR_GOAL_LEN_FAST suggests that there is no BG that has avg
 * fragment size > goal length. So before falling to the slower
 * CR_GOAL_LEN_SLOW, in CR_BEST_AVAIL_LEN we proactively trim goal length and
 * then use the same fragment lists as CR_GOAL_LEN_FAST to find a BG with a big
 * enough average fragment size. This increases the chances of finding a
 * suitable block group in O(1) time and results in faster allocation at the
 * cost of reduced size of allocation.
 *
 * If "mb_optimize_scan" mount option is not set, mballoc traverses groups in
 * linear order which requires O(N) search time for each CR_POWER2_ALIGNED and
 * CR_GOAL_LEN_FAST phase.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 * /sys/fs/ext4/<partition>/mb_max_linear_groups
 *
 * The regular allocator uses buddy scan only if the request len is power of
 * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
 * value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
 * stripe size (sbi->s_stripe), we try to search for contiguous block in
 * stripe size. This should result in better allocation on RAID setups. If
 * not, we search in the specific group using bitmap for best extents. The
 * tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent in the found extents. Searching for the blocks starts with
 * the group specified as the goal value in allocation context via
 * ac_g_ex. Each group is first checked based on the criteria whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups are
 * checked.
 *
 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may not
 * get traversed linearly. That may result in subsequent allocations being not
 * close to each other. And so, the underlying device may get filled up in a
 * non-linear fashion. While that may not matter on non-rotational devices, for
 * rotational devices that may result in higher seek times. "mb_max_linear_groups"
 * tells mballoc how many groups mballoc should search linearly before
 * consulting the above data structures for more efficient lookups. For
 * non-rotational devices, this value defaults to 0 and for rotational devices
 * this is set to MB_DEFAULT_LINEAR_LIMIT.
 *
 * Both types of prealloc space are populated as above. So for the first
 * request we will hit the buddy cache which will result in this prealloc
 * space getting filled. The prealloc space is then later used for the
 * subsequent request.
 */

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks number of blocks left
 *    unused. so, before taking some block from descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to specific locality group which does not translate to
 *    permanent set of inodes: inode can join and leave group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count number of
 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA		on-disk += N; PA -= N
 *  - discard locality group PA		buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
 *        is used in real operation because we can't know actual used
 *        bits from PA, only from on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     bit set and PA claims same block, it's OK. IOW, one can set bit in
 *     on-disk bitmap if buddy has same bit set or/and PA covers the
 *     corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 *      given (3) we care that PA-=N operation doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few conclusions:
 *  - PA is referenced and while it is no discard is possible
 *  - PA is referenced until block isn't marked in on-disk bitmap
 *  - PA changes only after on-disk bitmap
 *  - discard must not compete with init. either init is done before
 *    any discard or they're serialized somehow
 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 *
 * a special case when we've used PA to emptiness.
 * no need to modify buddy
 * in this case, but we should care about concurrent init
 *
 */

/*
 * Logic in few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group		(group)
 *  - object (inode/locality)		(object)
 *  - per-pa lock			(pa)
 *  - cr_power2_aligned lists lock	(cr_power2_aligned)
 *  - cr_goal_len_fast lists lock	(cr_goal_len_fast)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *        pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *        pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *        pa
 *    group
 *        object
 *
 *  - allocation path (ext4_mb_regular_allocator)
 *    group
 *    cr_power2_aligned/cr_goal_len_fast
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.  There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);

static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
			       ext4_group_t group, enum criteria cr);

static int ext4_try_to_trim_range(struct super_block *sb,
		struct ext4_buddy *e4b, ext4_grpblk_t start,
		ext4_grpblk_t max, ext4_grpblk_t minblocks);

/*
 * The algorithm using this percpu seq counter goes below:
 * 1. We sample the percpu discard_pa_seq counter before trying for block
 *    allocation in ext4_mb_new_blocks().
 * 2. We increment this percpu discard_pa_seq counter when we either allocate
 *    or free these blocks i.e. while marking those blocks as used/free in
 *    mb_mark_used()/mb_free_blocks().
 * 3. We also increment this percpu seq counter when we successfully identify
 *    that the bb_prealloc_list is not empty and hence proceed for discarding
 *    of those PAs inside ext4_mb_discard_group_preallocations().
 *
 * Now to make sure that the regular fast path of block allocation is not
 * affected, as a small optimization we only sample the percpu seq counter
 * on that cpu. Only when the block allocation fails and when freed blocks
 * found were 0, that is when we sample percpu seq counter for all cpus using
 * below function ext4_get_discard_pa_seq_sum(). This happens after making
 * sure that all the PAs on grp->bb_prealloc_list got freed or if it's empty.
*/ static DEFINE_PER_CPU(u64, discard_pa_seq); static inline u64 ext4_get_discard_pa_seq_sum(void) { int __cpu; u64 __seq = 0; for_each_possible_cpu(__cpu) __seq += per_cpu(discard_pa_seq, __cpu); return __seq; } static inline void *mb_correct_addr_and_bit(int *bit, void *addr) { #if BITS_PER_LONG == 64 *bit += ((unsigned long) addr & 7UL) << 3; addr = (void *) ((unsigned long) addr & ~7UL); #elif BITS_PER_LONG == 32 *bit += ((unsigned long) addr & 3UL) << 3; addr = (void *) ((unsigned long) addr & ~3UL); #else #error "how many bits you are?!" #endif return addr; } static inline int mb_test_bit(int bit, void *addr) { /* * ext4_test_bit on architecture like powerpc * needs unsigned long aligned address */ addr = mb_correct_addr_and_bit(&bit, addr); return ext4_test_bit(bit, addr); } static inline void mb_set_bit(int bit, void *addr) { addr = mb_correct_addr_and_bit(&bit, addr); ext4_set_bit(bit, addr); } static inline void mb_clear_bit(int bit, void *addr) { addr = mb_correct_addr_and_bit(&bit, addr); ext4_clear_bit(bit, addr); } static inline int mb_test_and_clear_bit(int bit, void *addr) { addr = mb_correct_addr_and_bit(&bit, addr); return ext4_test_and_clear_bit(bit, addr); } static inline int mb_find_next_zero_bit(void *addr, int max, int start) { int fix = 0, ret, tmpmax; addr = mb_correct_addr_and_bit(&fix, addr); tmpmax = max + fix; start += fix; ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix; if (ret > max) return max; return ret; } static inline int mb_find_next_bit(void *addr, int max, int start) { int fix = 0, ret, tmpmax; addr = mb_correct_addr_and_bit(&fix, addr); tmpmax = max + fix; start += fix; ret = ext4_find_next_bit(addr, tmpmax, start) - fix; if (ret > max) return max; return ret; } static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max) { char *bb; BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); BUG_ON(max == NULL); if (order > e4b->bd_blkbits + 1) { *max = 0; return NULL; } /* at order 0 we see each particular block */ if (order == 0) { *max = 1 << (e4b->bd_blkbits + 3); return e4b->bd_bitmap; } bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order]; *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order]; return bb; } #ifdef DOUBLE_CHECK static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b, int first, int count) { int i; struct super_block *sb = e4b->bd_sb; if (unlikely(e4b->bd_info->bb_bitmap == NULL)) return; assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); for (i = 0; i < count; i++) { if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) { ext4_fsblk_t blocknr; blocknr = ext4_group_first_block_no(sb, e4b->bd_group); blocknr += EXT4_C2B(EXT4_SB(sb), first + i); ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, EXT4_GROUP_INFO_BBITMAP_CORRUPT); ext4_grp_locked_error(sb, e4b->bd_group, inode ? 
inode->i_ino : 0, blocknr, "freeing block already freed " "(bit %u)", first + i); } mb_clear_bit(first + i, e4b->bd_info->bb_bitmap); } } static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count) { int i; if (unlikely(e4b->bd_info->bb_bitmap == NULL)) return; assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); for (i = 0; i < count; i++) { BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap)); mb_set_bit(first + i, e4b->bd_info->bb_bitmap); } } static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) { if (unlikely(e4b->bd_info->bb_bitmap == NULL)) return; if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) { unsigned char *b1, *b2; int i; b1 = (unsigned char *) e4b->bd_info->bb_bitmap; b2 = (unsigned char *) bitmap; for (i = 0; i < e4b->bd_sb->s_blocksize; i++) { if (b1[i] != b2[i]) { ext4_msg(e4b->bd_sb, KERN_ERR, "corruption in group %u " "at byte %u(%u): %x in copy != %x " "on disk/prealloc", e4b->bd_group, i, i * 8, b1[i], b2[i]); BUG(); } } } } static void mb_group_bb_bitmap_alloc(struct super_block *sb, struct ext4_group_info *grp, ext4_group_t group) { struct buffer_head *bh; grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS); if (!grp->bb_bitmap) return; bh = ext4_read_block_bitmap(sb, group); if (IS_ERR_OR_NULL(bh)) { kfree(grp->bb_bitmap); grp->bb_bitmap = NULL; return; } memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize); put_bh(bh); } static void mb_group_bb_bitmap_free(struct ext4_group_info *grp) { kfree(grp->bb_bitmap); } #else static inline void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b, int first, int count) { return; } static inline void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count) { return; } static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap) { return; } static inline void mb_group_bb_bitmap_alloc(struct super_block *sb, struct ext4_group_info *grp, ext4_group_t group) { return; } static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp) { return; } #endif #ifdef AGGRESSIVE_CHECK #define MB_CHECK_ASSERT(assert) \ do { \ if (!(assert)) { \ printk(KERN_EMERG \ "Assertion failure in %s() at %s:%d: \"%s\"\n", \ function, file, line, # assert); \ BUG(); \ } \ } while (0) static void __mb_check_buddy(struct ext4_buddy *e4b, char *file, const char *function, int line) { struct super_block *sb = e4b->bd_sb; int order = e4b->bd_blkbits + 1; int max; int max2; int i; int j; int k; int count; struct ext4_group_info *grp; int fragments = 0; int fstart; struct list_head *cur; void *buddy; void *buddy2; if (e4b->bd_info->bb_check_counter++ % 10) return; while (order > 1) { buddy = mb_find_buddy(e4b, order, &max); MB_CHECK_ASSERT(buddy); buddy2 = mb_find_buddy(e4b, order - 1, &max2); MB_CHECK_ASSERT(buddy2); MB_CHECK_ASSERT(buddy != buddy2); MB_CHECK_ASSERT(max * 2 == max2); count = 0; for (i = 0; i < max; i++) { if (mb_test_bit(i, buddy)) { /* only single bit in buddy2 may be 0 */ if (!mb_test_bit(i << 1, buddy2)) { MB_CHECK_ASSERT( mb_test_bit((i<<1)+1, buddy2)); } continue; } /* both bits in buddy2 must be 1 */ MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2)); MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2)); for (j = 0; j < (1 << order); j++) { k = (i * (1 << order)) + j; MB_CHECK_ASSERT( !mb_test_bit(k, e4b->bd_bitmap)); } count++; } MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count); order--; } fstart = -1; buddy = mb_find_buddy(e4b, 0, &max); for (i = 0; i < max; i++) { if (!mb_test_bit(i, buddy)) { MB_CHECK_ASSERT(i >= 
e4b->bd_info->bb_first_free); if (fstart == -1) { fragments++; fstart = i; } continue; } fstart = -1; /* check used bits only */ for (j = 0; j < e4b->bd_blkbits + 1; j++) { buddy2 = mb_find_buddy(e4b, j, &max2); k = i >> j; MB_CHECK_ASSERT(k < max2); MB_CHECK_ASSERT(mb_test_bit(k, buddy2)); } } MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info)); MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments); grp = ext4_get_group_info(sb, e4b->bd_group); if (!grp) return; list_for_each(cur, &grp->bb_prealloc_list) { ext4_group_t groupnr; struct ext4_prealloc_space *pa; pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k); MB_CHECK_ASSERT(groupnr == e4b->bd_group); for (i = 0; i < pa->pa_len; i++) MB_CHECK_ASSERT(mb_test_bit(k + i, buddy)); } } #undef MB_CHECK_ASSERT #define mb_check_buddy(e4b) __mb_check_buddy(e4b, \ __FILE__, __func__, __LINE__) #else #define mb_check_buddy(e4b) #endif /* * Divide blocks started from @first with length @len into * smaller chunks with power of 2 blocks. * Clear the bits in bitmap which the blocks of the chunk(s) covered, * then increase bb_counters[] for corresponded chunk size. */ static void ext4_mb_mark_free_simple(struct super_block *sb, void *buddy, ext4_grpblk_t first, ext4_grpblk_t len, struct ext4_group_info *grp) { struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_grpblk_t min; ext4_grpblk_t max; ext4_grpblk_t chunk; unsigned int border; BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb)); border = 2 << sb->s_blocksize_bits; while (len > 0) { /* find how many blocks can be covered since this position */ max = ffs(first | border) - 1; /* find how many blocks of power 2 we need to mark */ min = fls(len) - 1; if (max < min) min = max; chunk = 1 << min; /* mark multiblock chunks only */ grp->bb_counters[min]++; if (min > 0) mb_clear_bit(first >> min, buddy + sbi->s_mb_offsets[min]); len -= chunk; first += chunk; } } static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len) { int order; /* * We don't bother with a special lists groups with only 1 block free * extents and for completely empty groups. */ order = fls(len) - 2; if (order < 0) return 0; if (order == MB_NUM_ORDERS(sb)) order--; if (WARN_ON_ONCE(order > MB_NUM_ORDERS(sb))) order = MB_NUM_ORDERS(sb) - 1; return order; } /* Move group to appropriate avg_fragment_size list */ static void mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp) { struct ext4_sb_info *sbi = EXT4_SB(sb); int new_order; if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_fragments == 0) return; new_order = mb_avg_fragment_size_order(sb, grp->bb_free / grp->bb_fragments); if (new_order == grp->bb_avg_fragment_size_order) return; if (grp->bb_avg_fragment_size_order != -1) { write_lock(&sbi->s_mb_avg_fragment_size_locks[ grp->bb_avg_fragment_size_order]); list_del(&grp->bb_avg_fragment_size_node); write_unlock(&sbi->s_mb_avg_fragment_size_locks[ grp->bb_avg_fragment_size_order]); } grp->bb_avg_fragment_size_order = new_order; write_lock(&sbi->s_mb_avg_fragment_size_locks[ grp->bb_avg_fragment_size_order]); list_add_tail(&grp->bb_avg_fragment_size_node, &sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]); write_unlock(&sbi->s_mb_avg_fragment_size_locks[ grp->bb_avg_fragment_size_order]); } /* * Choose next group by traversing largest_free_order lists. Updates *new_cr if * cr level needs an update. 
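 *
 * Illustrative outline (not literal code) of the traversal below:
 *
 *	for (order = ac->ac_2order; order < MB_NUM_ORDERS(sb); order++)
 *		return the first group on
 *		       sbi->s_mb_largest_free_orders[order]
 *		       that passes ext4_mb_good_group();
 *	*new_cr = CR_GOAL_LEN_FAST;	// nothing suitable found
 *
 * e.g. a 64-cluster request (ac_2order == 6) only walks the lists for
 * orders 6 and above.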
*/ static void ext4_mb_choose_next_group_p2_aligned(struct ext4_allocation_context *ac, enum criteria *new_cr, ext4_group_t *group) { struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); struct ext4_group_info *iter; int i; if (ac->ac_status == AC_STATUS_FOUND) return; if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED)) atomic_inc(&sbi->s_bal_p2_aligned_bad_suggestions); for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) { if (list_empty(&sbi->s_mb_largest_free_orders[i])) continue; read_lock(&sbi->s_mb_largest_free_orders_locks[i]); if (list_empty(&sbi->s_mb_largest_free_orders[i])) { read_unlock(&sbi->s_mb_largest_free_orders_locks[i]); continue; } list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i], bb_largest_free_order_node) { if (sbi->s_mb_stats) atomic64_inc(&sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED]); if (likely(ext4_mb_good_group(ac, iter->bb_group, CR_POWER2_ALIGNED))) { *group = iter->bb_group; ac->ac_flags |= EXT4_MB_CR_POWER2_ALIGNED_OPTIMIZED; read_unlock(&sbi->s_mb_largest_free_orders_locks[i]); return; } } read_unlock(&sbi->s_mb_largest_free_orders_locks[i]); } /* Increment cr and search again if no group is found */ *new_cr = CR_GOAL_LEN_FAST; } /* * Find a suitable group of given order from the average fragments list. */ static struct ext4_group_info * ext4_mb_find_good_group_avg_frag_lists(struct ext4_allocation_context *ac, int order) { struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); struct list_head *frag_list = &sbi->s_mb_avg_fragment_size[order]; rwlock_t *frag_list_lock = &sbi->s_mb_avg_fragment_size_locks[order]; struct ext4_group_info *grp = NULL, *iter; enum criteria cr = ac->ac_criteria; if (list_empty(frag_list)) return NULL; read_lock(frag_list_lock); if (list_empty(frag_list)) { read_unlock(frag_list_lock); return NULL; } list_for_each_entry(iter, frag_list, bb_avg_fragment_size_node) { if (sbi->s_mb_stats) atomic64_inc(&sbi->s_bal_cX_groups_considered[cr]); if (likely(ext4_mb_good_group(ac, iter->bb_group, cr))) { grp = iter; break; } } read_unlock(frag_list_lock); return grp; } /* * Choose next group by traversing average fragment size list of suitable * order. Updates *new_cr if cr level needs an update. */ static void ext4_mb_choose_next_group_goal_fast(struct ext4_allocation_context *ac, enum criteria *new_cr, ext4_group_t *group) { struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); struct ext4_group_info *grp = NULL; int i; if (unlikely(ac->ac_flags & EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED)) { if (sbi->s_mb_stats) atomic_inc(&sbi->s_bal_goal_fast_bad_suggestions); } for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len); i < MB_NUM_ORDERS(ac->ac_sb); i++) { grp = ext4_mb_find_good_group_avg_frag_lists(ac, i); if (grp) { *group = grp->bb_group; ac->ac_flags |= EXT4_MB_CR_GOAL_LEN_FAST_OPTIMIZED; return; } } /* * CR_BEST_AVAIL_LEN works based on the concept that we have * a larger normalized goal len request which can be trimmed to * a smaller goal len such that it can still satisfy original * request len. However, allocation request for non-regular * files never gets normalized. * See function ext4_mb_normalize_request() (EXT4_MB_HINT_DATA). */ if (ac->ac_flags & EXT4_MB_HINT_DATA) *new_cr = CR_BEST_AVAIL_LEN; else *new_cr = CR_GOAL_LEN_SLOW; } /* * We couldn't find a group in CR_GOAL_LEN_FAST so try to find the highest free fragment * order we have and proactively trim the goal request length to that order to * find a suitable group faster. 
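 *
 * For example (illustrative numbers): with a normalized goal of 96
 * clusters, order = fls(96) - 1 = 6, and a trim limit of
 * s_mb_best_avail_max_trim_order == 3, the candidate goal lengths tried
 * below are 64, 32, 16 and 8 clusters (rounded up to a stripe multiple
 * when striping is configured), never trimming below the original
 * request length.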
* * This optimizes allocation speed at the cost of slightly reduced * preallocations. However, we make sure that we don't trim the request too * much and fall to CR_GOAL_LEN_SLOW in that case. */ static void ext4_mb_choose_next_group_best_avail(struct ext4_allocation_context *ac, enum criteria *new_cr, ext4_group_t *group) { struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); struct ext4_group_info *grp = NULL; int i, order, min_order; unsigned long num_stripe_clusters = 0; if (unlikely(ac->ac_flags & EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED)) { if (sbi->s_mb_stats) atomic_inc(&sbi->s_bal_best_avail_bad_suggestions); } /* * mb_avg_fragment_size_order() returns order in a way that makes * retrieving back the length using (1 << order) inaccurate. Hence, use * fls() instead since we need to know the actual length while modifying * goal length. */ order = fls(ac->ac_g_ex.fe_len) - 1; if (WARN_ON_ONCE(order - 1 > MB_NUM_ORDERS(ac->ac_sb))) order = MB_NUM_ORDERS(ac->ac_sb); min_order = order - sbi->s_mb_best_avail_max_trim_order; if (min_order < 0) min_order = 0; if (sbi->s_stripe > 0) { /* * We are assuming that stripe size is always a multiple of * cluster ratio otherwise __ext4_fill_super exists early. */ num_stripe_clusters = EXT4_NUM_B2C(sbi, sbi->s_stripe); if (1 << min_order < num_stripe_clusters) /* * We consider 1 order less because later we round * up the goal len to num_stripe_clusters */ min_order = fls(num_stripe_clusters) - 1; } if (1 << min_order < ac->ac_o_ex.fe_len) min_order = fls(ac->ac_o_ex.fe_len); for (i = order; i >= min_order; i--) { int frag_order; /* * Scale down goal len to make sure we find something * in the free fragments list. Basically, reduce * preallocations. */ ac->ac_g_ex.fe_len = 1 << i; if (num_stripe_clusters > 0) { /* * Try to round up the adjusted goal length to * stripe size (in cluster units) multiple for * efficiency. */ ac->ac_g_ex.fe_len = roundup(ac->ac_g_ex.fe_len, num_stripe_clusters); } frag_order = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len); grp = ext4_mb_find_good_group_avg_frag_lists(ac, frag_order); if (grp) { *group = grp->bb_group; ac->ac_flags |= EXT4_MB_CR_BEST_AVAIL_LEN_OPTIMIZED; return; } } /* Reset goal length to original goal length before falling into CR_GOAL_LEN_SLOW */ ac->ac_g_ex.fe_len = ac->ac_orig_goal_len; *new_cr = CR_GOAL_LEN_SLOW; } static inline int should_optimize_scan(struct ext4_allocation_context *ac) { if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN))) return 0; if (ac->ac_criteria >= CR_GOAL_LEN_SLOW) return 0; if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) return 0; return 1; } /* * Return next linear group for allocation. */ static ext4_group_t next_linear_group(ext4_group_t group, ext4_group_t ngroups) { /* * Artificially restricted ngroups for non-extent * files makes group > ngroups possible on first loop. */ return group + 1 >= ngroups ? 0 : group + 1; } /* * ext4_mb_choose_next_group: choose next group for allocation. * * @ac Allocation Context * @new_cr This is an output parameter. If the there is no good group * available at current CR level, this field is updated to indicate * the new cr level that should be used. * @group This is an input / output parameter. As an input it indicates the * next group that the allocator intends to use for allocation. As * output, this field indicates the next group that should be used as * determined by the optimization functions. 
* @ngroups Total number of groups */ static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac, enum criteria *new_cr, ext4_group_t *group, ext4_group_t ngroups) { *new_cr = ac->ac_criteria; if (!should_optimize_scan(ac)) { *group = next_linear_group(*group, ngroups); return; } /* * Optimized scanning can return non adjacent groups which can cause * seek overhead for rotational disks. So try few linear groups before * trying optimized scan. */ if (ac->ac_groups_linear_remaining) { *group = next_linear_group(*group, ngroups); ac->ac_groups_linear_remaining--; return; } if (*new_cr == CR_POWER2_ALIGNED) { ext4_mb_choose_next_group_p2_aligned(ac, new_cr, group); } else if (*new_cr == CR_GOAL_LEN_FAST) { ext4_mb_choose_next_group_goal_fast(ac, new_cr, group); } else if (*new_cr == CR_BEST_AVAIL_LEN) { ext4_mb_choose_next_group_best_avail(ac, new_cr, group); } else { /* * TODO: For CR_GOAL_LEN_SLOW, we can arrange groups in an * rb tree sorted by bb_free. But until that happens, we should * never come here. */ WARN_ON(1); } } /* * Cache the order of the largest free extent we have available in this block * group. */ static void mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp) { struct ext4_sb_info *sbi = EXT4_SB(sb); int i; for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) if (grp->bb_counters[i] > 0) break; /* No need to move between order lists? */ if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || i == grp->bb_largest_free_order) { grp->bb_largest_free_order = i; return; } if (grp->bb_largest_free_order >= 0) { write_lock(&sbi->s_mb_largest_free_orders_locks[ grp->bb_largest_free_order]); list_del_init(&grp->bb_largest_free_order_node); write_unlock(&sbi->s_mb_largest_free_orders_locks[ grp->bb_largest_free_order]); } grp->bb_largest_free_order = i; if (grp->bb_largest_free_order >= 0 && grp->bb_free) { write_lock(&sbi->s_mb_largest_free_orders_locks[ grp->bb_largest_free_order]); list_add_tail(&grp->bb_largest_free_order_node, &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]); write_unlock(&sbi->s_mb_largest_free_orders_locks[ grp->bb_largest_free_order]); } } static noinline_for_stack void ext4_mb_generate_buddy(struct super_block *sb, void *buddy, void *bitmap, ext4_group_t group, struct ext4_group_info *grp) { struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); ext4_grpblk_t i = 0; ext4_grpblk_t first; ext4_grpblk_t len; unsigned free = 0; unsigned fragments = 0; unsigned long long period = get_cycles(); /* initialize buddy from bitmap which is aggregation * of on-disk bitmap and preallocations */ i = mb_find_next_zero_bit(bitmap, max, 0); grp->bb_first_free = i; while (i < max) { fragments++; first = i; i = mb_find_next_bit(bitmap, max, i); len = i - first; free += len; if (len > 1) ext4_mb_mark_free_simple(sb, buddy, first, len, grp); else grp->bb_counters[0]++; if (i < max) i = mb_find_next_zero_bit(bitmap, max, i); } grp->bb_fragments = fragments; if (free != grp->bb_free) { ext4_grp_locked_error(sb, group, 0, 0, "block bitmap and bg descriptor " "inconsistent: %u vs %u free clusters", free, grp->bb_free); /* * If we intend to continue, we consider group descriptor * corrupt and update bb_free using bitmap value */ grp->bb_free = free; ext4_mark_group_bitmap_corrupted(sb, group, EXT4_GROUP_INFO_BBITMAP_CORRUPT); } mb_set_largest_free_order(sb, grp); mb_update_avg_fragment_size(sb, grp); clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); period = get_cycles() - period; 
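	/*
	 * Note (illustration, not code): each free extent found by the
	 * loop above is handed to ext4_mb_mark_free_simple(), which
	 * splits it into naturally aligned power-of-two chunks.  E.g. a
	 * free range starting at cluster 13 with length 7 becomes:
	 *
	 *	[13]    order 0  (bb_counters[0]++)
	 *	[14-15] order 1  (bb_counters[1]++, clear bit 7 in the
	 *	                  order-1 buddy)
	 *	[16-19] order 2  (bb_counters[2]++, clear bit 4 in the
	 *	                  order-2 buddy)
	 */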
atomic_inc(&sbi->s_mb_buddies_generated); atomic64_add(period, &sbi->s_mb_generation_time); } static void mb_regenerate_buddy(struct ext4_buddy *e4b) { int count; int order = 1; void *buddy; while ((buddy = mb_find_buddy(e4b, order++, &count))) mb_set_bits(buddy, 0, count); e4b->bd_info->bb_fragments = 0; memset(e4b->bd_info->bb_counters, 0, sizeof(*e4b->bd_info->bb_counters) * (e4b->bd_sb->s_blocksize_bits + 2)); ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy, e4b->bd_bitmap, e4b->bd_group, e4b->bd_info); } /* The buddy information is attached the buddy cache inode * for convenience. The information regarding each group * is loaded via ext4_mb_load_buddy. The information involve * block bitmap and buddy information. The information are * stored in the inode as * * { page } * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... * * * one block each for bitmap and buddy information. * So for each group we take up 2 blocks. A page can * contain blocks_per_page (PAGE_SIZE / blocksize) blocks. * So it can have information regarding groups_per_page which * is blocks_per_page/2 * * Locking note: This routine takes the block group lock of all groups * for this page; do not hold this lock when calling this routine! */ static int ext4_mb_init_cache(struct folio *folio, char *incore, gfp_t gfp) { ext4_group_t ngroups; unsigned int blocksize; int blocks_per_page; int groups_per_page; int err = 0; int i; ext4_group_t first_group, group; int first_block; struct super_block *sb; struct buffer_head *bhs; struct buffer_head **bh = NULL; struct inode *inode; char *data; char *bitmap; struct ext4_group_info *grinfo; inode = folio->mapping->host; sb = inode->i_sb; ngroups = ext4_get_groups_count(sb); blocksize = i_blocksize(inode); blocks_per_page = PAGE_SIZE / blocksize; mb_debug(sb, "init folio %lu\n", folio->index); groups_per_page = blocks_per_page >> 1; if (groups_per_page == 0) groups_per_page = 1; /* allocate buffer_heads to read bitmaps */ if (groups_per_page > 1) { i = sizeof(struct buffer_head *) * groups_per_page; bh = kzalloc(i, gfp); if (bh == NULL) return -ENOMEM; } else bh = &bhs; first_group = folio->index * blocks_per_page / 2; /* read all groups the folio covers into the cache */ for (i = 0, group = first_group; i < groups_per_page; i++, group++) { if (group >= ngroups) break; grinfo = ext4_get_group_info(sb, group); if (!grinfo) continue; /* * If page is uptodate then we came here after online resize * which added some new uninitialized group info structs, so * we must skip all initialized uptodate buddies on the folio, * which may be currently in use by an allocating task. 
*/ if (folio_test_uptodate(folio) && !EXT4_MB_GRP_NEED_INIT(grinfo)) { bh[i] = NULL; continue; } bh[i] = ext4_read_block_bitmap_nowait(sb, group, false); if (IS_ERR(bh[i])) { err = PTR_ERR(bh[i]); bh[i] = NULL; goto out; } mb_debug(sb, "read bitmap for group %u\n", group); } /* wait for I/O completion */ for (i = 0, group = first_group; i < groups_per_page; i++, group++) { int err2; if (!bh[i]) continue; err2 = ext4_wait_block_bitmap(sb, group, bh[i]); if (!err) err = err2; } first_block = folio->index * blocks_per_page; for (i = 0; i < blocks_per_page; i++) { group = (first_block + i) >> 1; if (group >= ngroups) break; if (!bh[group - first_group]) /* skip initialized uptodate buddy */ continue; if (!buffer_verified(bh[group - first_group])) /* Skip faulty bitmaps */ continue; err = 0; /* * data carry information regarding this * particular group in the format specified * above * */ data = folio_address(folio) + (i * blocksize); bitmap = bh[group - first_group]->b_data; /* * We place the buddy block and bitmap block * close together */ grinfo = ext4_get_group_info(sb, group); if (!grinfo) { err = -EFSCORRUPTED; goto out; } if ((first_block + i) & 1) { /* this is block of buddy */ BUG_ON(incore == NULL); mb_debug(sb, "put buddy for group %u in folio %lu/%x\n", group, folio->index, i * blocksize); trace_ext4_mb_buddy_bitmap_load(sb, group); grinfo->bb_fragments = 0; memset(grinfo->bb_counters, 0, sizeof(*grinfo->bb_counters) * (MB_NUM_ORDERS(sb))); /* * incore got set to the group block bitmap below */ ext4_lock_group(sb, group); /* init the buddy */ memset(data, 0xff, blocksize); ext4_mb_generate_buddy(sb, data, incore, group, grinfo); ext4_unlock_group(sb, group); incore = NULL; } else { /* this is block of bitmap */ BUG_ON(incore != NULL); mb_debug(sb, "put bitmap for group %u in folio %lu/%x\n", group, folio->index, i * blocksize); trace_ext4_mb_bitmap_load(sb, group); /* see comments in ext4_mb_put_pa() */ ext4_lock_group(sb, group); memcpy(data, bitmap, blocksize); /* mark all preallocated blks used in in-core bitmap */ ext4_mb_generate_from_pa(sb, data, group); WARN_ON_ONCE(!RB_EMPTY_ROOT(&grinfo->bb_free_root)); ext4_unlock_group(sb, group); /* set incore so that the buddy information can be * generated using this */ incore = data; } } folio_mark_uptodate(folio); out: if (bh) { for (i = 0; i < groups_per_page; i++) brelse(bh[i]); if (bh != &bhs) kfree(bh); } return err; } /* * Lock the buddy and bitmap pages. This make sure other parallel init_group * on the same buddy page doesn't happen whild holding the buddy page lock. * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap * are on the same page e4b->bd_buddy_folio is NULL and return value is 0. */ static int ext4_mb_get_buddy_page_lock(struct super_block *sb, ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp) { struct inode *inode = EXT4_SB(sb)->s_buddy_cache; int block, pnum, poff; int blocks_per_page; struct folio *folio; e4b->bd_buddy_folio = NULL; e4b->bd_bitmap_folio = NULL; blocks_per_page = PAGE_SIZE / sb->s_blocksize; /* * the buddy cache inode stores the block bitmap * and buddy information in consecutive blocks. * So for each group we need two blocks. 
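 *
 * Worked example (assuming 1k blocks and 4k pages, so
 * blocks_per_page == 4): group 5 uses blocks 10 (bitmap) and 11 (buddy),
 * which both land in page 2 at offsets 2 and 3.  With 4k blocks
 * (blocks_per_page == 1) the bitmap and buddy end up on two separate
 * pages, 10 and 11.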
*/ block = group * 2; pnum = block / blocks_per_page; poff = block % blocks_per_page; folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp); if (IS_ERR(folio)) return PTR_ERR(folio); BUG_ON(folio->mapping != inode->i_mapping); e4b->bd_bitmap_folio = folio; e4b->bd_bitmap = folio_address(folio) + (poff * sb->s_blocksize); if (blocks_per_page >= 2) { /* buddy and bitmap are on the same page */ return 0; } /* blocks_per_page == 1, hence we need another page for the buddy */ folio = __filemap_get_folio(inode->i_mapping, block + 1, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp); if (IS_ERR(folio)) return PTR_ERR(folio); BUG_ON(folio->mapping != inode->i_mapping); e4b->bd_buddy_folio = folio; return 0; } static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b) { if (e4b->bd_bitmap_folio) { folio_unlock(e4b->bd_bitmap_folio); folio_put(e4b->bd_bitmap_folio); } if (e4b->bd_buddy_folio) { folio_unlock(e4b->bd_buddy_folio); folio_put(e4b->bd_buddy_folio); } } /* * Locking note: This routine calls ext4_mb_init_cache(), which takes the * block group lock of all groups for this page; do not hold the BG lock when * calling this routine! */ static noinline_for_stack int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp) { struct ext4_group_info *this_grp; struct ext4_buddy e4b; struct folio *folio; int ret = 0; might_sleep(); mb_debug(sb, "init group %u\n", group); this_grp = ext4_get_group_info(sb, group); if (!this_grp) return -EFSCORRUPTED; /* * This ensures that we don't reinit the buddy cache * page which map to the group from which we are already * allocating. If we are looking at the buddy cache we would * have taken a reference using ext4_mb_load_buddy and that * would have pinned buddy page to page cache. * The call to ext4_mb_get_buddy_page_lock will mark the * page accessed. */ ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp); if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) { /* * somebody initialized the group * return without doing anything */ goto err; } folio = e4b.bd_bitmap_folio; ret = ext4_mb_init_cache(folio, NULL, gfp); if (ret) goto err; if (!folio_test_uptodate(folio)) { ret = -EIO; goto err; } if (e4b.bd_buddy_folio == NULL) { /* * If both the bitmap and buddy are in * the same page we don't need to force * init the buddy */ ret = 0; goto err; } /* init buddy cache */ folio = e4b.bd_buddy_folio; ret = ext4_mb_init_cache(folio, e4b.bd_bitmap, gfp); if (ret) goto err; if (!folio_test_uptodate(folio)) { ret = -EIO; goto err; } err: ext4_mb_put_buddy_page_lock(&e4b); return ret; } /* * Locking note: This routine calls ext4_mb_init_cache(), which takes the * block group lock of all groups for this page; do not hold the BG lock when * calling this routine! 
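 *
 * Typical caller pattern (sketch, see e.g. ext4_mb_try_best_found());
 * note the group lock is taken only after the buddy is loaded:
 *
 *	struct ext4_buddy e4b;
 *
 *	if (ext4_mb_load_buddy(sb, group, &e4b) == 0) {
 *		ext4_lock_group(sb, group);
 *		// ... inspect/modify e4b.bd_bitmap and e4b.bd_buddy ...
 *		ext4_unlock_group(sb, group);
 *		ext4_mb_unload_buddy(&e4b);
 *	}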
*/ static noinline_for_stack int ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp) { int blocks_per_page; int block; int pnum; int poff; struct folio *folio; int ret; struct ext4_group_info *grp; struct ext4_sb_info *sbi = EXT4_SB(sb); struct inode *inode = sbi->s_buddy_cache; might_sleep(); mb_debug(sb, "load group %u\n", group); blocks_per_page = PAGE_SIZE / sb->s_blocksize; grp = ext4_get_group_info(sb, group); if (!grp) return -EFSCORRUPTED; e4b->bd_blkbits = sb->s_blocksize_bits; e4b->bd_info = grp; e4b->bd_sb = sb; e4b->bd_group = group; e4b->bd_buddy_folio = NULL; e4b->bd_bitmap_folio = NULL; if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { /* * we need full data about the group * to make a good selection */ ret = ext4_mb_init_group(sb, group, gfp); if (ret) return ret; } /* * the buddy cache inode stores the block bitmap * and buddy information in consecutive blocks. * So for each group we need two blocks. */ block = group * 2; pnum = block / blocks_per_page; poff = block % blocks_per_page; /* Avoid locking the folio in the fast path ... */ folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0); if (IS_ERR(folio) || !folio_test_uptodate(folio)) { if (!IS_ERR(folio)) /* * drop the folio reference and try * to get the folio with lock. If we * are not uptodate that implies * somebody just created the folio but * is yet to initialize it. So * wait for it to initialize. */ folio_put(folio); folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp); if (!IS_ERR(folio)) { if (WARN_RATELIMIT(folio->mapping != inode->i_mapping, "ext4: bitmap's mapping != inode->i_mapping\n")) { /* should never happen */ folio_unlock(folio); ret = -EINVAL; goto err; } if (!folio_test_uptodate(folio)) { ret = ext4_mb_init_cache(folio, NULL, gfp); if (ret) { folio_unlock(folio); goto err; } mb_cmp_bitmaps(e4b, folio_address(folio) + (poff * sb->s_blocksize)); } folio_unlock(folio); } } if (IS_ERR(folio)) { ret = PTR_ERR(folio); goto err; } if (!folio_test_uptodate(folio)) { ret = -EIO; goto err; } /* Folios marked accessed already */ e4b->bd_bitmap_folio = folio; e4b->bd_bitmap = folio_address(folio) + (poff * sb->s_blocksize); block++; pnum = block / blocks_per_page; poff = block % blocks_per_page; folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_ACCESSED, 0); if (IS_ERR(folio) || !folio_test_uptodate(folio)) { if (!IS_ERR(folio)) folio_put(folio); folio = __filemap_get_folio(inode->i_mapping, pnum, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp); if (!IS_ERR(folio)) { if (WARN_RATELIMIT(folio->mapping != inode->i_mapping, "ext4: buddy bitmap's mapping != inode->i_mapping\n")) { /* should never happen */ folio_unlock(folio); ret = -EINVAL; goto err; } if (!folio_test_uptodate(folio)) { ret = ext4_mb_init_cache(folio, e4b->bd_bitmap, gfp); if (ret) { folio_unlock(folio); goto err; } } folio_unlock(folio); } } if (IS_ERR(folio)) { ret = PTR_ERR(folio); goto err; } if (!folio_test_uptodate(folio)) { ret = -EIO; goto err; } /* Folios marked accessed already */ e4b->bd_buddy_folio = folio; e4b->bd_buddy = folio_address(folio) + (poff * sb->s_blocksize); return 0; err: if (!IS_ERR_OR_NULL(folio)) folio_put(folio); if (e4b->bd_bitmap_folio) folio_put(e4b->bd_bitmap_folio); e4b->bd_buddy = NULL; e4b->bd_bitmap = NULL; return ret; } static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, struct ext4_buddy *e4b) { return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS); } static void 
ext4_mb_unload_buddy(struct ext4_buddy *e4b) { if (e4b->bd_bitmap_folio) folio_put(e4b->bd_bitmap_folio); if (e4b->bd_buddy_folio) folio_put(e4b->bd_buddy_folio); } static int mb_find_order_for_block(struct ext4_buddy *e4b, int block) { int order = 1, max; void *bb; BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); BUG_ON(block >= (1 << (e4b->bd_blkbits + 3))); while (order <= e4b->bd_blkbits + 1) { bb = mb_find_buddy(e4b, order, &max); if (!mb_test_bit(block >> order, bb)) { /* this block is part of buddy of order 'order' */ return order; } order++; } return 0; } static void mb_clear_bits(void *bm, int cur, int len) { __u32 *addr; len = cur + len; while (cur < len) { if ((cur & 31) == 0 && (len - cur) >= 32) { /* fast path: clear whole word at once */ addr = bm + (cur >> 3); *addr = 0; cur += 32; continue; } mb_clear_bit(cur, bm); cur++; } } /* clear bits in given range * will return first found zero bit if any, -1 otherwise */ static int mb_test_and_clear_bits(void *bm, int cur, int len) { __u32 *addr; int zero_bit = -1; len = cur + len; while (cur < len) { if ((cur & 31) == 0 && (len - cur) >= 32) { /* fast path: clear whole word at once */ addr = bm + (cur >> 3); if (*addr != (__u32)(-1) && zero_bit == -1) zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0); *addr = 0; cur += 32; continue; } if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1) zero_bit = cur; cur++; } return zero_bit; } void mb_set_bits(void *bm, int cur, int len) { __u32 *addr; len = cur + len; while (cur < len) { if ((cur & 31) == 0 && (len - cur) >= 32) { /* fast path: set whole word at once */ addr = bm + (cur >> 3); *addr = 0xffffffff; cur += 32; continue; } mb_set_bit(cur, bm); cur++; } } static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side) { if (mb_test_bit(*bit + side, bitmap)) { mb_clear_bit(*bit, bitmap); (*bit) -= side; return 1; } else { (*bit) += side; mb_set_bit(*bit, bitmap); return -1; } } static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last) { int max; int order = 1; void *buddy = mb_find_buddy(e4b, order, &max); while (buddy) { void *buddy2; /* Bits in range [first; last] are known to be set since * corresponding blocks were allocated. Bits in range * (first; last) will stay set because they form buddies on * upper layer. We just deal with borders if they don't * align with upper layer and then go up. * Releasing entire group is all about clearing * single bit of highest order buddy. */ /* Example: * --------------------------------- * | 1 | 1 | 1 | 1 | * --------------------------------- * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | * --------------------------------- * 0 1 2 3 4 5 6 7 * \_____________________/ * * Neither [1] nor [6] is aligned to above layer. * Left neighbour [0] is free, so mark it busy, * decrease bb_counters and extend range to * [0; 6] * Right neighbour [7] is busy. It can't be coaleasced with [6], so * mark [6] free, increase bb_counters and shrink range to * [0; 5]. * Then shift range to [0; 2], go up and do the same. 
*/ if (first & 1) e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1); if (!(last & 1)) e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1); if (first > last) break; order++; buddy2 = mb_find_buddy(e4b, order, &max); if (!buddy2) { mb_clear_bits(buddy, first, last - first + 1); e4b->bd_info->bb_counters[order - 1] += last - first + 1; break; } first >>= 1; last >>= 1; buddy = buddy2; } } static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, int first, int count) { int left_is_free = 0; int right_is_free = 0; int block; int last = first + count - 1; struct super_block *sb = e4b->bd_sb; if (WARN_ON(count == 0)) return; BUG_ON(last >= (sb->s_blocksize << 3)); assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); /* Don't bother if the block group is corrupt. */ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) return; mb_check_buddy(e4b); mb_free_blocks_double(inode, e4b, first, count); /* access memory sequentially: check left neighbour, * clear range and then check right neighbour */ if (first != 0) left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap); block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count); if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0]) right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap); if (unlikely(block != -1)) { struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_fsblk_t blocknr; /* * Fastcommit replay can free already freed blocks which * corrupts allocation info. Regenerate it. */ if (sbi->s_mount_state & EXT4_FC_REPLAY) { mb_regenerate_buddy(e4b); goto check; } blocknr = ext4_group_first_block_no(sb, e4b->bd_group); blocknr += EXT4_C2B(sbi, block); ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, EXT4_GROUP_INFO_BBITMAP_CORRUPT); ext4_grp_locked_error(sb, e4b->bd_group, inode ? inode->i_ino : 0, blocknr, "freeing already freed block (bit %u); block bitmap corrupt.", block); return; } this_cpu_inc(discard_pa_seq); e4b->bd_info->bb_free += count; if (first < e4b->bd_info->bb_first_free) e4b->bd_info->bb_first_free = first; /* let's maintain fragments counter */ if (left_is_free && right_is_free) e4b->bd_info->bb_fragments--; else if (!left_is_free && !right_is_free) e4b->bd_info->bb_fragments++; /* buddy[0] == bd_bitmap is a special case, so handle * it right away and let mb_buddy_mark_free stay free of * zero order checks. * Check if neighbours are to be coaleasced, * adjust bitmap bb_counters and borders appropriately. */ if (first & 1) { first += !left_is_free; e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1; } if (!(last & 1)) { last -= !right_is_free; e4b->bd_info->bb_counters[0] += right_is_free ? 
-1 : 1; } if (first <= last) mb_buddy_mark_free(e4b, first >> 1, last >> 1); mb_set_largest_free_order(sb, e4b->bd_info); mb_update_avg_fragment_size(sb, e4b->bd_info); check: mb_check_buddy(e4b); } static int mb_find_extent(struct ext4_buddy *e4b, int block, int needed, struct ext4_free_extent *ex) { int max, order, next; void *buddy; assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); BUG_ON(ex == NULL); buddy = mb_find_buddy(e4b, 0, &max); BUG_ON(buddy == NULL); BUG_ON(block >= max); if (mb_test_bit(block, buddy)) { ex->fe_len = 0; ex->fe_start = 0; ex->fe_group = 0; return 0; } /* find actual order */ order = mb_find_order_for_block(e4b, block); ex->fe_len = (1 << order) - (block & ((1 << order) - 1)); ex->fe_start = block; ex->fe_group = e4b->bd_group; block = block >> order; while (needed > ex->fe_len && mb_find_buddy(e4b, order, &max)) { if (block + 1 >= max) break; next = (block + 1) * (1 << order); if (mb_test_bit(next, e4b->bd_bitmap)) break; order = mb_find_order_for_block(e4b, next); block = next >> order; ex->fe_len += 1 << order; } if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) { /* Should never happen! (but apparently sometimes does?!?) */ WARN_ON(1); ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0, "corruption or bug in mb_find_extent " "block=%d, order=%d needed=%d ex=%u/%d/%d@%u", block, order, needed, ex->fe_group, ex->fe_start, ex->fe_len, ex->fe_logical); ex->fe_len = 0; ex->fe_start = 0; ex->fe_group = 0; } return ex->fe_len; } static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex) { int ord; int mlen = 0; int max = 0; int start = ex->fe_start; int len = ex->fe_len; unsigned ret = 0; int len0 = len; void *buddy; int ord_start, ord_end; BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3)); BUG_ON(e4b->bd_group != ex->fe_group); assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); mb_check_buddy(e4b); mb_mark_used_double(e4b, start, len); this_cpu_inc(discard_pa_seq); e4b->bd_info->bb_free -= len; if (e4b->bd_info->bb_first_free == start) e4b->bd_info->bb_first_free += len; /* let's maintain fragments counter */ if (start != 0) mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0]) max = !mb_test_bit(start + len, e4b->bd_bitmap); if (mlen && max) e4b->bd_info->bb_fragments++; else if (!mlen && !max) e4b->bd_info->bb_fragments--; /* let's maintain buddy itself */ while (len) { ord = mb_find_order_for_block(e4b, start); if (((start >> ord) << ord) == start && len >= (1 << ord)) { /* the whole chunk may be allocated at once! 
*/ mlen = 1 << ord; buddy = mb_find_buddy(e4b, ord, &max); BUG_ON((start >> ord) >= max); mb_set_bit(start >> ord, buddy); e4b->bd_info->bb_counters[ord]--; start += mlen; len -= mlen; BUG_ON(len < 0); continue; } /* store for history */ if (ret == 0) ret = len | (ord << 16); BUG_ON(ord <= 0); buddy = mb_find_buddy(e4b, ord, &max); mb_set_bit(start >> ord, buddy); e4b->bd_info->bb_counters[ord]--; ord_start = (start >> ord) << ord; ord_end = ord_start + (1 << ord); /* first chunk */ if (start > ord_start) ext4_mb_mark_free_simple(e4b->bd_sb, e4b->bd_buddy, ord_start, start - ord_start, e4b->bd_info); /* last chunk */ if (start + len < ord_end) { ext4_mb_mark_free_simple(e4b->bd_sb, e4b->bd_buddy, start + len, ord_end - (start + len), e4b->bd_info); break; } len = start + len - ord_end; start = ord_end; } mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info); mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info); mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0); mb_check_buddy(e4b); return ret; } /* * Must be called under group lock! */ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac, struct ext4_buddy *e4b) { struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); int ret; BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group); BUG_ON(ac->ac_status == AC_STATUS_FOUND); ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len); ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical; ret = mb_mark_used(e4b, &ac->ac_b_ex); /* preallocation can change ac_b_ex, thus we store actually * allocated blocks for history */ ac->ac_f_ex = ac->ac_b_ex; ac->ac_status = AC_STATUS_FOUND; ac->ac_tail = ret & 0xffff; ac->ac_buddy = ret >> 16; /* * take the page reference. We want the page to be pinned * so that we don't get a ext4_mb_init_cache_call for this * group until we update the bitmap. That would mean we * double allocate blocks. The reference is dropped * in ext4_mb_release_context */ ac->ac_bitmap_folio = e4b->bd_bitmap_folio; folio_get(ac->ac_bitmap_folio); ac->ac_buddy_folio = e4b->bd_buddy_folio; folio_get(ac->ac_buddy_folio); /* store last allocated for subsequent stream allocation */ if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { spin_lock(&sbi->s_md_lock); sbi->s_mb_last_group = ac->ac_f_ex.fe_group; sbi->s_mb_last_start = ac->ac_f_ex.fe_start; spin_unlock(&sbi->s_md_lock); } /* * As we've just preallocated more space than * user requested originally, we store allocated * space in a special descriptor. */ if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) ext4_mb_new_preallocation(ac); } static void ext4_mb_check_limits(struct ext4_allocation_context *ac, struct ext4_buddy *e4b, int finish_group) { struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); struct ext4_free_extent *bex = &ac->ac_b_ex; struct ext4_free_extent *gex = &ac->ac_g_ex; if (ac->ac_status == AC_STATUS_FOUND) return; /* * We don't want to scan for a whole year */ if (ac->ac_found > sbi->s_mb_max_to_scan && !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { ac->ac_status = AC_STATUS_BREAK; return; } /* * Haven't found good chunk so far, let's continue */ if (bex->fe_len < gex->fe_len) return; if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan) ext4_mb_use_best_found(ac, e4b); } /* * The routine checks whether found extent is good enough. If it is, * then the extent gets marked used and flag is set to the context * to stop scanning. Otherwise, the extent is compared with the * previous found extent and if new one is better, then it's stored * in the context. Later, the best found extent will be used, if * mballoc can't find good enough extent. 
* * The algorithm used is roughly as follows: * * * If free extent found is exactly as big as goal, then * stop the scan and use it immediately * * * If free extent found is smaller than goal, then keep retrying * upto a max of sbi->s_mb_max_to_scan times (default 200). After * that stop scanning and use whatever we have. * * * If free extent found is bigger than goal, then keep retrying * upto a max of sbi->s_mb_min_to_scan times (default 10) before * stopping the scan and using the extent. * * * FIXME: real allocation policy is to be designed yet! */ static void ext4_mb_measure_extent(struct ext4_allocation_context *ac, struct ext4_free_extent *ex, struct ext4_buddy *e4b) { struct ext4_free_extent *bex = &ac->ac_b_ex; struct ext4_free_extent *gex = &ac->ac_g_ex; BUG_ON(ex->fe_len <= 0); BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); ac->ac_found++; ac->ac_cX_found[ac->ac_criteria]++; /* * The special case - take what you catch first */ if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) { *bex = *ex; ext4_mb_use_best_found(ac, e4b); return; } /* * Let's check whether the chuck is good enough */ if (ex->fe_len == gex->fe_len) { *bex = *ex; ext4_mb_use_best_found(ac, e4b); return; } /* * If this is first found extent, just store it in the context */ if (bex->fe_len == 0) { *bex = *ex; return; } /* * If new found extent is better, store it in the context */ if (bex->fe_len < gex->fe_len) { /* if the request isn't satisfied, any found extent * larger than previous best one is better */ if (ex->fe_len > bex->fe_len) *bex = *ex; } else if (ex->fe_len > gex->fe_len) { /* if the request is satisfied, then we try to find * an extent that still satisfy the request, but is * smaller than previous one */ if (ex->fe_len < bex->fe_len) *bex = *ex; } ext4_mb_check_limits(ac, e4b, 0); } static noinline_for_stack void ext4_mb_try_best_found(struct ext4_allocation_context *ac, struct ext4_buddy *e4b) { struct ext4_free_extent ex = ac->ac_b_ex; ext4_group_t group = ex.fe_group; int max; int err; BUG_ON(ex.fe_len <= 0); err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); if (err) return; ext4_lock_group(ac->ac_sb, group); if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) goto out; max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex); if (max > 0) { ac->ac_b_ex = ex; ext4_mb_use_best_found(ac, e4b); } out: ext4_unlock_group(ac->ac_sb, group); ext4_mb_unload_buddy(e4b); } static noinline_for_stack int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, struct ext4_buddy *e4b) { ext4_group_t group = ac->ac_g_ex.fe_group; int max; int err; struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); struct ext4_free_extent ex; if (!grp) return -EFSCORRUPTED; if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY))) return 0; if (grp->bb_free == 0) return 0; err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); if (err) return err; ext4_lock_group(ac->ac_sb, group); if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) goto out; max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, ac->ac_g_ex.fe_len, &ex); ex.fe_logical = 0xDEADFA11; /* debug value */ if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == EXT4_NUM_B2C(sbi, sbi->s_stripe)) { ext4_fsblk_t start; start = ext4_grp_offs_to_block(ac->ac_sb, &ex); /* use do_div to get remainder (would be 64-bit modulo) */ if (do_div(start, sbi->s_stripe) == 0) { ac->ac_found++; 
ac->ac_b_ex = ex; ext4_mb_use_best_found(ac, e4b); } } else if (max >= ac->ac_g_ex.fe_len) { BUG_ON(ex.fe_len <= 0); BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); ac->ac_found++; ac->ac_b_ex = ex; ext4_mb_use_best_found(ac, e4b); } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { /* Sometimes, caller may want to merge even small * number of blocks to an existing extent */ BUG_ON(ex.fe_len <= 0); BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); ac->ac_found++; ac->ac_b_ex = ex; ext4_mb_use_best_found(ac, e4b); } out: ext4_unlock_group(ac->ac_sb, group); ext4_mb_unload_buddy(e4b); return 0; } /* * The routine scans buddy structures (not bitmap!) from given order * to max order and tries to find big enough chunk to satisfy the req */ static noinline_for_stack void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, struct ext4_buddy *e4b) { struct super_block *sb = ac->ac_sb; struct ext4_group_info *grp = e4b->bd_info; void *buddy; int i; int k; int max; BUG_ON(ac->ac_2order <= 0); for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) { if (grp->bb_counters[i] == 0) continue; buddy = mb_find_buddy(e4b, i, &max); if (WARN_RATELIMIT(buddy == NULL, "ext4: mb_simple_scan_group: mb_find_buddy failed, (%d)\n", i)) continue; k = mb_find_next_zero_bit(buddy, max, 0); if (k >= max) { ext4_mark_group_bitmap_corrupted(ac->ac_sb, e4b->bd_group, EXT4_GROUP_INFO_BBITMAP_CORRUPT); ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0, "%d free clusters of order %d. But found 0", grp->bb_counters[i], i); break; } ac->ac_found++; ac->ac_cX_found[ac->ac_criteria]++; ac->ac_b_ex.fe_len = 1 << i; ac->ac_b_ex.fe_start = k << i; ac->ac_b_ex.fe_group = e4b->bd_group; ext4_mb_use_best_found(ac, e4b); BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len); if (EXT4_SB(sb)->s_mb_stats) atomic_inc(&EXT4_SB(sb)->s_bal_2orders); break; } } /* * The routine scans the group and measures all found extents. * In order to optimize scanning, caller must pass number of * free blocks in the group, so the routine can know upper limit. */ static noinline_for_stack void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, struct ext4_buddy *e4b) { struct super_block *sb = ac->ac_sb; void *bitmap = e4b->bd_bitmap; struct ext4_free_extent ex; int i, j, freelen; int free; free = e4b->bd_info->bb_free; if (WARN_ON(free <= 0)) return; i = e4b->bd_info->bb_first_free; while (free && ac->ac_status == AC_STATUS_CONTINUE) { i = mb_find_next_zero_bit(bitmap, EXT4_CLUSTERS_PER_GROUP(sb), i); if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) { /* * IF we have corrupt bitmap, we won't find any * free blocks even though group info says we * have free blocks */ ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, EXT4_GROUP_INFO_BBITMAP_CORRUPT); ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, "%d free clusters as per " "group info. 
But bitmap says 0", free); break; } if (!ext4_mb_cr_expensive(ac->ac_criteria)) { /* * In CR_GOAL_LEN_FAST and CR_BEST_AVAIL_LEN, we are * sure that this group will have a large enough * continuous free extent, so skip over the smaller free * extents */ j = mb_find_next_bit(bitmap, EXT4_CLUSTERS_PER_GROUP(sb), i); freelen = j - i; if (freelen < ac->ac_g_ex.fe_len) { i = j; free -= freelen; continue; } } mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); if (WARN_ON(ex.fe_len <= 0)) break; if (free < ex.fe_len) { ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, EXT4_GROUP_INFO_BBITMAP_CORRUPT); ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, "%d free clusters as per " "group info. But got %d blocks", free, ex.fe_len); /* * The number of free blocks differs. This mostly * indicate that the bitmap is corrupt. So exit * without claiming the space. */ break; } ex.fe_logical = 0xDEADC0DE; /* debug value */ ext4_mb_measure_extent(ac, &ex, e4b); i += ex.fe_len; free -= ex.fe_len; } ext4_mb_check_limits(ac, e4b, 1); } /* * This is a special case for storages like raid5 * we try to find stripe-aligned chunks for stripe-size-multiple requests */ static noinline_for_stack void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, struct ext4_buddy *e4b) { struct super_block *sb = ac->ac_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); void *bitmap = e4b->bd_bitmap; struct ext4_free_extent ex; ext4_fsblk_t first_group_block; ext4_fsblk_t a; ext4_grpblk_t i, stripe; int max; BUG_ON(sbi->s_stripe == 0); /* find first stripe-aligned block in group */ first_group_block = ext4_group_first_block_no(sb, e4b->bd_group); a = first_group_block + sbi->s_stripe - 1; do_div(a, sbi->s_stripe); i = (a * sbi->s_stripe) - first_group_block; stripe = EXT4_NUM_B2C(sbi, sbi->s_stripe); i = EXT4_B2C(sbi, i); while (i < EXT4_CLUSTERS_PER_GROUP(sb)) { if (!mb_test_bit(i, bitmap)) { max = mb_find_extent(e4b, i, stripe, &ex); if (max >= stripe) { ac->ac_found++; ac->ac_cX_found[ac->ac_criteria]++; ex.fe_logical = 0xDEADF00D; /* debug value */ ac->ac_b_ex = ex; ext4_mb_use_best_found(ac, e4b); break; } } i += stripe; } } /* * This is also called BEFORE we load the buddy bitmap. * Returns either 1 or 0 indicating that the group is either suitable * for the allocation or not. 
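 *
 * In short, the acceptance test per criterion is (summary only, the
 * switch below is authoritative):
 *
 *	CR_POWER2_ALIGNED:  bb_largest_free_order >= ac_2order and enough
 *	                    free clusters for the goal (data allocations
 *	                    also avoid the first group of a flex_bg)
 *	CR_GOAL_LEN_FAST /
 *	CR_BEST_AVAIL_LEN:  average free extent (bb_free / bb_fragments)
 *	                    >= goal length
 *	CR_GOAL_LEN_SLOW:   bb_free >= goal length
 *	CR_ANY_FREE:        any free block will do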
*/ static bool ext4_mb_good_group(struct ext4_allocation_context *ac, ext4_group_t group, enum criteria cr) { ext4_grpblk_t free, fragments; int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); BUG_ON(cr < CR_POWER2_ALIGNED || cr >= EXT4_MB_NUM_CRS); if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) return false; free = grp->bb_free; if (free == 0) return false; fragments = grp->bb_fragments; if (fragments == 0) return false; switch (cr) { case CR_POWER2_ALIGNED: BUG_ON(ac->ac_2order == 0); /* Avoid using the first bg of a flexgroup for data files */ if ((ac->ac_flags & EXT4_MB_HINT_DATA) && (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) && ((group % flex_size) == 0)) return false; if (free < ac->ac_g_ex.fe_len) return false; if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb)) return true; if (grp->bb_largest_free_order < ac->ac_2order) return false; return true; case CR_GOAL_LEN_FAST: case CR_BEST_AVAIL_LEN: if ((free / fragments) >= ac->ac_g_ex.fe_len) return true; break; case CR_GOAL_LEN_SLOW: if (free >= ac->ac_g_ex.fe_len) return true; break; case CR_ANY_FREE: return true; default: BUG(); } return false; } /* * This could return negative error code if something goes wrong * during ext4_mb_init_group(). This should not be called with * ext4_lock_group() held. * * Note: because we are conditionally operating with the group lock in * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this * function using __acquire and __release. This means we need to be * super careful before messing with the error path handling via "goto * out"! */ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, ext4_group_t group, enum criteria cr) { struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); struct super_block *sb = ac->ac_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK; ext4_grpblk_t free; int ret = 0; if (!grp) return -EFSCORRUPTED; if (sbi->s_mb_stats) atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]); if (should_lock) { ext4_lock_group(sb, group); __release(ext4_group_lock_ptr(sb, group)); } free = grp->bb_free; if (free == 0) goto out; /* * In all criterias except CR_ANY_FREE we try to avoid groups that * can't possibly satisfy the full goal request due to insufficient * free blocks. */ if (cr < CR_ANY_FREE && free < ac->ac_g_ex.fe_len) goto out; if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) goto out; if (should_lock) { __acquire(ext4_group_lock_ptr(sb, group)); ext4_unlock_group(sb, group); } /* We only do this if the grp has never been initialized */ if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL); int ret; /* * CR_POWER2_ALIGNED/CR_GOAL_LEN_FAST is a very optimistic * search to find large good chunks almost for free. If buddy * data is not ready, then this optimization makes no sense. But * we never skip the first block group in a flex_bg, since this * gets used for metadata block allocation, and we want to make * sure we locate metadata blocks in the first block group in * the flex_bg if possible. 
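 *
 * For example, with s_log_groups_per_flex == 4 the first group of each
 * flex_bg is the one for which (group & ((1 << 4) - 1)) == 0, i.e.
 * groups 0, 16, 32, ...; those groups are not skipped here even at the
 * cheap criteria.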
*/ if (!ext4_mb_cr_expensive(cr) && (!sbi->s_log_groups_per_flex || ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) && !(ext4_has_group_desc_csum(sb) && (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) return 0; ret = ext4_mb_init_group(sb, group, GFP_NOFS); if (ret) return ret; } if (should_lock) { ext4_lock_group(sb, group); __release(ext4_group_lock_ptr(sb, group)); } ret = ext4_mb_good_group(ac, group, cr); out: if (should_lock) { __acquire(ext4_group_lock_ptr(sb, group)); ext4_unlock_group(sb, group); } return ret; } /* * Start prefetching @nr block bitmaps starting at @group. * Return the next group which needs to be prefetched. */ ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group, unsigned int nr, int *cnt) { ext4_group_t ngroups = ext4_get_groups_count(sb); struct buffer_head *bh; struct blk_plug plug; blk_start_plug(&plug); while (nr-- > 0) { struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL); struct ext4_group_info *grp = ext4_get_group_info(sb, group); /* * Prefetch block groups with free blocks; but don't * bother if it is marked uninitialized on disk, since * it won't require I/O to read. Also only try to * prefetch once, so we avoid getblk() call, which can * be expensive. */ if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) && EXT4_MB_GRP_NEED_INIT(grp) && ext4_free_group_clusters(sb, gdp) > 0 ) { bh = ext4_read_block_bitmap_nowait(sb, group, true); if (bh && !IS_ERR(bh)) { if (!buffer_uptodate(bh) && cnt) (*cnt)++; brelse(bh); } } if (++group >= ngroups) group = 0; } blk_finish_plug(&plug); return group; } /* * Prefetching reads the block bitmap into the buffer cache; but we * need to make sure that the buddy bitmap in the page cache has been * initialized. Note that ext4_mb_init_group() will block if the I/O * is not yet completed, or indeed if it was not initiated by * ext4_mb_prefetch did not start the I/O. * * TODO: We should actually kick off the buddy bitmap setup in a work * queue when the buffer I/O is completed, so that we don't block * waiting for the block allocation bitmap read to finish when * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator(). 
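 *
 * Usage sketch (simplified from ext4_mb_regular_allocator()):
 *
 *	unsigned int nr = sbi->s_mb_prefetch;
 *	int ios = 0;
 *	ext4_group_t next;
 *
 *	next = ext4_mb_prefetch(sb, group, nr, &ios);	// queue bitmap reads
 *	... scan groups ...
 *	ext4_mb_prefetch_fini(sb, next, nr);		// build buddy data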
*/ void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group, unsigned int nr) { struct ext4_group_desc *gdp; struct ext4_group_info *grp; while (nr-- > 0) { if (!group) group = ext4_get_groups_count(sb); group--; gdp = ext4_get_group_desc(sb, group, NULL); grp = ext4_get_group_info(sb, group); if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) && ext4_free_group_clusters(sb, gdp) > 0) { if (ext4_mb_init_group(sb, group, GFP_NOFS)) break; } } } static noinline_for_stack int ext4_mb_regular_allocator(struct ext4_allocation_context *ac) { ext4_group_t prefetch_grp = 0, ngroups, group, i; enum criteria new_cr, cr = CR_GOAL_LEN_FAST; int err = 0, first_err = 0; unsigned int nr = 0, prefetch_ios = 0; struct ext4_sb_info *sbi; struct super_block *sb; struct ext4_buddy e4b; int lost; sb = ac->ac_sb; sbi = EXT4_SB(sb); ngroups = ext4_get_groups_count(sb); /* non-extent files are limited to low blocks/groups */ if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))) ngroups = sbi->s_blockfile_groups; BUG_ON(ac->ac_status == AC_STATUS_FOUND); /* first, try the goal */ err = ext4_mb_find_by_goal(ac, &e4b); if (err || ac->ac_status == AC_STATUS_FOUND) goto out; if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) goto out; /* * ac->ac_2order is set only if the fe_len is a power of 2 * if ac->ac_2order is set we also set criteria to CR_POWER2_ALIGNED * so that we try exact allocation using buddy. */ i = fls(ac->ac_g_ex.fe_len); ac->ac_2order = 0; /* * We search using buddy data only if the order of the request * is greater than equal to the sbi_s_mb_order2_reqs * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req * We also support searching for power-of-two requests only for * requests upto maximum buddy size we have constructed. */ if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) { if (is_power_of_2(ac->ac_g_ex.fe_len)) ac->ac_2order = array_index_nospec(i - 1, MB_NUM_ORDERS(sb)); } /* if stream allocation is enabled, use global goal */ if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { /* TBD: may be hot point */ spin_lock(&sbi->s_md_lock); ac->ac_g_ex.fe_group = sbi->s_mb_last_group; ac->ac_g_ex.fe_start = sbi->s_mb_last_start; spin_unlock(&sbi->s_md_lock); } /* * Let's just scan groups to find more-less suitable blocks We * start with CR_GOAL_LEN_FAST, unless it is power of 2 * aligned, in which case let's do that faster approach first. 
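 *
 * Worked example (illustrative numbers, assuming the request clears the
 * mb_order2_req threshold): a goal of 64 blocks has fls() == 7 and is a
 * power of two, so ac_2order was set to 6 above and the scan starts at
 * CR_POWER2_ALIGNED; a goal of 24 blocks is not a power of two, so
 * ac_2order stayed 0 and the scan starts at CR_GOAL_LEN_FAST.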
*/ if (ac->ac_2order) cr = CR_POWER2_ALIGNED; repeat: for (; cr < EXT4_MB_NUM_CRS && ac->ac_status == AC_STATUS_CONTINUE; cr++) { ac->ac_criteria = cr; /* * searching for the right group start * from the goal value specified */ group = ac->ac_g_ex.fe_group; ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups; prefetch_grp = group; nr = 0; for (i = 0, new_cr = cr; i < ngroups; i++, ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) { int ret = 0; cond_resched(); if (new_cr != cr) { cr = new_cr; goto repeat; } /* * Batch reads of the block allocation bitmaps * to get multiple READs in flight; limit * prefetching at inexpensive CR, otherwise mballoc * can spend a lot of time loading imperfect groups */ if ((prefetch_grp == group) && (ext4_mb_cr_expensive(cr) || prefetch_ios < sbi->s_mb_prefetch_limit)) { nr = sbi->s_mb_prefetch; if (ext4_has_feature_flex_bg(sb)) { nr = 1 << sbi->s_log_groups_per_flex; nr -= group & (nr - 1); nr = min(nr, sbi->s_mb_prefetch); } prefetch_grp = ext4_mb_prefetch(sb, group, nr, &prefetch_ios); } /* This now checks without needing the buddy page */ ret = ext4_mb_good_group_nolock(ac, group, cr); if (ret <= 0) { if (!first_err) first_err = ret; continue; } err = ext4_mb_load_buddy(sb, group, &e4b); if (err) goto out; ext4_lock_group(sb, group); /* * We need to check again after locking the * block group */ ret = ext4_mb_good_group(ac, group, cr); if (ret == 0) { ext4_unlock_group(sb, group); ext4_mb_unload_buddy(&e4b); continue; } ac->ac_groups_scanned++; if (cr == CR_POWER2_ALIGNED) ext4_mb_simple_scan_group(ac, &e4b); else { bool is_stripe_aligned = (sbi->s_stripe >= sbi->s_cluster_ratio) && !(ac->ac_g_ex.fe_len % EXT4_NUM_B2C(sbi, sbi->s_stripe)); if ((cr == CR_GOAL_LEN_FAST || cr == CR_BEST_AVAIL_LEN) && is_stripe_aligned) ext4_mb_scan_aligned(ac, &e4b); if (ac->ac_status == AC_STATUS_CONTINUE) ext4_mb_complex_scan_group(ac, &e4b); } ext4_unlock_group(sb, group); ext4_mb_unload_buddy(&e4b); if (ac->ac_status != AC_STATUS_CONTINUE) break; } /* Processed all groups and haven't found blocks */ if (sbi->s_mb_stats && i == ngroups) atomic64_inc(&sbi->s_bal_cX_failed[cr]); if (i == ngroups && ac->ac_criteria == CR_BEST_AVAIL_LEN) /* Reset goal length to original goal length before * falling into CR_GOAL_LEN_SLOW */ ac->ac_g_ex.fe_len = ac->ac_orig_goal_len; } if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { /* * We've been searching too long. Let's try to allocate * the best chunk we've found so far */ ext4_mb_try_best_found(ac, &e4b); if (ac->ac_status != AC_STATUS_FOUND) { /* * Someone more lucky has already allocated it. 
* The only thing we can do is just take first * found block(s) */ lost = atomic_inc_return(&sbi->s_mb_lost_chunks); mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n", ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len, lost); ac->ac_b_ex.fe_group = 0; ac->ac_b_ex.fe_start = 0; ac->ac_b_ex.fe_len = 0; ac->ac_status = AC_STATUS_CONTINUE; ac->ac_flags |= EXT4_MB_HINT_FIRST; cr = CR_ANY_FREE; goto repeat; } } if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND) atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]); out: if (!err && ac->ac_status != AC_STATUS_FOUND && first_err) err = first_err; mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n", ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status, ac->ac_flags, cr, err); if (nr) ext4_mb_prefetch_fini(sb, prefetch_grp, nr); return err; } static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) { struct super_block *sb = pde_data(file_inode(seq->file)); ext4_group_t group; if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) return NULL; group = *pos + 1; return (void *) ((unsigned long) group); } static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) { struct super_block *sb = pde_data(file_inode(seq->file)); ext4_group_t group; ++*pos; if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) return NULL; group = *pos + 1; return (void *) ((unsigned long) group); } static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) { struct super_block *sb = pde_data(file_inode(seq->file)); ext4_group_t group = (ext4_group_t) ((unsigned long) v); int i, err; char nbuf[16]; struct ext4_buddy e4b; struct ext4_group_info *grinfo; unsigned char blocksize_bits = min_t(unsigned char, sb->s_blocksize_bits, EXT4_MAX_BLOCK_LOG_SIZE); struct sg { struct ext4_group_info info; ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2]; } sg; group--; if (group == 0) seq_puts(seq, "#group: free frags first [" " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n"); i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + sizeof(struct ext4_group_info); grinfo = ext4_get_group_info(sb, group); if (!grinfo) return 0; /* Load the group info in memory only if not already loaded. */ if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { err = ext4_mb_load_buddy(sb, group, &e4b); if (err) { seq_printf(seq, "#%-5u: %s\n", group, ext4_decode_error(NULL, err, nbuf)); return 0; } ext4_mb_unload_buddy(&e4b); } /* * We care only about free space counters in the group info and * these are safe to access even after the buddy has been unloaded */ memcpy(&sg, grinfo, i); seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, sg.info.bb_fragments, sg.info.bb_first_free); for (i = 0; i <= 13; i++) seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ? 
sg.info.bb_counters[i] : 0); seq_puts(seq, " ]"); if (EXT4_MB_GRP_BBITMAP_CORRUPT(&sg.info)) seq_puts(seq, " Block bitmap corrupted!"); seq_putc(seq, '\n'); return 0; } static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) { } const struct seq_operations ext4_mb_seq_groups_ops = { .start = ext4_mb_seq_groups_start, .next = ext4_mb_seq_groups_next, .stop = ext4_mb_seq_groups_stop, .show = ext4_mb_seq_groups_show, }; int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset) { struct super_block *sb = seq->private; struct ext4_sb_info *sbi = EXT4_SB(sb); seq_puts(seq, "mballoc:\n"); if (!sbi->s_mb_stats) { seq_puts(seq, "\tmb stats collection turned off.\n"); seq_puts( seq, "\tTo enable, please write \"1\" to sysfs file mb_stats.\n"); return 0; } seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs)); seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success)); seq_printf(seq, "\tgroups_scanned: %u\n", atomic_read(&sbi->s_bal_groups_scanned)); /* CR_POWER2_ALIGNED stats */ seq_puts(seq, "\tcr_p2_aligned_stats:\n"); seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[CR_POWER2_ALIGNED])); seq_printf( seq, "\t\tgroups_considered: %llu\n", atomic64_read( &sbi->s_bal_cX_groups_considered[CR_POWER2_ALIGNED])); seq_printf(seq, "\t\textents_scanned: %u\n", atomic_read(&sbi->s_bal_cX_ex_scanned[CR_POWER2_ALIGNED])); seq_printf(seq, "\t\tuseless_loops: %llu\n", atomic64_read(&sbi->s_bal_cX_failed[CR_POWER2_ALIGNED])); seq_printf(seq, "\t\tbad_suggestions: %u\n", atomic_read(&sbi->s_bal_p2_aligned_bad_suggestions)); /* CR_GOAL_LEN_FAST stats */ seq_puts(seq, "\tcr_goal_fast_stats:\n"); seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_FAST])); seq_printf(seq, "\t\tgroups_considered: %llu\n", atomic64_read( &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_FAST])); seq_printf(seq, "\t\textents_scanned: %u\n", atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_FAST])); seq_printf(seq, "\t\tuseless_loops: %llu\n", atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_FAST])); seq_printf(seq, "\t\tbad_suggestions: %u\n", atomic_read(&sbi->s_bal_goal_fast_bad_suggestions)); /* CR_BEST_AVAIL_LEN stats */ seq_puts(seq, "\tcr_best_avail_stats:\n"); seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[CR_BEST_AVAIL_LEN])); seq_printf( seq, "\t\tgroups_considered: %llu\n", atomic64_read( &sbi->s_bal_cX_groups_considered[CR_BEST_AVAIL_LEN])); seq_printf(seq, "\t\textents_scanned: %u\n", atomic_read(&sbi->s_bal_cX_ex_scanned[CR_BEST_AVAIL_LEN])); seq_printf(seq, "\t\tuseless_loops: %llu\n", atomic64_read(&sbi->s_bal_cX_failed[CR_BEST_AVAIL_LEN])); seq_printf(seq, "\t\tbad_suggestions: %u\n", atomic_read(&sbi->s_bal_best_avail_bad_suggestions)); /* CR_GOAL_LEN_SLOW stats */ seq_puts(seq, "\tcr_goal_slow_stats:\n"); seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[CR_GOAL_LEN_SLOW])); seq_printf(seq, "\t\tgroups_considered: %llu\n", atomic64_read( &sbi->s_bal_cX_groups_considered[CR_GOAL_LEN_SLOW])); seq_printf(seq, "\t\textents_scanned: %u\n", atomic_read(&sbi->s_bal_cX_ex_scanned[CR_GOAL_LEN_SLOW])); seq_printf(seq, "\t\tuseless_loops: %llu\n", atomic64_read(&sbi->s_bal_cX_failed[CR_GOAL_LEN_SLOW])); /* CR_ANY_FREE stats */ seq_puts(seq, "\tcr_any_free_stats:\n"); seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[CR_ANY_FREE])); seq_printf( seq, "\t\tgroups_considered: %llu\n", atomic64_read(&sbi->s_bal_cX_groups_considered[CR_ANY_FREE])); seq_printf(seq, "\t\textents_scanned: %u\n", 
atomic_read(&sbi->s_bal_cX_ex_scanned[CR_ANY_FREE])); seq_printf(seq, "\t\tuseless_loops: %llu\n", atomic64_read(&sbi->s_bal_cX_failed[CR_ANY_FREE])); /* Aggregates */ seq_printf(seq, "\textents_scanned: %u\n", atomic_read(&sbi->s_bal_ex_scanned)); seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals)); seq_printf(seq, "\t\tlen_goal_hits: %u\n", atomic_read(&sbi->s_bal_len_goals)); seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders)); seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks)); seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks)); seq_printf(seq, "\tbuddies_generated: %u/%u\n", atomic_read(&sbi->s_mb_buddies_generated), ext4_get_groups_count(sb)); seq_printf(seq, "\tbuddies_time_used: %llu\n", atomic64_read(&sbi->s_mb_generation_time)); seq_printf(seq, "\tpreallocated: %u\n", atomic_read(&sbi->s_mb_preallocated)); seq_printf(seq, "\tdiscarded: %u\n", atomic_read(&sbi->s_mb_discarded)); return 0; } static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos) { struct super_block *sb = pde_data(file_inode(seq->file)); unsigned long position; if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) return NULL; position = *pos + 1; return (void *) ((unsigned long) position); } static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos) { struct super_block *sb = pde_data(file_inode(seq->file)); unsigned long position; ++*pos; if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) return NULL; position = *pos + 1; return (void *) ((unsigned long) position); } static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v) { struct super_block *sb = pde_data(file_inode(seq->file)); struct ext4_sb_info *sbi = EXT4_SB(sb); unsigned long position = ((unsigned long) v); struct ext4_group_info *grp; unsigned int count; position--; if (position >= MB_NUM_ORDERS(sb)) { position -= MB_NUM_ORDERS(sb); if (position == 0) seq_puts(seq, "avg_fragment_size_lists:\n"); count = 0; read_lock(&sbi->s_mb_avg_fragment_size_locks[position]); list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position], bb_avg_fragment_size_node) count++; read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]); seq_printf(seq, "\tlist_order_%u_groups: %u\n", (unsigned int)position, count); return 0; } if (position == 0) { seq_printf(seq, "optimize_scan: %d\n", test_opt2(sb, MB_OPTIMIZE_SCAN) ? 
1 : 0); seq_puts(seq, "max_free_order_lists:\n"); } count = 0; read_lock(&sbi->s_mb_largest_free_orders_locks[position]); list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position], bb_largest_free_order_node) count++; read_unlock(&sbi->s_mb_largest_free_orders_locks[position]); seq_printf(seq, "\tlist_order_%u_groups: %u\n", (unsigned int)position, count); return 0; } static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v) { } const struct seq_operations ext4_mb_seq_structs_summary_ops = { .start = ext4_mb_seq_structs_summary_start, .next = ext4_mb_seq_structs_summary_next, .stop = ext4_mb_seq_structs_summary_stop, .show = ext4_mb_seq_structs_summary_show, }; static struct kmem_cache *get_groupinfo_cache(int blocksize_bits) { int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index]; BUG_ON(!cachep); return cachep; } /* * Allocate the top-level s_group_info array for the specified number * of groups */ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) { struct ext4_sb_info *sbi = EXT4_SB(sb); unsigned size; struct ext4_group_info ***old_groupinfo, ***new_groupinfo; size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> EXT4_DESC_PER_BLOCK_BITS(sb); if (size <= sbi->s_group_info_size) return 0; size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); new_groupinfo = kvzalloc(size, GFP_KERNEL); if (!new_groupinfo) { ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); return -ENOMEM; } rcu_read_lock(); old_groupinfo = rcu_dereference(sbi->s_group_info); if (old_groupinfo) memcpy(new_groupinfo, old_groupinfo, sbi->s_group_info_size * sizeof(*sbi->s_group_info)); rcu_read_unlock(); rcu_assign_pointer(sbi->s_group_info, new_groupinfo); sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); if (old_groupinfo) ext4_kvfree_array_rcu(old_groupinfo); ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", sbi->s_group_info_size); return 0; } /* Create and initialize ext4_group_info data for the given group. */ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, struct ext4_group_desc *desc) { int i; int metalen = 0; int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb); struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_group_info **meta_group_info; struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); /* * First check if this group is the first of a reserved block. 
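 * (As an illustrative sketch, the lookup this function maintains is a
 * two-level one, with one second-level table per descriptor block:
 *
 *	idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);	// which table
 *	i   = group & (EXT4_DESC_PER_BLOCK(sb) - 1);	// slot within it
 *	grp = sbi->s_group_info[idx][i];		// RCU deref omitted
 *
 * so "first of a reserved block" means slot 0 of a second-level table
 * that may not have been allocated yet.)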
* If it's true, we have to allocate a new table of pointers * to ext4_group_info structures */ if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { metalen = sizeof(*meta_group_info) << EXT4_DESC_PER_BLOCK_BITS(sb); meta_group_info = kmalloc(metalen, GFP_NOFS); if (meta_group_info == NULL) { ext4_msg(sb, KERN_ERR, "can't allocate mem " "for a buddy group"); return -ENOMEM; } rcu_read_lock(); rcu_dereference(sbi->s_group_info)[idx] = meta_group_info; rcu_read_unlock(); } meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx); i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS); if (meta_group_info[i] == NULL) { ext4_msg(sb, KERN_ERR, "can't allocate buddy mem"); goto exit_group_info; } set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(meta_group_info[i]->bb_state)); /* * initialize bb_free to be able to skip * empty groups without initialization */ if (ext4_has_group_desc_csum(sb) && (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { meta_group_info[i]->bb_free = ext4_free_clusters_after_init(sb, group, desc); } else { meta_group_info[i]->bb_free = ext4_free_group_clusters(sb, desc); } INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); init_rwsem(&meta_group_info[i]->alloc_sem); meta_group_info[i]->bb_free_root = RB_ROOT; INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node); INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node); meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */ meta_group_info[i]->bb_group = group; mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group); return 0; exit_group_info: /* If a meta_group_info table has been allocated, release it now */ if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { struct ext4_group_info ***group_info; rcu_read_lock(); group_info = rcu_dereference(sbi->s_group_info); kfree(group_info[idx]); group_info[idx] = NULL; rcu_read_unlock(); } return -ENOMEM; } /* ext4_mb_add_groupinfo */ static int ext4_mb_init_backend(struct super_block *sb) { ext4_group_t ngroups = ext4_get_groups_count(sb); ext4_group_t i; struct ext4_sb_info *sbi = EXT4_SB(sb); int err; struct ext4_group_desc *desc; struct ext4_group_info ***group_info; struct kmem_cache *cachep; err = ext4_mb_alloc_groupinfo(sb, ngroups); if (err) return err; sbi->s_buddy_cache = new_inode(sb); if (sbi->s_buddy_cache == NULL) { ext4_msg(sb, KERN_ERR, "can't get new inode"); goto err_freesgi; } /* To avoid potentially colliding with an valid on-disk inode number, * use EXT4_BAD_INO for the buddy cache inode number. This inode is * not in the inode hash, so it should never be found by iget(), but * this will avoid confusion if it ever shows up during debugging. */ sbi->s_buddy_cache->i_ino = EXT4_BAD_INO; EXT4_I(sbi->s_buddy_cache)->i_disksize = 0; for (i = 0; i < ngroups; i++) { cond_resched(); desc = ext4_get_group_desc(sb, i, NULL); if (desc == NULL) { ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i); goto err_freebuddy; } if (ext4_mb_add_groupinfo(sb, i, desc) != 0) goto err_freebuddy; } if (ext4_has_feature_flex_bg(sb)) { /* a single flex group is supposed to be read by a single IO. * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is * unsigned integer, so the maximum shift is 32. 
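 *
 * Rough worked example (illustrative, assuming BLK_MAX_SEGMENT_SIZE is
 * 64 KiB): with 4 KiB blocks the cap below is 65536 >> 3 = 8192, so for
 * s_log_groups_per_flex == 4 the window is min(16, 8192) * 8 = 128
 * groups per prefetch batch, later clamped to the total group count.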
*/ if (sbi->s_es->s_log_groups_per_flex >= 32) { ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group"); goto err_freebuddy; } sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex, BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9)); sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */ } else { sbi->s_mb_prefetch = 32; } if (sbi->s_mb_prefetch > ext4_get_groups_count(sb)) sbi->s_mb_prefetch = ext4_get_groups_count(sb); /* * now many real IOs to prefetch within a single allocation at * CR_POWER2_ALIGNED. Given CR_POWER2_ALIGNED is an CPU-related * optimization we shouldn't try to load too many groups, at some point * we should start to use what we've got in memory. * with an average random access time 5ms, it'd take a second to get * 200 groups (* N with flex_bg), so let's make this limit 4 */ sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4; if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb)) sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb); return 0; err_freebuddy: cachep = get_groupinfo_cache(sb->s_blocksize_bits); while (i-- > 0) { struct ext4_group_info *grp = ext4_get_group_info(sb, i); if (grp) kmem_cache_free(cachep, grp); } i = sbi->s_group_info_size; rcu_read_lock(); group_info = rcu_dereference(sbi->s_group_info); while (i-- > 0) kfree(group_info[i]); rcu_read_unlock(); iput(sbi->s_buddy_cache); err_freesgi: rcu_read_lock(); kvfree(rcu_dereference(sbi->s_group_info)); rcu_read_unlock(); return -ENOMEM; } static void ext4_groupinfo_destroy_slabs(void) { int i; for (i = 0; i < NR_GRPINFO_CACHES; i++) { kmem_cache_destroy(ext4_groupinfo_caches[i]); ext4_groupinfo_caches[i] = NULL; } } static int ext4_groupinfo_create_slab(size_t size) { static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex); int slab_size; int blocksize_bits = order_base_2(size); int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; struct kmem_cache *cachep; if (cache_index >= NR_GRPINFO_CACHES) return -EINVAL; if (unlikely(cache_index < 0)) cache_index = 0; mutex_lock(&ext4_grpinfo_slab_create_mutex); if (ext4_groupinfo_caches[cache_index]) { mutex_unlock(&ext4_grpinfo_slab_create_mutex); return 0; /* Already created */ } slab_size = offsetof(struct ext4_group_info, bb_counters[blocksize_bits + 2]); cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index], slab_size, 0, SLAB_RECLAIM_ACCOUNT, NULL); ext4_groupinfo_caches[cache_index] = cachep; mutex_unlock(&ext4_grpinfo_slab_create_mutex); if (!cachep) { printk(KERN_EMERG "EXT4-fs: no memory for groupinfo slab cache\n"); return -ENOMEM; } return 0; } static void ext4_discard_work(struct work_struct *work) { struct ext4_sb_info *sbi = container_of(work, struct ext4_sb_info, s_discard_work); struct super_block *sb = sbi->s_sb; struct ext4_free_data *fd, *nfd; struct ext4_buddy e4b; LIST_HEAD(discard_list); ext4_group_t grp, load_grp; int err = 0; spin_lock(&sbi->s_md_lock); list_splice_init(&sbi->s_discard_list, &discard_list); spin_unlock(&sbi->s_md_lock); load_grp = UINT_MAX; list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) { /* * If filesystem is umounting or no memory or suffering * from no space, give up the discard */ if ((sb->s_flags & SB_ACTIVE) && !err && !atomic_read(&sbi->s_retry_alloc_pending)) { grp = fd->efd_group; if (grp != load_grp) { if (load_grp != UINT_MAX) ext4_mb_unload_buddy(&e4b); err = ext4_mb_load_buddy(sb, grp, &e4b); if (err) { kmem_cache_free(ext4_free_data_cachep, fd); load_grp = UINT_MAX; continue; } else { load_grp = grp; } } ext4_lock_group(sb, grp); 
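			/*
			 * Buddy for this group is loaded and the group is
			 * locked: issue the discard for this entry's freed
			 * cluster range.
			 */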
ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster, fd->efd_start_cluster + fd->efd_count - 1, 1); ext4_unlock_group(sb, grp); } kmem_cache_free(ext4_free_data_cachep, fd); } if (load_grp != UINT_MAX) ext4_mb_unload_buddy(&e4b); } int ext4_mb_init(struct super_block *sb) { struct ext4_sb_info *sbi = EXT4_SB(sb); unsigned i, j; unsigned offset, offset_incr; unsigned max; int ret; i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets); sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); if (sbi->s_mb_offsets == NULL) { ret = -ENOMEM; goto out; } i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs); sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); if (sbi->s_mb_maxs == NULL) { ret = -ENOMEM; goto out; } ret = ext4_groupinfo_create_slab(sb->s_blocksize); if (ret < 0) goto out; /* order 0 is regular bitmap */ sbi->s_mb_maxs[0] = sb->s_blocksize << 3; sbi->s_mb_offsets[0] = 0; i = 1; offset = 0; offset_incr = 1 << (sb->s_blocksize_bits - 1); max = sb->s_blocksize << 2; do { sbi->s_mb_offsets[i] = offset; sbi->s_mb_maxs[i] = max; offset += offset_incr; offset_incr = offset_incr >> 1; max = max >> 1; i++; } while (i < MB_NUM_ORDERS(sb)); sbi->s_mb_avg_fragment_size = kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head), GFP_KERNEL); if (!sbi->s_mb_avg_fragment_size) { ret = -ENOMEM; goto out; } sbi->s_mb_avg_fragment_size_locks = kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t), GFP_KERNEL); if (!sbi->s_mb_avg_fragment_size_locks) { ret = -ENOMEM; goto out; } for (i = 0; i < MB_NUM_ORDERS(sb); i++) { INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]); rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]); } sbi->s_mb_largest_free_orders = kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head), GFP_KERNEL); if (!sbi->s_mb_largest_free_orders) { ret = -ENOMEM; goto out; } sbi->s_mb_largest_free_orders_locks = kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t), GFP_KERNEL); if (!sbi->s_mb_largest_free_orders_locks) { ret = -ENOMEM; goto out; } for (i = 0; i < MB_NUM_ORDERS(sb); i++) { INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]); rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]); } spin_lock_init(&sbi->s_md_lock); sbi->s_mb_free_pending = 0; INIT_LIST_HEAD(&sbi->s_freed_data_list[0]); INIT_LIST_HEAD(&sbi->s_freed_data_list[1]); INIT_LIST_HEAD(&sbi->s_discard_list); INIT_WORK(&sbi->s_discard_work, ext4_discard_work); atomic_set(&sbi->s_retry_alloc_pending, 0); sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN; sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN; sbi->s_mb_stats = MB_DEFAULT_STATS; sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD; sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS; sbi->s_mb_best_avail_max_trim_order = MB_DEFAULT_BEST_AVAIL_TRIM_ORDER; /* * The default group preallocation is 512, which for 4k block * sizes translates to 2 megabytes. However for bigalloc file * systems, this is probably too big (i.e, if the cluster size * is 1 megabyte, then group preallocation size becomes half a * gigabyte!). As a default, we will keep a two megabyte * group pralloc size for cluster sizes up to 64k, and after * that, we will force a minimum group preallocation size of * 32 clusters. This translates to 8 megs when the cluster * size is 256k, and 32 megs when the cluster size is 1 meg, * which seems reasonable as a default. */ sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >> sbi->s_cluster_bits, 32); /* * If there is a s_stripe > 1, then we set the s_mb_group_prealloc * to the lowest multiple of s_stripe which is bigger than * the s_mb_group_prealloc as determined above. 
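 * For example (illustrative, non-bigalloc so a cluster equals a block):
 * with the default group preallocation of 512 clusters and
 * s_stripe == 48, this rounds up to roundup(512, 48) = 528 clusters.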
We want * the preallocation size to be an exact multiple of the * RAID stripe size so that preallocations don't fragment * the stripes. */ if (sbi->s_stripe > 1) { sbi->s_mb_group_prealloc = roundup( sbi->s_mb_group_prealloc, EXT4_NUM_B2C(sbi, sbi->s_stripe)); } sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group); if (sbi->s_locality_groups == NULL) { ret = -ENOMEM; goto out; } for_each_possible_cpu(i) { struct ext4_locality_group *lg; lg = per_cpu_ptr(sbi->s_locality_groups, i); mutex_init(&lg->lg_mutex); for (j = 0; j < PREALLOC_TB_SIZE; j++) INIT_LIST_HEAD(&lg->lg_prealloc_list[j]); spin_lock_init(&lg->lg_prealloc_lock); } if (bdev_nonrot(sb->s_bdev)) sbi->s_mb_max_linear_groups = 0; else sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT; /* init file for buddy data */ ret = ext4_mb_init_backend(sb); if (ret != 0) goto out_free_locality_groups; return 0; out_free_locality_groups: free_percpu(sbi->s_locality_groups); sbi->s_locality_groups = NULL; out: kfree(sbi->s_mb_avg_fragment_size); kfree(sbi->s_mb_avg_fragment_size_locks); kfree(sbi->s_mb_largest_free_orders); kfree(sbi->s_mb_largest_free_orders_locks); kfree(sbi->s_mb_offsets); sbi->s_mb_offsets = NULL; kfree(sbi->s_mb_maxs); sbi->s_mb_maxs = NULL; return ret; } /* need to called with the ext4 group lock held */ static int ext4_mb_cleanup_pa(struct ext4_group_info *grp) { struct ext4_prealloc_space *pa; struct list_head *cur, *tmp; int count = 0; list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) { pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); list_del(&pa->pa_group_list); count++; kmem_cache_free(ext4_pspace_cachep, pa); } return count; } void ext4_mb_release(struct super_block *sb) { ext4_group_t ngroups = ext4_get_groups_count(sb); ext4_group_t i; int num_meta_group_infos; struct ext4_group_info *grinfo, ***group_info; struct ext4_sb_info *sbi = EXT4_SB(sb); struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); int count; if (test_opt(sb, DISCARD)) { /* * wait the discard work to drain all of ext4_free_data */ flush_work(&sbi->s_discard_work); WARN_ON_ONCE(!list_empty(&sbi->s_discard_list)); } if (sbi->s_group_info) { for (i = 0; i < ngroups; i++) { cond_resched(); grinfo = ext4_get_group_info(sb, i); if (!grinfo) continue; mb_group_bb_bitmap_free(grinfo); ext4_lock_group(sb, i); count = ext4_mb_cleanup_pa(grinfo); if (count) mb_debug(sb, "mballoc: %d PAs left\n", count); ext4_unlock_group(sb, i); kmem_cache_free(cachep, grinfo); } num_meta_group_infos = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> EXT4_DESC_PER_BLOCK_BITS(sb); rcu_read_lock(); group_info = rcu_dereference(sbi->s_group_info); for (i = 0; i < num_meta_group_infos; i++) kfree(group_info[i]); kvfree(group_info); rcu_read_unlock(); } kfree(sbi->s_mb_avg_fragment_size); kfree(sbi->s_mb_avg_fragment_size_locks); kfree(sbi->s_mb_largest_free_orders); kfree(sbi->s_mb_largest_free_orders_locks); kfree(sbi->s_mb_offsets); kfree(sbi->s_mb_maxs); iput(sbi->s_buddy_cache); if (sbi->s_mb_stats) { ext4_msg(sb, KERN_INFO, "mballoc: %u blocks %u reqs (%u success)", atomic_read(&sbi->s_bal_allocated), atomic_read(&sbi->s_bal_reqs), atomic_read(&sbi->s_bal_success)); ext4_msg(sb, KERN_INFO, "mballoc: %u extents scanned, %u groups scanned, %u goal hits, " "%u 2^N hits, %u breaks, %u lost", atomic_read(&sbi->s_bal_ex_scanned), atomic_read(&sbi->s_bal_groups_scanned), atomic_read(&sbi->s_bal_goals), atomic_read(&sbi->s_bal_2orders), atomic_read(&sbi->s_bal_breaks), atomic_read(&sbi->s_mb_lost_chunks)); ext4_msg(sb, KERN_INFO, 
"mballoc: %u generated and it took %llu", atomic_read(&sbi->s_mb_buddies_generated), atomic64_read(&sbi->s_mb_generation_time)); ext4_msg(sb, KERN_INFO, "mballoc: %u preallocated, %u discarded", atomic_read(&sbi->s_mb_preallocated), atomic_read(&sbi->s_mb_discarded)); } free_percpu(sbi->s_locality_groups); } static inline int ext4_issue_discard(struct super_block *sb, ext4_group_t block_group, ext4_grpblk_t cluster, int count) { ext4_fsblk_t discard_block; discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) + ext4_group_first_block_no(sb, block_group)); count = EXT4_C2B(EXT4_SB(sb), count); trace_ext4_discard_blocks(sb, (unsigned long long) discard_block, count); return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); } static void ext4_free_data_in_buddy(struct super_block *sb, struct ext4_free_data *entry) { struct ext4_buddy e4b; struct ext4_group_info *db; int err, count = 0; mb_debug(sb, "gonna free %u blocks in group %u (0x%p):", entry->efd_count, entry->efd_group, entry); err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b); /* we expect to find existing buddy because it's pinned */ BUG_ON(err != 0); spin_lock(&EXT4_SB(sb)->s_md_lock); EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count; spin_unlock(&EXT4_SB(sb)->s_md_lock); db = e4b.bd_info; /* there are blocks to put in buddy to make them really free */ count += entry->efd_count; ext4_lock_group(sb, entry->efd_group); /* Take it out of per group rb tree */ rb_erase(&entry->efd_node, &(db->bb_free_root)); mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count); /* * Clear the trimmed flag for the group so that the next * ext4_trim_fs can trim it. */ EXT4_MB_GRP_CLEAR_TRIMMED(db); if (!db->bb_free_root.rb_node) { /* No more items in the per group rb tree * balance refcounts from ext4_mb_free_metadata() */ folio_put(e4b.bd_buddy_folio); folio_put(e4b.bd_bitmap_folio); } ext4_unlock_group(sb, entry->efd_group); ext4_mb_unload_buddy(&e4b); mb_debug(sb, "freed %d blocks in 1 structures\n", count); } /* * This function is called by the jbd2 layer once the commit has finished, * so we know we can free the blocks that were released with that commit. 
*/ void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_free_data *entry, *tmp; LIST_HEAD(freed_data_list); struct list_head *s_freed_head = &sbi->s_freed_data_list[commit_tid & 1]; bool wake; list_replace_init(s_freed_head, &freed_data_list); list_for_each_entry(entry, &freed_data_list, efd_list) ext4_free_data_in_buddy(sb, entry); if (test_opt(sb, DISCARD)) { spin_lock(&sbi->s_md_lock); wake = list_empty(&sbi->s_discard_list); list_splice_tail(&freed_data_list, &sbi->s_discard_list); spin_unlock(&sbi->s_md_lock); if (wake) queue_work(system_unbound_wq, &sbi->s_discard_work); } else { list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list) kmem_cache_free(ext4_free_data_cachep, entry); } } int __init ext4_init_mballoc(void) { ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space, SLAB_RECLAIM_ACCOUNT); if (ext4_pspace_cachep == NULL) goto out; ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context, SLAB_RECLAIM_ACCOUNT); if (ext4_ac_cachep == NULL) goto out_pa_free; ext4_free_data_cachep = KMEM_CACHE(ext4_free_data, SLAB_RECLAIM_ACCOUNT); if (ext4_free_data_cachep == NULL) goto out_ac_free; return 0; out_ac_free: kmem_cache_destroy(ext4_ac_cachep); out_pa_free: kmem_cache_destroy(ext4_pspace_cachep); out: return -ENOMEM; } void ext4_exit_mballoc(void) { /* * Wait for completion of call_rcu()'s on ext4_pspace_cachep * before destroying the slab cache. */ rcu_barrier(); kmem_cache_destroy(ext4_pspace_cachep); kmem_cache_destroy(ext4_ac_cachep); kmem_cache_destroy(ext4_free_data_cachep); ext4_groupinfo_destroy_slabs(); } #define EXT4_MB_BITMAP_MARKED_CHECK 0x0001 #define EXT4_MB_SYNC_UPDATE 0x0002 static int ext4_mb_mark_context(handle_t *handle, struct super_block *sb, bool state, ext4_group_t group, ext4_grpblk_t blkoff, ext4_grpblk_t len, int flags, ext4_grpblk_t *ret_changed) { struct ext4_sb_info *sbi = EXT4_SB(sb); struct buffer_head *bitmap_bh = NULL; struct ext4_group_desc *gdp; struct buffer_head *gdp_bh; int err; unsigned int i, already, changed = len; KUNIT_STATIC_STUB_REDIRECT(ext4_mb_mark_context, handle, sb, state, group, blkoff, len, flags, ret_changed); if (ret_changed) *ret_changed = 0; bitmap_bh = ext4_read_block_bitmap(sb, group); if (IS_ERR(bitmap_bh)) return PTR_ERR(bitmap_bh); if (handle) { BUFFER_TRACE(bitmap_bh, "getting write access"); err = ext4_journal_get_write_access(handle, sb, bitmap_bh, EXT4_JTR_NONE); if (err) goto out_err; } err = -EIO; gdp = ext4_get_group_desc(sb, group, &gdp_bh); if (!gdp) goto out_err; if (handle) { BUFFER_TRACE(gdp_bh, "get_write_access"); err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE); if (err) goto out_err; } ext4_lock_group(sb, group); if (ext4_has_group_desc_csum(sb) && (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); ext4_free_group_clusters_set(sb, gdp, ext4_free_clusters_after_init(sb, group, gdp)); } if (flags & EXT4_MB_BITMAP_MARKED_CHECK) { already = 0; for (i = 0; i < len; i++) if (mb_test_bit(blkoff + i, bitmap_bh->b_data) == state) already++; changed = len - already; } if (state) { mb_set_bits(bitmap_bh->b_data, blkoff, len); ext4_free_group_clusters_set(sb, gdp, ext4_free_group_clusters(sb, gdp) - changed); } else { mb_clear_bits(bitmap_bh->b_data, blkoff, len); ext4_free_group_clusters_set(sb, gdp, ext4_free_group_clusters(sb, gdp) + changed); } ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); ext4_group_desc_csum_set(sb, group, gdp); ext4_unlock_group(sb, group); if 
(ret_changed) *ret_changed = changed; if (sbi->s_log_groups_per_flex) { ext4_group_t flex_group = ext4_flex_group(sbi, group); struct flex_groups *fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group); if (state) atomic64_sub(changed, &fg->free_clusters); else atomic64_add(changed, &fg->free_clusters); } err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); if (err) goto out_err; err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh); if (err) goto out_err; if (flags & EXT4_MB_SYNC_UPDATE) { sync_dirty_buffer(bitmap_bh); sync_dirty_buffer(gdp_bh); } out_err: brelse(bitmap_bh); return err; } /* * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps * Returns 0 if success or error code */ static noinline_for_stack int ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, handle_t *handle, unsigned int reserv_clstrs) { struct ext4_group_desc *gdp; struct ext4_sb_info *sbi; struct super_block *sb; ext4_fsblk_t block; int err, len; int flags = 0; ext4_grpblk_t changed; BUG_ON(ac->ac_status != AC_STATUS_FOUND); BUG_ON(ac->ac_b_ex.fe_len <= 0); sb = ac->ac_sb; sbi = EXT4_SB(sb); gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, NULL); if (!gdp) return -EIO; ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, ext4_free_group_clusters(sb, gdp)); block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); if (!ext4_inode_block_valid(ac->ac_inode, block, len)) { ext4_error(sb, "Allocating blocks %llu-%llu which overlap " "fs metadata", block, block+len); /* File system mounted not to panic on error * Fix the bitmap and return EFSCORRUPTED * We leak some of the blocks here. */ err = ext4_mb_mark_context(handle, sb, true, ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len, 0, NULL); if (!err) err = -EFSCORRUPTED; return err; } #ifdef AGGRESSIVE_CHECK flags |= EXT4_MB_BITMAP_MARKED_CHECK; #endif err = ext4_mb_mark_context(handle, sb, true, ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len, flags, &changed); if (err && changed == 0) return err; #ifdef AGGRESSIVE_CHECK BUG_ON(changed != ac->ac_b_ex.fe_len); #endif percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len); /* * Now reduce the dirty block count also. Should not go negative */ if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) /* release all the reserved blocks if non delalloc */ percpu_counter_sub(&sbi->s_dirtyclusters_counter, reserv_clstrs); return err; } /* * Idempotent helper for Ext4 fast commit replay path to set the state of * blocks in bitmaps and update counters. */ void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block, int len, bool state) { struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_group_t group; ext4_grpblk_t blkoff; int err = 0; unsigned int clen, thisgrp_len; while (len > 0) { ext4_get_group_no_and_offset(sb, block, &group, &blkoff); /* * Check to see if we are freeing blocks across a group * boundary. * In case of flex_bg, this can happen that (block, len) may * span across more than one group. In that case we need to * get the corresponding group metadata to work with. * For this we have goto again loop. 
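 *
 * Sketch of the splitting (simplified pseudo-code; counters and error
 * handling omitted):
 *
 *	while (len > 0) {
 *		ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
 *		this_len = min(len, blocks left in this group);
 *		mark the clusters backing [block, block + this_len) in group;
 *		block += this_len;
 *		len   -= this_len;
 *	}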
*/ thisgrp_len = min_t(unsigned int, (unsigned int)len, EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff)); clen = EXT4_NUM_B2C(sbi, thisgrp_len); if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) { ext4_error(sb, "Marking blocks in system zone - " "Block = %llu, len = %u", block, thisgrp_len); break; } err = ext4_mb_mark_context(NULL, sb, state, group, blkoff, clen, EXT4_MB_BITMAP_MARKED_CHECK | EXT4_MB_SYNC_UPDATE, NULL); if (err) break; block += thisgrp_len; len -= thisgrp_len; BUG_ON(len < 0); } } /* * here we normalize request for locality group * Group request are normalized to s_mb_group_prealloc, which goes to * s_strip if we set the same via mount option. * s_mb_group_prealloc can be configured via * /sys/fs/ext4/<partition>/mb_group_prealloc * * XXX: should we try to preallocate more than the group has now? */ static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac) { struct super_block *sb = ac->ac_sb; struct ext4_locality_group *lg = ac->ac_lg; BUG_ON(lg == NULL); ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc; mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len); } /* * This function returns the next element to look at during inode * PA rbtree walk. We assume that we have held the inode PA rbtree lock * (ei->i_prealloc_lock) * * new_start The start of the range we want to compare * cur_start The existing start that we are comparing against * node The node of the rb_tree */ static inline struct rb_node* ext4_mb_pa_rb_next_iter(ext4_lblk_t new_start, ext4_lblk_t cur_start, struct rb_node *node) { if (new_start < cur_start) return node->rb_left; else return node->rb_right; } static inline void ext4_mb_pa_assert_overlap(struct ext4_allocation_context *ac, ext4_lblk_t start, loff_t end) { struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); struct ext4_prealloc_space *tmp_pa; ext4_lblk_t tmp_pa_start; loff_t tmp_pa_end; struct rb_node *iter; read_lock(&ei->i_prealloc_lock); for (iter = ei->i_prealloc_node.rb_node; iter; iter = ext4_mb_pa_rb_next_iter(start, tmp_pa_start, iter)) { tmp_pa = rb_entry(iter, struct ext4_prealloc_space, pa_node.inode_node); tmp_pa_start = tmp_pa->pa_lstart; tmp_pa_end = pa_logical_end(sbi, tmp_pa); spin_lock(&tmp_pa->pa_lock); if (tmp_pa->pa_deleted == 0) BUG_ON(!(start >= tmp_pa_end || end <= tmp_pa_start)); spin_unlock(&tmp_pa->pa_lock); } read_unlock(&ei->i_prealloc_lock); } /* * Given an allocation context "ac" and a range "start", "end", check * and adjust boundaries if the range overlaps with any of the existing * preallocatoins stored in the corresponding inode of the allocation context. * * Parameters: * ac allocation context * start start of the new range * end end of the new range */ static inline void ext4_mb_pa_adjust_overlap(struct ext4_allocation_context *ac, ext4_lblk_t *start, loff_t *end) { struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); struct ext4_prealloc_space *tmp_pa = NULL, *left_pa = NULL, *right_pa = NULL; struct rb_node *iter; ext4_lblk_t new_start, tmp_pa_start, right_pa_start = -1; loff_t new_end, tmp_pa_end, left_pa_end = -1; new_start = *start; new_end = *end; /* * Adjust the normalized range so that it doesn't overlap with any * existing preallocated blocks(PAs). Make sure to hold the rbtree lock * so it doesn't change underneath us. 
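 *
 * The net effect of the neighbour walk below is a simple clamp
 * (sketch):
 *
 *	if (left_pa)
 *		new_start = max(new_start, left_pa_end);
 *	if (right_pa)
 *		new_end = min(new_end, right_pa_start);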
*/ read_lock(&ei->i_prealloc_lock); /* Step 1: find any one immediate neighboring PA of the normalized range */ for (iter = ei->i_prealloc_node.rb_node; iter; iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical, tmp_pa_start, iter)) { tmp_pa = rb_entry(iter, struct ext4_prealloc_space, pa_node.inode_node); tmp_pa_start = tmp_pa->pa_lstart; tmp_pa_end = pa_logical_end(sbi, tmp_pa); /* PA must not overlap original request */ spin_lock(&tmp_pa->pa_lock); if (tmp_pa->pa_deleted == 0) BUG_ON(!(ac->ac_o_ex.fe_logical >= tmp_pa_end || ac->ac_o_ex.fe_logical < tmp_pa_start)); spin_unlock(&tmp_pa->pa_lock); } /* * Step 2: check if the found PA is left or right neighbor and * get the other neighbor */ if (tmp_pa) { if (tmp_pa->pa_lstart < ac->ac_o_ex.fe_logical) { struct rb_node *tmp; left_pa = tmp_pa; tmp = rb_next(&left_pa->pa_node.inode_node); if (tmp) { right_pa = rb_entry(tmp, struct ext4_prealloc_space, pa_node.inode_node); } } else { struct rb_node *tmp; right_pa = tmp_pa; tmp = rb_prev(&right_pa->pa_node.inode_node); if (tmp) { left_pa = rb_entry(tmp, struct ext4_prealloc_space, pa_node.inode_node); } } } /* Step 3: get the non deleted neighbors */ if (left_pa) { for (iter = &left_pa->pa_node.inode_node;; iter = rb_prev(iter)) { if (!iter) { left_pa = NULL; break; } tmp_pa = rb_entry(iter, struct ext4_prealloc_space, pa_node.inode_node); left_pa = tmp_pa; spin_lock(&tmp_pa->pa_lock); if (tmp_pa->pa_deleted == 0) { spin_unlock(&tmp_pa->pa_lock); break; } spin_unlock(&tmp_pa->pa_lock); } } if (right_pa) { for (iter = &right_pa->pa_node.inode_node;; iter = rb_next(iter)) { if (!iter) { right_pa = NULL; break; } tmp_pa = rb_entry(iter, struct ext4_prealloc_space, pa_node.inode_node); right_pa = tmp_pa; spin_lock(&tmp_pa->pa_lock); if (tmp_pa->pa_deleted == 0) { spin_unlock(&tmp_pa->pa_lock); break; } spin_unlock(&tmp_pa->pa_lock); } } if (left_pa) { left_pa_end = pa_logical_end(sbi, left_pa); BUG_ON(left_pa_end > ac->ac_o_ex.fe_logical); } if (right_pa) { right_pa_start = right_pa->pa_lstart; BUG_ON(right_pa_start <= ac->ac_o_ex.fe_logical); } /* Step 4: trim our normalized range to not overlap with the neighbors */ if (left_pa) { if (left_pa_end > new_start) new_start = left_pa_end; } if (right_pa) { if (right_pa_start < new_end) new_end = right_pa_start; } read_unlock(&ei->i_prealloc_lock); /* XXX: extra loop to check we really don't overlap preallocations */ ext4_mb_pa_assert_overlap(ac, new_start, new_end); *start = new_start; *end = new_end; } /* * Normalization means making request better in terms of * size and alignment */ static noinline_for_stack void ext4_mb_normalize_request(struct ext4_allocation_context *ac, struct ext4_allocation_request *ar) { struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); struct ext4_super_block *es = sbi->s_es; int bsbits, max; loff_t size, start_off, end; loff_t orig_size __maybe_unused; ext4_lblk_t start; /* do normalize only data requests, metadata requests do not need preallocation */ if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) return; /* sometime caller may want exact blocks */ if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) return; /* caller may indicate that preallocation isn't * required (it's a tail, for example) */ if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC) return; if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) { ext4_mb_normalize_group_request(ac); return ; } bsbits = ac->ac_sb->s_blocksize_bits; /* first, let's learn actual file size * given current request is allocated */ size = extent_logical_end(sbi, &ac->ac_o_ex); size = size << bsbits; if 
(size < i_size_read(ac->ac_inode)) size = i_size_read(ac->ac_inode); orig_size = size; /* max size of free chunks */ max = 2 << bsbits; #define NRL_CHECK_SIZE(req, size, max, chunk_size) \ (req <= (size) || max <= (chunk_size)) /* first, try to predict filesize */ /* XXX: should this table be tunable? */ start_off = 0; if (size <= 16 * 1024) { size = 16 * 1024; } else if (size <= 32 * 1024) { size = 32 * 1024; } else if (size <= 64 * 1024) { size = 64 * 1024; } else if (size <= 128 * 1024) { size = 128 * 1024; } else if (size <= 256 * 1024) { size = 256 * 1024; } else if (size <= 512 * 1024) { size = 512 * 1024; } else if (size <= 1024 * 1024) { size = 1024 * 1024; } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) { start_off = ((loff_t)ac->ac_o_ex.fe_logical >> (21 - bsbits)) << 21; size = 2 * 1024 * 1024; } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) { start_off = ((loff_t)ac->ac_o_ex.fe_logical >> (22 - bsbits)) << 22; size = 4 * 1024 * 1024; } else if (NRL_CHECK_SIZE(EXT4_C2B(sbi, ac->ac_o_ex.fe_len), (8<<20)>>bsbits, max, 8 * 1024)) { start_off = ((loff_t)ac->ac_o_ex.fe_logical >> (23 - bsbits)) << 23; size = 8 * 1024 * 1024; } else { start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits; size = (loff_t) EXT4_C2B(sbi, ac->ac_o_ex.fe_len) << bsbits; } size = size >> bsbits; start = start_off >> bsbits; /* * For tiny groups (smaller than 8MB) the chosen allocation * alignment may be larger than group size. Make sure the * alignment does not move allocation to a different group which * makes mballoc fail assertions later. */ start = max(start, rounddown(ac->ac_o_ex.fe_logical, (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb))); /* avoid unnecessary preallocation that may trigger assertions */ if (start + size > EXT_MAX_BLOCKS) size = EXT_MAX_BLOCKS - start; /* don't cover already allocated blocks in selected range */ if (ar->pleft && start <= ar->lleft) { size -= ar->lleft + 1 - start; start = ar->lleft + 1; } if (ar->pright && start + size - 1 >= ar->lright) size -= start + size - ar->lright; /* * Trim allocation request for filesystems with artificially small * groups. */ if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb); end = start + size; ext4_mb_pa_adjust_overlap(ac, &start, &end); size = end - start; /* * In this function "start" and "size" are normalized for better * alignment and length such that we could preallocate more blocks. * This normalization is done such that original request of * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and * "size" boundaries. * (Note fe_len can be relaxed since FS block allocation API does not * provide gurantee on number of contiguous blocks allocation since that * depends upon free space left, etc). * In case of inode pa, later we use the allocated blocks * [pa_pstart + fe_logical - pa_lstart, fe_len/size] from the preallocated * range of goal/best blocks [start, size] to put it at the * ac_o_ex.fe_logical extent of this inode. 
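 *
 * Rough worked example (4 KiB blocks, no neighbouring PAs or allocated
 * siblings): a 4-block request at logical block 100 of a small file
 * gives a predicted size of 104 blocks = 416 KiB, which lands in the
 * 512 KiB bucket above, so the goal becomes 128 blocks starting at
 * logical 0 and the original range 100..103 lies inside it.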
* (See ext4_mb_use_inode_pa() for more details) */ if (start + size <= ac->ac_o_ex.fe_logical || start > ac->ac_o_ex.fe_logical) { ext4_msg(ac->ac_sb, KERN_ERR, "start %lu, size %lu, fe_logical %lu", (unsigned long) start, (unsigned long) size, (unsigned long) ac->ac_o_ex.fe_logical); BUG(); } BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); /* now prepare goal request */ /* XXX: is it better to align blocks WRT to logical * placement or satisfy big request as is */ ac->ac_g_ex.fe_logical = start; ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size); ac->ac_orig_goal_len = ac->ac_g_ex.fe_len; /* define goal start in order to merge */ if (ar->pright && (ar->lright == (start + size)) && ar->pright >= size && ar->pright - size >= le32_to_cpu(es->s_first_data_block)) { /* merge to the right */ ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size, &ac->ac_g_ex.fe_group, &ac->ac_g_ex.fe_start); ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; } if (ar->pleft && (ar->lleft + 1 == start) && ar->pleft + 1 < ext4_blocks_count(es)) { /* merge to the left */ ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1, &ac->ac_g_ex.fe_group, &ac->ac_g_ex.fe_start); ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; } mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size, orig_size, start); } static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) { struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) { atomic_inc(&sbi->s_bal_reqs); atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) atomic_inc(&sbi->s_bal_success); atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); for (int i=0; i<EXT4_MB_NUM_CRS; i++) { atomic_add(ac->ac_cX_found[i], &sbi->s_bal_cX_ex_scanned[i]); } atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned); if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) atomic_inc(&sbi->s_bal_goals); /* did we allocate as much as normalizer originally wanted? */ if (ac->ac_f_ex.fe_len == ac->ac_orig_goal_len) atomic_inc(&sbi->s_bal_len_goals); if (ac->ac_found > sbi->s_mb_max_to_scan) atomic_inc(&sbi->s_bal_breaks); } if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) trace_ext4_mballoc_alloc(ac); else trace_ext4_mballoc_prealloc(ac); } /* * Called on failure; free up any blocks from the inode PA for this * context. We don't need this for MB_GROUP_PA because we only change * pa_free in ext4_mb_release_context(), but on failure, we've already * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed. */ static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) { struct ext4_prealloc_space *pa = ac->ac_pa; struct ext4_buddy e4b; int err; if (pa == NULL) { if (ac->ac_f_ex.fe_len == 0) return; err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b); if (WARN_RATELIMIT(err, "ext4: mb_load_buddy failed (%d)", err)) /* * This should never happen since we pin the * pages in the ext4_allocation_context so * ext4_mb_load_buddy() should never fail. 
*/ return; ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group); mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start, ac->ac_f_ex.fe_len); ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group); ext4_mb_unload_buddy(&e4b); return; } if (pa->pa_type == MB_INODE_PA) { spin_lock(&pa->pa_lock); pa->pa_free += ac->ac_b_ex.fe_len; spin_unlock(&pa->pa_lock); } } /* * use blocks preallocated to inode */ static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, struct ext4_prealloc_space *pa) { struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); ext4_fsblk_t start; ext4_fsblk_t end; int len; /* found preallocated blocks, use them */ start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); len = EXT4_NUM_B2C(sbi, end - start); ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, &ac->ac_b_ex.fe_start); ac->ac_b_ex.fe_len = len; ac->ac_status = AC_STATUS_FOUND; ac->ac_pa = pa; BUG_ON(start < pa->pa_pstart); BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); BUG_ON(pa->pa_free < len); BUG_ON(ac->ac_b_ex.fe_len <= 0); pa->pa_free -= len; mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); } /* * use blocks preallocated to locality group */ static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, struct ext4_prealloc_space *pa) { unsigned int len = ac->ac_o_ex.fe_len; ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, &ac->ac_b_ex.fe_group, &ac->ac_b_ex.fe_start); ac->ac_b_ex.fe_len = len; ac->ac_status = AC_STATUS_FOUND; ac->ac_pa = pa; /* we don't correct pa_pstart or pa_len here to avoid * possible race when the group is being loaded concurrently * instead we correct pa later, after blocks are marked * in on-disk bitmap -- see ext4_mb_release_context() * Other CPUs are prevented from allocating from this pa by lg_mutex */ mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n", pa->pa_lstart, len, pa); } /* * Return the prealloc space that have minimal distance * from the goal block. @cpa is the prealloc * space that is having currently known minimal distance * from the goal block. */ static struct ext4_prealloc_space * ext4_mb_check_group_pa(ext4_fsblk_t goal_block, struct ext4_prealloc_space *pa, struct ext4_prealloc_space *cpa) { ext4_fsblk_t cur_distance, new_distance; if (cpa == NULL) { atomic_inc(&pa->pa_count); return pa; } cur_distance = abs(goal_block - cpa->pa_pstart); new_distance = abs(goal_block - pa->pa_pstart); if (cur_distance <= new_distance) return cpa; /* drop the previous reference */ atomic_dec(&cpa->pa_count); atomic_inc(&pa->pa_count); return pa; } /* * check if found pa meets EXT4_MB_HINT_GOAL_ONLY */ static bool ext4_mb_pa_goal_check(struct ext4_allocation_context *ac, struct ext4_prealloc_space *pa) { struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); ext4_fsblk_t start; if (likely(!(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))) return true; /* * If EXT4_MB_HINT_GOAL_ONLY is set, ac_g_ex will not be adjusted * in ext4_mb_normalize_request and will keep same with ac_o_ex * from ext4_mb_initialize_context. Choose ac_g_ex here to keep * consistent with ext4_mb_find_by_goal. 
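 *
 * Illustration (non-bigalloc, made-up numbers): a PA with
 * pa_lstart == 0, pa_pstart == P and pa_len == 8 satisfies a GOAL_ONLY
 * request for 4 clusters at logical block 2 only if the goal maps to
 * physical block P + 2 and 4 <= 8 - 2.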
*/ start = pa->pa_pstart + (ac->ac_g_ex.fe_logical - pa->pa_lstart); if (ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex) != start) return false; if (ac->ac_g_ex.fe_len > pa->pa_len - EXT4_B2C(sbi, ac->ac_g_ex.fe_logical - pa->pa_lstart)) return false; return true; } /* * search goal blocks in preallocated space */ static noinline_for_stack bool ext4_mb_use_preallocated(struct ext4_allocation_context *ac) { struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); int order, i; struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); struct ext4_locality_group *lg; struct ext4_prealloc_space *tmp_pa = NULL, *cpa = NULL; struct rb_node *iter; ext4_fsblk_t goal_block; /* only data can be preallocated */ if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) return false; /* * first, try per-file preallocation by searching the inode pa rbtree. * * Here, we can't do a direct traversal of the tree because * ext4_mb_discard_group_preallocation() can paralelly mark the pa * deleted and that can cause direct traversal to skip some entries. */ read_lock(&ei->i_prealloc_lock); if (RB_EMPTY_ROOT(&ei->i_prealloc_node)) { goto try_group_pa; } /* * Step 1: Find a pa with logical start immediately adjacent to the * original logical start. This could be on the left or right. * * (tmp_pa->pa_lstart never changes so we can skip locking for it). */ for (iter = ei->i_prealloc_node.rb_node; iter; iter = ext4_mb_pa_rb_next_iter(ac->ac_o_ex.fe_logical, tmp_pa->pa_lstart, iter)) { tmp_pa = rb_entry(iter, struct ext4_prealloc_space, pa_node.inode_node); } /* * Step 2: The adjacent pa might be to the right of logical start, find * the left adjacent pa. After this step we'd have a valid tmp_pa whose * logical start is towards the left of original request's logical start */ if (tmp_pa->pa_lstart > ac->ac_o_ex.fe_logical) { struct rb_node *tmp; tmp = rb_prev(&tmp_pa->pa_node.inode_node); if (tmp) { tmp_pa = rb_entry(tmp, struct ext4_prealloc_space, pa_node.inode_node); } else { /* * If there is no adjacent pa to the left then finding * an overlapping pa is not possible hence stop searching * inode pa tree */ goto try_group_pa; } } BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical)); /* * Step 3: If the left adjacent pa is deleted, keep moving left to find * the first non deleted adjacent pa. After this step we should have a * valid tmp_pa which is guaranteed to be non deleted. */ for (iter = &tmp_pa->pa_node.inode_node;; iter = rb_prev(iter)) { if (!iter) { /* * no non deleted left adjacent pa, so stop searching * inode pa tree */ goto try_group_pa; } tmp_pa = rb_entry(iter, struct ext4_prealloc_space, pa_node.inode_node); spin_lock(&tmp_pa->pa_lock); if (tmp_pa->pa_deleted == 0) { /* * We will keep holding the pa_lock from * this point on because we don't want group discard * to delete this pa underneath us. Since group * discard is anyways an ENOSPC operation it * should be okay for it to wait a few more cycles. */ break; } else { spin_unlock(&tmp_pa->pa_lock); } } BUG_ON(!(tmp_pa && tmp_pa->pa_lstart <= ac->ac_o_ex.fe_logical)); BUG_ON(tmp_pa->pa_deleted == 1); /* * Step 4: We now have the non deleted left adjacent pa. Only this * pa can possibly satisfy the request hence check if it overlaps * original logical start and stop searching if it doesn't. 
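 *
 * (pa_logical_end() is assumed here to evaluate to
 * pa_lstart + EXT4_C2B(sbi, pa_len), i.e. the check below simply asks
 * whether the original logical start falls before the PA's logical end.)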
*/ if (ac->ac_o_ex.fe_logical >= pa_logical_end(sbi, tmp_pa)) { spin_unlock(&tmp_pa->pa_lock); goto try_group_pa; } /* non-extent files can't have physical blocks past 2^32 */ if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && (tmp_pa->pa_pstart + EXT4_C2B(sbi, tmp_pa->pa_len) > EXT4_MAX_BLOCK_FILE_PHYS)) { /* * Since PAs don't overlap, we won't find any other PA to * satisfy this. */ spin_unlock(&tmp_pa->pa_lock); goto try_group_pa; } if (tmp_pa->pa_free && likely(ext4_mb_pa_goal_check(ac, tmp_pa))) { atomic_inc(&tmp_pa->pa_count); ext4_mb_use_inode_pa(ac, tmp_pa); spin_unlock(&tmp_pa->pa_lock); read_unlock(&ei->i_prealloc_lock); return true; } else { /* * We found a valid overlapping pa but couldn't use it because * it had no free blocks. This should ideally never happen * because: * * 1. When a new inode pa is added to rbtree it must have * pa_free > 0 since otherwise we won't actually need * preallocation. * * 2. An inode pa that is in the rbtree can only have it's * pa_free become zero when another thread calls: * ext4_mb_new_blocks * ext4_mb_use_preallocated * ext4_mb_use_inode_pa * * 3. Further, after the above calls make pa_free == 0, we will * immediately remove it from the rbtree in: * ext4_mb_new_blocks * ext4_mb_release_context * ext4_mb_put_pa * * 4. Since the pa_free becoming 0 and pa_free getting removed * from tree both happen in ext4_mb_new_blocks, which is always * called with i_data_sem held for data allocations, we can be * sure that another process will never see a pa in rbtree with * pa_free == 0. */ WARN_ON_ONCE(tmp_pa->pa_free == 0); } spin_unlock(&tmp_pa->pa_lock); try_group_pa: read_unlock(&ei->i_prealloc_lock); /* can we use group allocation? */ if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) return false; /* inode may have no locality group for some reason */ lg = ac->ac_lg; if (lg == NULL) return false; order = fls(ac->ac_o_ex.fe_len) - 1; if (order > PREALLOC_TB_SIZE - 1) /* The max size of hash table is PREALLOC_TB_SIZE */ order = PREALLOC_TB_SIZE - 1; goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); /* * search for the prealloc space that is having * minimal distance from the goal block. */ for (i = order; i < PREALLOC_TB_SIZE; i++) { rcu_read_lock(); list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[i], pa_node.lg_list) { spin_lock(&tmp_pa->pa_lock); if (tmp_pa->pa_deleted == 0 && tmp_pa->pa_free >= ac->ac_o_ex.fe_len) { cpa = ext4_mb_check_group_pa(goal_block, tmp_pa, cpa); } spin_unlock(&tmp_pa->pa_lock); } rcu_read_unlock(); } if (cpa) { ext4_mb_use_group_pa(ac, cpa); return true; } return false; } /* * the function goes through all preallocation in this group and marks them * used in in-core bitmap. buddy must be generated from this bitmap * Need to be called with ext4 group lock held */ static noinline_for_stack void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, ext4_group_t group) { struct ext4_group_info *grp = ext4_get_group_info(sb, group); struct ext4_prealloc_space *pa; struct list_head *cur; ext4_group_t groupnr; ext4_grpblk_t start; int preallocated = 0; int len; if (!grp) return; /* all form of preallocation discards first load group, * so the only competing code is preallocation use. 
* we don't need any locking here * notice we do NOT ignore preallocations with pa_deleted * otherwise we could leave used blocks available for * allocation in buddy when concurrent ext4_mb_put_pa() * is dropping preallocation */ list_for_each(cur, &grp->bb_prealloc_list) { pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); spin_lock(&pa->pa_lock); ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &start); len = pa->pa_len; spin_unlock(&pa->pa_lock); if (unlikely(len == 0)) continue; BUG_ON(groupnr != group); mb_set_bits(bitmap, start, len); preallocated += len; } mb_debug(sb, "preallocated %d for group %u\n", preallocated, group); } static void ext4_mb_mark_pa_deleted(struct super_block *sb, struct ext4_prealloc_space *pa) { struct ext4_inode_info *ei; if (pa->pa_deleted) { ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n", pa->pa_type, pa->pa_pstart, pa->pa_lstart, pa->pa_len); return; } pa->pa_deleted = 1; if (pa->pa_type == MB_INODE_PA) { ei = EXT4_I(pa->pa_inode); atomic_dec(&ei->i_prealloc_active); } } static inline void ext4_mb_pa_free(struct ext4_prealloc_space *pa) { BUG_ON(!pa); BUG_ON(atomic_read(&pa->pa_count)); BUG_ON(pa->pa_deleted == 0); kmem_cache_free(ext4_pspace_cachep, pa); } static void ext4_mb_pa_callback(struct rcu_head *head) { struct ext4_prealloc_space *pa; pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); ext4_mb_pa_free(pa); } /* * drops a reference to preallocated space descriptor * if this was the last reference and the space is consumed */ static void ext4_mb_put_pa(struct ext4_allocation_context *ac, struct super_block *sb, struct ext4_prealloc_space *pa) { ext4_group_t grp; ext4_fsblk_t grp_blk; struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); /* in this short window concurrent discard can set pa_deleted */ spin_lock(&pa->pa_lock); if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { spin_unlock(&pa->pa_lock); return; } if (pa->pa_deleted == 1) { spin_unlock(&pa->pa_lock); return; } ext4_mb_mark_pa_deleted(sb, pa); spin_unlock(&pa->pa_lock); grp_blk = pa->pa_pstart; /* * If doing group-based preallocation, pa_pstart may be in the * next group when pa is used up */ if (pa->pa_type == MB_GROUP_PA) grp_blk--; grp = ext4_get_group_number(sb, grp_blk); /* * possible race: * * P1 (buddy init) P2 (regular allocation) * find block B in PA * copy on-disk bitmap to buddy * mark B in on-disk bitmap * drop PA from group * mark all PAs in buddy * * thus, P1 initializes buddy with B available. 
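* (i.e. the in-core buddy would show B as free even though it is already allocated in the on-disk bitmap)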
to prevent this * we make "copy" and "mark all PAs" atomic and serialize "drop PA" * against that pair */ ext4_lock_group(sb, grp); list_del(&pa->pa_group_list); ext4_unlock_group(sb, grp); if (pa->pa_type == MB_INODE_PA) { write_lock(pa->pa_node_lock.inode_lock); rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); write_unlock(pa->pa_node_lock.inode_lock); ext4_mb_pa_free(pa); } else { spin_lock(pa->pa_node_lock.lg_lock); list_del_rcu(&pa->pa_node.lg_list); spin_unlock(pa->pa_node_lock.lg_lock); call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); } } static void ext4_mb_pa_rb_insert(struct rb_root *root, struct rb_node *new) { struct rb_node **iter = &root->rb_node, *parent = NULL; struct ext4_prealloc_space *iter_pa, *new_pa; ext4_lblk_t iter_start, new_start; while (*iter) { iter_pa = rb_entry(*iter, struct ext4_prealloc_space, pa_node.inode_node); new_pa = rb_entry(new, struct ext4_prealloc_space, pa_node.inode_node); iter_start = iter_pa->pa_lstart; new_start = new_pa->pa_lstart; parent = *iter; if (new_start < iter_start) iter = &((*iter)->rb_left); else iter = &((*iter)->rb_right); } rb_link_node(new, parent, iter); rb_insert_color(new, root); } /* * creates new preallocated space for given inode */ static noinline_for_stack void ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) { struct super_block *sb = ac->ac_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_prealloc_space *pa; struct ext4_group_info *grp; struct ext4_inode_info *ei; /* preallocate only when found space is larger then requested */ BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); BUG_ON(ac->ac_status != AC_STATUS_FOUND); BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); BUG_ON(ac->ac_pa == NULL); pa = ac->ac_pa; if (ac->ac_b_ex.fe_len < ac->ac_orig_goal_len) { struct ext4_free_extent ex = { .fe_logical = ac->ac_g_ex.fe_logical, .fe_len = ac->ac_orig_goal_len, }; loff_t orig_goal_end = extent_logical_end(sbi, &ex); loff_t o_ex_end = extent_logical_end(sbi, &ac->ac_o_ex); /* * We can't allocate as much as normalizer wants, so we try * to get proper lstart to cover the original request, except * when the goal doesn't cover the original request as below: * * orig_ex:2045/2055(10), isize:8417280 -> normalized:0/2048 * best_ex:0/200(200) -> adjusted: 1848/2048(200) */ BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); /* * Use the below logic for adjusting best extent as it keeps * fragmentation in check while ensuring logical range of best * extent doesn't overflow out of goal extent: * * 1. Check if best ex can be kept at end of goal (before * cr_best_avail trimmed it) and still cover original start * 2. Else, check if best ex can be kept at start of goal and * still cover original end * 3. Else, keep the best ex at start of original request. 
*/ ex.fe_len = ac->ac_b_ex.fe_len; ex.fe_logical = orig_goal_end - EXT4_C2B(sbi, ex.fe_len); if (ac->ac_o_ex.fe_logical >= ex.fe_logical) goto adjust_bex; ex.fe_logical = ac->ac_g_ex.fe_logical; if (o_ex_end <= extent_logical_end(sbi, &ex)) goto adjust_bex; ex.fe_logical = ac->ac_o_ex.fe_logical; adjust_bex: ac->ac_b_ex.fe_logical = ex.fe_logical; BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); BUG_ON(extent_logical_end(sbi, &ex) > orig_goal_end); } pa->pa_lstart = ac->ac_b_ex.fe_logical; pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); pa->pa_len = ac->ac_b_ex.fe_len; pa->pa_free = pa->pa_len; spin_lock_init(&pa->pa_lock); INIT_LIST_HEAD(&pa->pa_group_list); pa->pa_deleted = 0; pa->pa_type = MB_INODE_PA; mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, pa->pa_len, pa->pa_lstart); trace_ext4_mb_new_inode_pa(ac, pa); atomic_add(pa->pa_free, &sbi->s_mb_preallocated); ext4_mb_use_inode_pa(ac, pa); ei = EXT4_I(ac->ac_inode); grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); if (!grp) return; pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock; pa->pa_inode = ac->ac_inode; list_add(&pa->pa_group_list, &grp->bb_prealloc_list); write_lock(pa->pa_node_lock.inode_lock); ext4_mb_pa_rb_insert(&ei->i_prealloc_node, &pa->pa_node.inode_node); write_unlock(pa->pa_node_lock.inode_lock); atomic_inc(&ei->i_prealloc_active); } /* * creates new preallocated space for locality group inodes belongs to */ static noinline_for_stack void ext4_mb_new_group_pa(struct ext4_allocation_context *ac) { struct super_block *sb = ac->ac_sb; struct ext4_locality_group *lg; struct ext4_prealloc_space *pa; struct ext4_group_info *grp; /* preallocate only when found space is larger then requested */ BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); BUG_ON(ac->ac_status != AC_STATUS_FOUND); BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); BUG_ON(ac->ac_pa == NULL); pa = ac->ac_pa; pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); pa->pa_lstart = pa->pa_pstart; pa->pa_len = ac->ac_b_ex.fe_len; pa->pa_free = pa->pa_len; spin_lock_init(&pa->pa_lock); INIT_LIST_HEAD(&pa->pa_node.lg_list); INIT_LIST_HEAD(&pa->pa_group_list); pa->pa_deleted = 0; pa->pa_type = MB_GROUP_PA; mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, pa->pa_len, pa->pa_lstart); trace_ext4_mb_new_group_pa(ac, pa); ext4_mb_use_group_pa(ac, pa); atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); if (!grp) return; lg = ac->ac_lg; BUG_ON(lg == NULL); pa->pa_node_lock.lg_lock = &lg->lg_prealloc_lock; pa->pa_inode = NULL; list_add(&pa->pa_group_list, &grp->bb_prealloc_list); /* * We will later add the new pa to the right bucket * after updating the pa_free in ext4_mb_release_context */ } static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac) { if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) ext4_mb_new_group_pa(ac); else ext4_mb_new_inode_pa(ac); } /* * finds all unused blocks in on-disk bitmap, frees them in * in-core bitmap and buddy. * @pa must be unlinked from inode and group lists, so that * nobody else can find/use it. * the caller MUST hold group/inode locks. 
* TODO: optimize the case when there are no in-core structures yet */ static noinline_for_stack void ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, struct ext4_prealloc_space *pa) { struct super_block *sb = e4b->bd_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); unsigned int end; unsigned int next; ext4_group_t group; ext4_grpblk_t bit; unsigned long long grp_blk_start; int free = 0; BUG_ON(pa->pa_deleted == 0); ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); BUG_ON(group != e4b->bd_group && pa->pa_len != 0); end = bit + pa->pa_len; while (bit < end) { bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); if (bit >= end) break; next = mb_find_next_bit(bitmap_bh->b_data, end, bit); mb_debug(sb, "free preallocated %u/%u in group %u\n", (unsigned) ext4_group_first_block_no(sb, group) + bit, (unsigned) next - bit, (unsigned) group); free += next - bit; trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + EXT4_C2B(sbi, bit)), next - bit); mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); bit = next + 1; } if (free != pa->pa_free) { ext4_msg(e4b->bd_sb, KERN_CRIT, "pa %p: logic %lu, phys. %lu, len %d", pa, (unsigned long) pa->pa_lstart, (unsigned long) pa->pa_pstart, pa->pa_len); ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", free, pa->pa_free); /* * pa is already deleted so we use the value obtained * from the bitmap and continue. */ } atomic_add(free, &sbi->s_mb_discarded); } static noinline_for_stack void ext4_mb_release_group_pa(struct ext4_buddy *e4b, struct ext4_prealloc_space *pa) { struct super_block *sb = e4b->bd_sb; ext4_group_t group; ext4_grpblk_t bit; trace_ext4_mb_release_group_pa(sb, pa); BUG_ON(pa->pa_deleted == 0); ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); if (unlikely(group != e4b->bd_group && pa->pa_len != 0)) { ext4_warning(sb, "bad group: expected %u, group %u, pa_start %llu", e4b->bd_group, group, pa->pa_pstart); return; } mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); } /* * releases all preallocations in given group * * first, we need to decide discard policy: * - when do we discard * 1) ENOSPC * - how many do we discard * 1) how many requested */ static noinline_for_stack int ext4_mb_discard_group_preallocations(struct super_block *sb, ext4_group_t group, int *busy) { struct ext4_group_info *grp = ext4_get_group_info(sb, group); struct buffer_head *bitmap_bh = NULL; struct ext4_prealloc_space *pa, *tmp; LIST_HEAD(list); struct ext4_buddy e4b; struct ext4_inode_info *ei; int err; int free = 0; if (!grp) return 0; mb_debug(sb, "discard preallocation for group %u\n", group); if (list_empty(&grp->bb_prealloc_list)) goto out_dbg; bitmap_bh = ext4_read_block_bitmap(sb, group); if (IS_ERR(bitmap_bh)) { err = PTR_ERR(bitmap_bh); ext4_error_err(sb, -err, "Error %d reading block bitmap for %u", err, group); goto out_dbg; } err = ext4_mb_load_buddy(sb, group, &e4b); if (err) { ext4_warning(sb, "Error %d loading buddy information for %u", err, group); put_bh(bitmap_bh); goto out_dbg; } ext4_lock_group(sb, group); list_for_each_entry_safe(pa, tmp, &grp->bb_prealloc_list, pa_group_list) { spin_lock(&pa->pa_lock); if (atomic_read(&pa->pa_count)) { spin_unlock(&pa->pa_lock); *busy = 1; continue; } if (pa->pa_deleted) { spin_unlock(&pa->pa_lock); continue; } /* seems this 
one can be freed ... */ ext4_mb_mark_pa_deleted(sb, pa); if (!free) this_cpu_inc(discard_pa_seq); /* we can trust pa_free ... */ free += pa->pa_free; spin_unlock(&pa->pa_lock); list_del(&pa->pa_group_list); list_add(&pa->u.pa_tmp_list, &list); } /* now free all selected PAs */ list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { /* remove from object (inode or locality group) */ if (pa->pa_type == MB_GROUP_PA) { spin_lock(pa->pa_node_lock.lg_lock); list_del_rcu(&pa->pa_node.lg_list); spin_unlock(pa->pa_node_lock.lg_lock); } else { write_lock(pa->pa_node_lock.inode_lock); ei = EXT4_I(pa->pa_inode); rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); write_unlock(pa->pa_node_lock.inode_lock); } list_del(&pa->u.pa_tmp_list); if (pa->pa_type == MB_GROUP_PA) { ext4_mb_release_group_pa(&e4b, pa); call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); } else { ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); ext4_mb_pa_free(pa); } } ext4_unlock_group(sb, group); ext4_mb_unload_buddy(&e4b); put_bh(bitmap_bh); out_dbg: mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n", free, group, grp->bb_free); return free; } /* * releases all non-used preallocated blocks for given inode * * It's important to discard preallocations under i_data_sem * We don't want another block to be served from the prealloc * space when we are discarding the inode prealloc space. * * FIXME!! Make sure it is valid at all the call sites */ void ext4_discard_preallocations(struct inode *inode) { struct ext4_inode_info *ei = EXT4_I(inode); struct super_block *sb = inode->i_sb; struct buffer_head *bitmap_bh = NULL; struct ext4_prealloc_space *pa, *tmp; ext4_group_t group = 0; LIST_HEAD(list); struct ext4_buddy e4b; struct rb_node *iter; int err; if (!S_ISREG(inode->i_mode)) return; if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) return; mb_debug(sb, "discard preallocation for inode %lu\n", inode->i_ino); trace_ext4_discard_preallocations(inode, atomic_read(&ei->i_prealloc_active)); repeat: /* first, collect all pa's in the inode */ write_lock(&ei->i_prealloc_lock); for (iter = rb_first(&ei->i_prealloc_node); iter; iter = rb_next(iter)) { pa = rb_entry(iter, struct ext4_prealloc_space, pa_node.inode_node); BUG_ON(pa->pa_node_lock.inode_lock != &ei->i_prealloc_lock); spin_lock(&pa->pa_lock); if (atomic_read(&pa->pa_count)) { /* this shouldn't happen often - nobody should * use preallocation while we're discarding it */ spin_unlock(&pa->pa_lock); write_unlock(&ei->i_prealloc_lock); ext4_msg(sb, KERN_ERR, "uh-oh! used pa while discarding"); WARN_ON(1); schedule_timeout_uninterruptible(HZ); goto repeat; } if (pa->pa_deleted == 0) { ext4_mb_mark_pa_deleted(sb, pa); spin_unlock(&pa->pa_lock); rb_erase(&pa->pa_node.inode_node, &ei->i_prealloc_node); list_add(&pa->u.pa_tmp_list, &list); continue; } /* someone is deleting pa right now */ spin_unlock(&pa->pa_lock); write_unlock(&ei->i_prealloc_lock); /* we have to wait here because pa_deleted * doesn't mean pa is already unlinked from * the list. 
as we might be called from * ->clear_inode() the inode will get freed * and concurrent thread which is unlinking * pa from inode's list may access already * freed memory, bad-bad-bad */ /* XXX: if this happens too often, we can * add a flag to force wait only in case * of ->clear_inode(), but not in case of * regular truncate */ schedule_timeout_uninterruptible(HZ); goto repeat; } write_unlock(&ei->i_prealloc_lock); list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { BUG_ON(pa->pa_type != MB_INODE_PA); group = ext4_get_group_number(sb, pa->pa_pstart); err = ext4_mb_load_buddy_gfp(sb, group, &e4b, GFP_NOFS|__GFP_NOFAIL); if (err) { ext4_error_err(sb, -err, "Error %d loading buddy information for %u", err, group); continue; } bitmap_bh = ext4_read_block_bitmap(sb, group); if (IS_ERR(bitmap_bh)) { err = PTR_ERR(bitmap_bh); ext4_error_err(sb, -err, "Error %d reading block bitmap for %u", err, group); ext4_mb_unload_buddy(&e4b); continue; } ext4_lock_group(sb, group); list_del(&pa->pa_group_list); ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); ext4_unlock_group(sb, group); ext4_mb_unload_buddy(&e4b); put_bh(bitmap_bh); list_del(&pa->u.pa_tmp_list); ext4_mb_pa_free(pa); } } static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac) { struct ext4_prealloc_space *pa; BUG_ON(ext4_pspace_cachep == NULL); pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS); if (!pa) return -ENOMEM; atomic_set(&pa->pa_count, 1); ac->ac_pa = pa; return 0; } static void ext4_mb_pa_put_free(struct ext4_allocation_context *ac) { struct ext4_prealloc_space *pa = ac->ac_pa; BUG_ON(!pa); ac->ac_pa = NULL; WARN_ON(!atomic_dec_and_test(&pa->pa_count)); /* * current function is only called due to an error or due to * len of found blocks < len of requested blocks hence the PA has not * been added to grp->bb_prealloc_list. 
So we don't need to lock it */ pa->pa_deleted = 1; ext4_mb_pa_free(pa); } #ifdef CONFIG_EXT4_DEBUG static inline void ext4_mb_show_pa(struct super_block *sb) { ext4_group_t i, ngroups; if (ext4_emergency_state(sb)) return; ngroups = ext4_get_groups_count(sb); mb_debug(sb, "groups: "); for (i = 0; i < ngroups; i++) { struct ext4_group_info *grp = ext4_get_group_info(sb, i); struct ext4_prealloc_space *pa; ext4_grpblk_t start; struct list_head *cur; if (!grp) continue; ext4_lock_group(sb, i); list_for_each(cur, &grp->bb_prealloc_list) { pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); spin_lock(&pa->pa_lock); ext4_get_group_no_and_offset(sb, pa->pa_pstart, NULL, &start); spin_unlock(&pa->pa_lock); mb_debug(sb, "PA:%u:%d:%d\n", i, start, pa->pa_len); } ext4_unlock_group(sb, i); mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free, grp->bb_fragments); } } static void ext4_mb_show_ac(struct ext4_allocation_context *ac) { struct super_block *sb = ac->ac_sb; if (ext4_emergency_state(sb)) return; mb_debug(sb, "Can't allocate:" " Allocation context details:"); mb_debug(sb, "status %u flags 0x%x", ac->ac_status, ac->ac_flags); mb_debug(sb, "orig %lu/%lu/%lu@%lu, " "goal %lu/%lu/%lu@%lu, " "best %lu/%lu/%lu@%lu cr %d", (unsigned long)ac->ac_o_ex.fe_group, (unsigned long)ac->ac_o_ex.fe_start, (unsigned long)ac->ac_o_ex.fe_len, (unsigned long)ac->ac_o_ex.fe_logical, (unsigned long)ac->ac_g_ex.fe_group, (unsigned long)ac->ac_g_ex.fe_start, (unsigned long)ac->ac_g_ex.fe_len, (unsigned long)ac->ac_g_ex.fe_logical, (unsigned long)ac->ac_b_ex.fe_group, (unsigned long)ac->ac_b_ex.fe_start, (unsigned long)ac->ac_b_ex.fe_len, (unsigned long)ac->ac_b_ex.fe_logical, (int)ac->ac_criteria); mb_debug(sb, "%u found", ac->ac_found); mb_debug(sb, "used pa: %s, ", str_yes_no(ac->ac_pa)); if (ac->ac_pa) mb_debug(sb, "pa_type %s\n", ac->ac_pa->pa_type == MB_GROUP_PA ? "group pa" : "inode pa"); ext4_mb_show_pa(sb); } #else static inline void ext4_mb_show_pa(struct super_block *sb) { } static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac) { ext4_mb_show_pa(ac->ac_sb); } #endif /* * We use locality group preallocation for small size file. The size of the * file is determined by the current size or the resulting size after * allocation which ever is larger * * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req */ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) { struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); int bsbits = ac->ac_sb->s_blocksize_bits; loff_t size, isize; bool inode_pa_eligible, group_pa_eligible; if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) return; if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) return; group_pa_eligible = sbi->s_mb_group_prealloc > 0; inode_pa_eligible = true; size = extent_logical_end(sbi, &ac->ac_o_ex); isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1) >> bsbits; /* No point in using inode preallocation for closed files */ if ((size == isize) && !ext4_fs_is_busy(sbi) && !inode_is_open_for_write(ac->ac_inode)) inode_pa_eligible = false; size = max(size, isize); /* Don't use group allocation for large files */ if (size > sbi->s_mb_stream_request) group_pa_eligible = false; if (!group_pa_eligible) { if (inode_pa_eligible) ac->ac_flags |= EXT4_MB_STREAM_ALLOC; else ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC; return; } BUG_ON(ac->ac_lg != NULL); /* * locality group prealloc space are per cpu. The reason for having * per cpu locality group is to reduce the contention between block * request from multiple CPUs. 
*/ ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups); /* we're going to use group allocation */ ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; /* serialize all allocations in the group */ mutex_lock(&ac->ac_lg->lg_mutex); } static noinline_for_stack void ext4_mb_initialize_context(struct ext4_allocation_context *ac, struct ext4_allocation_request *ar) { struct super_block *sb = ar->inode->i_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_super_block *es = sbi->s_es; ext4_group_t group; unsigned int len; ext4_fsblk_t goal; ext4_grpblk_t block; /* we can't allocate > group size */ len = ar->len; /* just a dirty hack to filter too big requests */ if (len >= EXT4_CLUSTERS_PER_GROUP(sb)) len = EXT4_CLUSTERS_PER_GROUP(sb); /* start searching from the goal */ goal = ar->goal; if (goal < le32_to_cpu(es->s_first_data_block) || goal >= ext4_blocks_count(es)) goal = le32_to_cpu(es->s_first_data_block); ext4_get_group_no_and_offset(sb, goal, &group, &block); /* set up allocation goals */ ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); ac->ac_status = AC_STATUS_CONTINUE; ac->ac_sb = sb; ac->ac_inode = ar->inode; ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical; ac->ac_o_ex.fe_group = group; ac->ac_o_ex.fe_start = block; ac->ac_o_ex.fe_len = len; ac->ac_g_ex = ac->ac_o_ex; ac->ac_orig_goal_len = ac->ac_g_ex.fe_len; ac->ac_flags = ar->flags; /* we have to define context: we'll work with a file or * locality group. this is a policy, actually */ ext4_mb_group_or_file(ac); mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, " "left: %u/%u, right %u/%u to %swritable\n", (unsigned) ar->len, (unsigned) ar->logical, (unsigned) ar->goal, ac->ac_flags, ac->ac_2order, (unsigned) ar->lleft, (unsigned) ar->pleft, (unsigned) ar->lright, (unsigned) ar->pright, inode_is_open_for_write(ar->inode) ? "" : "non-"); } static noinline_for_stack void ext4_mb_discard_lg_preallocations(struct super_block *sb, struct ext4_locality_group *lg, int order, int total_entries) { ext4_group_t group = 0; struct ext4_buddy e4b; LIST_HEAD(discard_list); struct ext4_prealloc_space *pa, *tmp; mb_debug(sb, "discard locality group preallocation\n"); spin_lock(&lg->lg_prealloc_lock); list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order], pa_node.lg_list, lockdep_is_held(&lg->lg_prealloc_lock)) { spin_lock(&pa->pa_lock); if (atomic_read(&pa->pa_count)) { /* * This is the pa that we just used * for block allocation. So don't * free that */ spin_unlock(&pa->pa_lock); continue; } if (pa->pa_deleted) { spin_unlock(&pa->pa_lock); continue; } /* only lg prealloc space */ BUG_ON(pa->pa_type != MB_GROUP_PA); /* seems this one can be freed ... */ ext4_mb_mark_pa_deleted(sb, pa); spin_unlock(&pa->pa_lock); list_del_rcu(&pa->pa_node.lg_list); list_add(&pa->u.pa_tmp_list, &discard_list); total_entries--; if (total_entries <= 5) { /* * we want to keep only 5 entries * allowing it to grow to 8. This * mak sure we don't call discard * soon for this list. 
*/ break; } } spin_unlock(&lg->lg_prealloc_lock); list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { int err; group = ext4_get_group_number(sb, pa->pa_pstart); err = ext4_mb_load_buddy_gfp(sb, group, &e4b, GFP_NOFS|__GFP_NOFAIL); if (err) { ext4_error_err(sb, -err, "Error %d loading buddy information for %u", err, group); continue; } ext4_lock_group(sb, group); list_del(&pa->pa_group_list); ext4_mb_release_group_pa(&e4b, pa); ext4_unlock_group(sb, group); ext4_mb_unload_buddy(&e4b); list_del(&pa->u.pa_tmp_list); call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); } } /* * We have incremented pa_count. So it cannot be freed at this * point. Also we hold lg_mutex. So no parallel allocation is * possible from this lg. That means pa_free cannot be updated. * * A parallel ext4_mb_discard_group_preallocations is possible. * which can cause the lg_prealloc_list to be updated. */ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) { int order, added = 0, lg_prealloc_count = 1; struct super_block *sb = ac->ac_sb; struct ext4_locality_group *lg = ac->ac_lg; struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; order = fls(pa->pa_free) - 1; if (order > PREALLOC_TB_SIZE - 1) /* The max size of hash table is PREALLOC_TB_SIZE */ order = PREALLOC_TB_SIZE - 1; /* Add the prealloc space to lg */ spin_lock(&lg->lg_prealloc_lock); list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], pa_node.lg_list, lockdep_is_held(&lg->lg_prealloc_lock)) { spin_lock(&tmp_pa->pa_lock); if (tmp_pa->pa_deleted) { spin_unlock(&tmp_pa->pa_lock); continue; } if (!added && pa->pa_free < tmp_pa->pa_free) { /* Add to the tail of the previous entry */ list_add_tail_rcu(&pa->pa_node.lg_list, &tmp_pa->pa_node.lg_list); added = 1; /* * we want to count the total * number of entries in the list */ } spin_unlock(&tmp_pa->pa_lock); lg_prealloc_count++; } if (!added) list_add_tail_rcu(&pa->pa_node.lg_list, &lg->lg_prealloc_list[order]); spin_unlock(&lg->lg_prealloc_lock); /* Now trim the list to be not more than 8 elements */ if (lg_prealloc_count > 8) ext4_mb_discard_lg_preallocations(sb, lg, order, lg_prealloc_count); } /* * release all resource we used in allocation */ static void ext4_mb_release_context(struct ext4_allocation_context *ac) { struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); struct ext4_prealloc_space *pa = ac->ac_pa; if (pa) { if (pa->pa_type == MB_GROUP_PA) { /* see comment in ext4_mb_use_group_pa() */ spin_lock(&pa->pa_lock); pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); pa->pa_free -= ac->ac_b_ex.fe_len; pa->pa_len -= ac->ac_b_ex.fe_len; spin_unlock(&pa->pa_lock); /* * We want to add the pa to the right bucket. * Remove it from the list and while adding * make sure the list to which we are adding * doesn't grow big. 
*/ if (likely(pa->pa_free)) { spin_lock(pa->pa_node_lock.lg_lock); list_del_rcu(&pa->pa_node.lg_list); spin_unlock(pa->pa_node_lock.lg_lock); ext4_mb_add_n_trim(ac); } } ext4_mb_put_pa(ac, ac->ac_sb, pa); } if (ac->ac_bitmap_folio) folio_put(ac->ac_bitmap_folio); if (ac->ac_buddy_folio) folio_put(ac->ac_buddy_folio); if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) mutex_unlock(&ac->ac_lg->lg_mutex); ext4_mb_collect_stats(ac); } static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) { ext4_group_t i, ngroups = ext4_get_groups_count(sb); int ret; int freed = 0, busy = 0; int retry = 0; trace_ext4_mb_discard_preallocations(sb, needed); if (needed == 0) needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; repeat: for (i = 0; i < ngroups && needed > 0; i++) { ret = ext4_mb_discard_group_preallocations(sb, i, &busy); freed += ret; needed -= ret; cond_resched(); } if (needed > 0 && busy && ++retry < 3) { busy = 0; goto repeat; } return freed; } static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb, struct ext4_allocation_context *ac, u64 *seq) { int freed; u64 seq_retry = 0; bool ret = false; freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); if (freed) { ret = true; goto out_dbg; } seq_retry = ext4_get_discard_pa_seq_sum(); if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) { ac->ac_flags |= EXT4_MB_STRICT_CHECK; *seq = seq_retry; ret = true; } out_dbg: mb_debug(sb, "freed %d, retry ? %s\n", freed, str_yes_no(ret)); return ret; } /* * Simple allocator for Ext4 fast commit replay path. It searches for blocks * linearly starting at the goal block and also excludes the blocks which * are going to be in use after fast commit replay. */ static ext4_fsblk_t ext4_mb_new_blocks_simple(struct ext4_allocation_request *ar, int *errp) { struct buffer_head *bitmap_bh; struct super_block *sb = ar->inode->i_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_group_t group, nr; ext4_grpblk_t blkoff; ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); ext4_grpblk_t i = 0; ext4_fsblk_t goal, block; struct ext4_super_block *es = sbi->s_es; goal = ar->goal; if (goal < le32_to_cpu(es->s_first_data_block) || goal >= ext4_blocks_count(es)) goal = le32_to_cpu(es->s_first_data_block); ar->len = 0; ext4_get_group_no_and_offset(sb, goal, &group, &blkoff); for (nr = ext4_get_groups_count(sb); nr > 0; nr--) { bitmap_bh = ext4_read_block_bitmap(sb, group); if (IS_ERR(bitmap_bh)) { *errp = PTR_ERR(bitmap_bh); pr_warn("Failed to read block bitmap\n"); return 0; } while (1) { i = mb_find_next_zero_bit(bitmap_bh->b_data, max, blkoff); if (i >= max) break; if (ext4_fc_replay_check_excluded(sb, ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, i))) { blkoff = i + 1; } else break; } brelse(bitmap_bh); if (i < max) break; if (++group >= ext4_get_groups_count(sb)) group = 0; blkoff = 0; } if (i >= max) { *errp = -ENOSPC; return 0; } block = ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, i); ext4_mb_mark_bb(sb, block, 1, true); ar->len = 1; *errp = 0; return block; } /* * Main entry point into mballoc to allocate blocks * it tries to use preallocation first, then falls back * to usual allocation */ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, struct ext4_allocation_request *ar, int *errp) { struct ext4_allocation_context *ac = NULL; struct ext4_sb_info *sbi; struct super_block *sb; ext4_fsblk_t block = 0; unsigned int inquota = 0; unsigned int reserv_clstrs = 0; int retries = 0; u64 seq; might_sleep(); sb = ar->inode->i_sb; sbi = EXT4_SB(sb); 
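/* Overview of the path below: fast commit replay takes the simple linear allocator; otherwise reserve clusters/quota as needed, then try preallocated space before falling back to the regular buddy allocator. */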
trace_ext4_request_blocks(ar); if (sbi->s_mount_state & EXT4_FC_REPLAY) return ext4_mb_new_blocks_simple(ar, errp); /* Allow to use superuser reservation for quota file */ if (ext4_is_quota_file(ar->inode)) ar->flags |= EXT4_MB_USE_ROOT_BLOCKS; if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) { /* Without delayed allocation we need to verify * there is enough free blocks to do block allocation * and verify allocation doesn't exceed the quota limits. */ while (ar->len && ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { /* let others to free the space */ cond_resched(); ar->len = ar->len >> 1; } if (!ar->len) { ext4_mb_show_pa(sb); *errp = -ENOSPC; return 0; } reserv_clstrs = ar->len; if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) { dquot_alloc_block_nofail(ar->inode, EXT4_C2B(sbi, ar->len)); } else { while (ar->len && dquot_alloc_block(ar->inode, EXT4_C2B(sbi, ar->len))) { ar->flags |= EXT4_MB_HINT_NOPREALLOC; ar->len--; } } inquota = ar->len; if (ar->len == 0) { *errp = -EDQUOT; goto out; } } ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS); if (!ac) { ar->len = 0; *errp = -ENOMEM; goto out; } ext4_mb_initialize_context(ac, ar); ac->ac_op = EXT4_MB_HISTORY_PREALLOC; seq = this_cpu_read(discard_pa_seq); if (!ext4_mb_use_preallocated(ac)) { ac->ac_op = EXT4_MB_HISTORY_ALLOC; ext4_mb_normalize_request(ac, ar); *errp = ext4_mb_pa_alloc(ac); if (*errp) goto errout; repeat: /* allocate space in core */ *errp = ext4_mb_regular_allocator(ac); /* * pa allocated above is added to grp->bb_prealloc_list only * when we were able to allocate some block i.e. when * ac->ac_status == AC_STATUS_FOUND. * And error from above mean ac->ac_status != AC_STATUS_FOUND * So we have to free this pa here itself. */ if (*errp) { ext4_mb_pa_put_free(ac); ext4_discard_allocated_blocks(ac); goto errout; } if (ac->ac_status == AC_STATUS_FOUND && ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len) ext4_mb_pa_put_free(ac); } if (likely(ac->ac_status == AC_STATUS_FOUND)) { *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); if (*errp) { ext4_discard_allocated_blocks(ac); goto errout; } else { block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); ar->len = ac->ac_b_ex.fe_len; } } else { if (++retries < 3 && ext4_mb_discard_preallocations_should_retry(sb, ac, &seq)) goto repeat; /* * If block allocation fails then the pa allocated above * needs to be freed here itself. */ ext4_mb_pa_put_free(ac); *errp = -ENOSPC; } if (*errp) { errout: ac->ac_b_ex.fe_len = 0; ar->len = 0; ext4_mb_show_ac(ac); } ext4_mb_release_context(ac); kmem_cache_free(ext4_ac_cachep, ac); out: if (inquota && ar->len < inquota) dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); if (!ar->len) { if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) /* release all the reserved blocks if non delalloc */ percpu_counter_sub(&sbi->s_dirtyclusters_counter, reserv_clstrs); } trace_ext4_allocate_blocks(ar, (unsigned long long)block); return block; } /* * We can merge two free data extents only if the physical blocks * are contiguous, AND the extents were freed by the same transaction, * AND the blocks are associated with the same group. 
*/ static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi, struct ext4_free_data *entry, struct ext4_free_data *new_entry, struct rb_root *entry_rb_root) { if ((entry->efd_tid != new_entry->efd_tid) || (entry->efd_group != new_entry->efd_group)) return; if (entry->efd_start_cluster + entry->efd_count == new_entry->efd_start_cluster) { new_entry->efd_start_cluster = entry->efd_start_cluster; new_entry->efd_count += entry->efd_count; } else if (new_entry->efd_start_cluster + new_entry->efd_count == entry->efd_start_cluster) { new_entry->efd_count += entry->efd_count; } else return; spin_lock(&sbi->s_md_lock); list_del(&entry->efd_list); spin_unlock(&sbi->s_md_lock); rb_erase(&entry->efd_node, entry_rb_root); kmem_cache_free(ext4_free_data_cachep, entry); } static noinline_for_stack void ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, struct ext4_free_data *new_entry) { ext4_group_t group = e4b->bd_group; ext4_grpblk_t cluster; ext4_grpblk_t clusters = new_entry->efd_count; struct ext4_free_data *entry; struct ext4_group_info *db = e4b->bd_info; struct super_block *sb = e4b->bd_sb; struct ext4_sb_info *sbi = EXT4_SB(sb); struct rb_node **n = &db->bb_free_root.rb_node, *node; struct rb_node *parent = NULL, *new_node; BUG_ON(!ext4_handle_valid(handle)); BUG_ON(e4b->bd_bitmap_folio == NULL); BUG_ON(e4b->bd_buddy_folio == NULL); new_node = &new_entry->efd_node; cluster = new_entry->efd_start_cluster; if (!*n) { /* first free block exent. We need to protect buddy cache from being freed, * otherwise we'll refresh it from * on-disk bitmap and lose not-yet-available * blocks */ folio_get(e4b->bd_buddy_folio); folio_get(e4b->bd_bitmap_folio); } while (*n) { parent = *n; entry = rb_entry(parent, struct ext4_free_data, efd_node); if (cluster < entry->efd_start_cluster) n = &(*n)->rb_left; else if (cluster >= (entry->efd_start_cluster + entry->efd_count)) n = &(*n)->rb_right; else { ext4_grp_locked_error(sb, group, 0, ext4_group_first_block_no(sb, group) + EXT4_C2B(sbi, cluster), "Block already on to-be-freed list"); kmem_cache_free(ext4_free_data_cachep, new_entry); return; } } rb_link_node(new_node, parent, n); rb_insert_color(new_node, &db->bb_free_root); /* Now try to see the extent can be merged to left and right */ node = rb_prev(new_node); if (node) { entry = rb_entry(node, struct ext4_free_data, efd_node); ext4_try_merge_freed_extent(sbi, entry, new_entry, &(db->bb_free_root)); } node = rb_next(new_node); if (node) { entry = rb_entry(node, struct ext4_free_data, efd_node); ext4_try_merge_freed_extent(sbi, entry, new_entry, &(db->bb_free_root)); } spin_lock(&sbi->s_md_lock); list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list[new_entry->efd_tid & 1]); sbi->s_mb_free_pending += clusters; spin_unlock(&sbi->s_md_lock); } static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block, unsigned long count) { struct super_block *sb = inode->i_sb; ext4_group_t group; ext4_grpblk_t blkoff; ext4_get_group_no_and_offset(sb, block, &group, &blkoff); ext4_mb_mark_context(NULL, sb, false, group, blkoff, count, EXT4_MB_BITMAP_MARKED_CHECK | EXT4_MB_SYNC_UPDATE, NULL); } /** * ext4_mb_clear_bb() -- helper function for freeing blocks. 
* Used by ext4_free_blocks() * @handle: handle for this transaction * @inode: inode * @block: starting physical block to be freed * @count: number of blocks to be freed * @flags: flags used by ext4_free_blocks */ static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode, ext4_fsblk_t block, unsigned long count, int flags) { struct super_block *sb = inode->i_sb; struct ext4_group_info *grp; unsigned int overflow; ext4_grpblk_t bit; ext4_group_t block_group; struct ext4_sb_info *sbi; struct ext4_buddy e4b; unsigned int count_clusters; int err = 0; int mark_flags = 0; ext4_grpblk_t changed; sbi = EXT4_SB(sb); if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && !ext4_inode_block_valid(inode, block, count)) { ext4_error(sb, "Freeing blocks in system zone - " "Block = %llu, count = %lu", block, count); /* err = 0. ext4_std_error should be a no op */ goto error_out; } flags |= EXT4_FREE_BLOCKS_VALIDATED; do_more: overflow = 0; ext4_get_group_no_and_offset(sb, block, &block_group, &bit); grp = ext4_get_group_info(sb, block_group); if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) return; /* * Check to see if we are freeing blocks across a group * boundary. */ if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) { overflow = EXT4_C2B(sbi, bit) + count - EXT4_BLOCKS_PER_GROUP(sb); count -= overflow; /* The range changed so it's no longer validated */ flags &= ~EXT4_FREE_BLOCKS_VALIDATED; } count_clusters = EXT4_NUM_B2C(sbi, count); trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */ err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b, GFP_NOFS|__GFP_NOFAIL); if (err) goto error_out; if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && !ext4_inode_block_valid(inode, block, count)) { ext4_error(sb, "Freeing blocks in system zone - " "Block = %llu, count = %lu", block, count); /* err = 0. ext4_std_error should be a no op */ goto error_clean; } #ifdef AGGRESSIVE_CHECK mark_flags |= EXT4_MB_BITMAP_MARKED_CHECK; #endif err = ext4_mb_mark_context(handle, sb, false, block_group, bit, count_clusters, mark_flags, &changed); if (err && changed == 0) goto error_clean; #ifdef AGGRESSIVE_CHECK BUG_ON(changed != count_clusters); #endif /* * We need to make sure we don't reuse the freed block until after the * transaction is committed. We make an exception if the inode is to be * written in writeback mode since writeback mode has weak data * consistency guarantees. */ if (ext4_handle_valid(handle) && ((flags & EXT4_FREE_BLOCKS_METADATA) || !ext4_should_writeback_data(inode))) { struct ext4_free_data *new_entry; /* * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed * to fail. */ new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS|__GFP_NOFAIL); new_entry->efd_start_cluster = bit; new_entry->efd_group = block_group; new_entry->efd_count = count_clusters; new_entry->efd_tid = handle->h_transaction->t_tid; ext4_lock_group(sb, block_group); ext4_mb_free_metadata(handle, &e4b, new_entry); } else { if (test_opt(sb, DISCARD)) { err = ext4_issue_discard(sb, block_group, bit, count_clusters); /* * Ignore EOPNOTSUPP error. This is consistent with * what happens when using journal. 
*/ if (err == -EOPNOTSUPP) err = 0; if (err) ext4_msg(sb, KERN_WARNING, "discard request in" " group:%u block:%d count:%lu failed" " with %d", block_group, bit, count, err); } EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info); ext4_lock_group(sb, block_group); mb_free_blocks(inode, &e4b, bit, count_clusters); } ext4_unlock_group(sb, block_group); /* * on a bigalloc file system, defer the s_freeclusters_counter * update to the caller (ext4_remove_space and friends) so they * can determine if a cluster freed here should be rereserved */ if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) { if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters); } if (overflow && !err) { block += count; count = overflow; ext4_mb_unload_buddy(&e4b); /* The range changed so it's no longer validated */ flags &= ~EXT4_FREE_BLOCKS_VALIDATED; goto do_more; } error_clean: ext4_mb_unload_buddy(&e4b); error_out: ext4_std_error(sb, err); } /** * ext4_free_blocks() -- Free given blocks and update quota * @handle: handle for this transaction * @inode: inode * @bh: optional buffer of the block to be freed * @block: starting physical block to be freed * @count: number of blocks to be freed * @flags: flags used by ext4_free_blocks */ void ext4_free_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh, ext4_fsblk_t block, unsigned long count, int flags) { struct super_block *sb = inode->i_sb; unsigned int overflow; struct ext4_sb_info *sbi; sbi = EXT4_SB(sb); if (bh) { if (block) BUG_ON(block != bh->b_blocknr); else block = bh->b_blocknr; } if (sbi->s_mount_state & EXT4_FC_REPLAY) { ext4_free_blocks_simple(inode, block, EXT4_NUM_B2C(sbi, count)); return; } might_sleep(); if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && !ext4_inode_block_valid(inode, block, count)) { ext4_error(sb, "Freeing blocks not in datazone - " "block = %llu, count = %lu", block, count); return; } flags |= EXT4_FREE_BLOCKS_VALIDATED; ext4_debug("freeing block %llu\n", block); trace_ext4_free_blocks(inode, block, count, flags); if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { BUG_ON(count > 1); ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, inode, bh, block); } /* * If the extent to be freed does not begin on a cluster * boundary, we need to deal with partial clusters at the * beginning and end of the extent. Normally we will free * blocks at the beginning or the end unless we are explicitly * requested to avoid doing so. 
*/ overflow = EXT4_PBLK_COFF(sbi, block); if (overflow) { if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { overflow = sbi->s_cluster_ratio - overflow; block += overflow; if (count > overflow) count -= overflow; else return; } else { block -= overflow; count += overflow; } /* The range changed so it's no longer validated */ flags &= ~EXT4_FREE_BLOCKS_VALIDATED; } overflow = EXT4_LBLK_COFF(sbi, count); if (overflow) { if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { if (count > overflow) count -= overflow; else return; } else count += sbi->s_cluster_ratio - overflow; /* The range changed so it's no longer validated */ flags &= ~EXT4_FREE_BLOCKS_VALIDATED; } if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { int i; int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA; for (i = 0; i < count; i++) { cond_resched(); if (is_metadata) bh = sb_find_get_block(inode->i_sb, block + i); ext4_forget(handle, is_metadata, inode, bh, block + i); } } ext4_mb_clear_bb(handle, inode, block, count, flags); } /** * ext4_group_add_blocks() -- Add given blocks to an existing group * @handle: handle to this transaction * @sb: super block * @block: start physical block to add to the block group * @count: number of blocks to free * * This marks the blocks as free in the bitmap and buddy. */ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, ext4_fsblk_t block, unsigned long count) { ext4_group_t block_group; ext4_grpblk_t bit; struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_buddy e4b; int err = 0; ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block); ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1); unsigned long cluster_count = last_cluster - first_cluster + 1; ext4_grpblk_t changed; ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); if (cluster_count == 0) return 0; ext4_get_group_no_and_offset(sb, block, &block_group, &bit); /* * Check to see if we are freeing blocks across a group * boundary. */ if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) { ext4_warning(sb, "too many blocks added to group %u", block_group); err = -EINVAL; goto error_out; } err = ext4_mb_load_buddy(sb, block_group, &e4b); if (err) goto error_out; if (!ext4_sb_block_valid(sb, NULL, block, count)) { ext4_error(sb, "Adding blocks in system zones - " "Block = %llu, count = %lu", block, count); err = -EINVAL; goto error_clean; } err = ext4_mb_mark_context(handle, sb, false, block_group, bit, cluster_count, EXT4_MB_BITMAP_MARKED_CHECK, &changed); if (err && changed == 0) goto error_clean; if (changed != cluster_count) ext4_error(sb, "bit already cleared in group %u", block_group); ext4_lock_group(sb, block_group); mb_free_blocks(NULL, &e4b, bit, cluster_count); ext4_unlock_group(sb, block_group); percpu_counter_add(&sbi->s_freeclusters_counter, changed); error_clean: ext4_mb_unload_buddy(&e4b); error_out: ext4_std_error(sb, err); return err; } /** * ext4_trim_extent -- function to TRIM one single free extent in the group * @sb: super block for the file system * @start: starting block of the free extent in the alloc. group * @count: number of blocks to TRIM * @e4b: ext4 buddy for the group * * Trim "count" blocks starting at "start" in the "group". To assure that no * one will allocate those blocks, mark it as used in buddy bitmap. This must * be called with under the group lock. 
*/ static int ext4_trim_extent(struct super_block *sb, int start, int count, struct ext4_buddy *e4b) __releases(bitlock) __acquires(bitlock) { struct ext4_free_extent ex; ext4_group_t group = e4b->bd_group; int ret = 0; trace_ext4_trim_extent(sb, group, start, count); assert_spin_locked(ext4_group_lock_ptr(sb, group)); ex.fe_start = start; ex.fe_group = group; ex.fe_len = count; /* * Mark blocks used, so no one can reuse them while * being trimmed. */ mb_mark_used(e4b, &ex); ext4_unlock_group(sb, group); ret = ext4_issue_discard(sb, group, start, count); ext4_lock_group(sb, group); mb_free_blocks(NULL, e4b, start, ex.fe_len); return ret; } static ext4_grpblk_t ext4_last_grp_cluster(struct super_block *sb, ext4_group_t grp) { unsigned long nr_clusters_in_group; if (grp < (ext4_get_groups_count(sb) - 1)) nr_clusters_in_group = EXT4_CLUSTERS_PER_GROUP(sb); else nr_clusters_in_group = (ext4_blocks_count(EXT4_SB(sb)->s_es) - ext4_group_first_block_no(sb, grp)) >> EXT4_CLUSTER_BITS(sb); return nr_clusters_in_group - 1; } static bool ext4_trim_interrupted(void) { return fatal_signal_pending(current) || freezing(current); } static int ext4_try_to_trim_range(struct super_block *sb, struct ext4_buddy *e4b, ext4_grpblk_t start, ext4_grpblk_t max, ext4_grpblk_t minblocks) __acquires(ext4_group_lock_ptr(sb, e4b->bd_group)) __releases(ext4_group_lock_ptr(sb, e4b->bd_group)) { ext4_grpblk_t next, count, free_count, last, origin_start; bool set_trimmed = false; void *bitmap; if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) return 0; last = ext4_last_grp_cluster(sb, e4b->bd_group); bitmap = e4b->bd_bitmap; if (start == 0 && max >= last) set_trimmed = true; origin_start = start; start = max(e4b->bd_info->bb_first_free, start); count = 0; free_count = 0; while (start <= max) { start = mb_find_next_zero_bit(bitmap, max + 1, start); if (start > max) break; next = mb_find_next_bit(bitmap, last + 1, start); if (origin_start == 0 && next >= last) set_trimmed = true; if ((next - start) >= minblocks) { int ret = ext4_trim_extent(sb, start, next - start, e4b); if (ret && ret != -EOPNOTSUPP) return count; count += next - start; } free_count += next - start; start = next + 1; if (ext4_trim_interrupted()) return count; if (need_resched()) { ext4_unlock_group(sb, e4b->bd_group); cond_resched(); ext4_lock_group(sb, e4b->bd_group); } if ((e4b->bd_info->bb_free - free_count) < minblocks) break; } if (set_trimmed) EXT4_MB_GRP_SET_TRIMMED(e4b->bd_info); return count; } /** * ext4_trim_all_free -- function to trim all free space in alloc. group * @sb: super block for file system * @group: group to be trimmed * @start: first group block to examine * @max: last group block to examine * @minblocks: minimum extent block count * * ext4_trim_all_free walks through group's block bitmap searching for free * extents. When the free extent is found, mark it as used in group buddy * bitmap. Then issue a TRIM command on this extent and free the extent in * the group buddy bitmap. 
*/ static ext4_grpblk_t ext4_trim_all_free(struct super_block *sb, ext4_group_t group, ext4_grpblk_t start, ext4_grpblk_t max, ext4_grpblk_t minblocks) { struct ext4_buddy e4b; int ret; trace_ext4_trim_all_free(sb, group, start, max); ret = ext4_mb_load_buddy(sb, group, &e4b); if (ret) { ext4_warning(sb, "Error %d loading buddy information for %u", ret, group); return ret; } ext4_lock_group(sb, group); if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) || minblocks < EXT4_SB(sb)->s_last_trim_minblks) ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks); else ret = 0; ext4_unlock_group(sb, group); ext4_mb_unload_buddy(&e4b); ext4_debug("trimmed %d blocks in the group %d\n", ret, group); return ret; } /** * ext4_trim_fs() -- trim ioctl handle function * @sb: superblock for filesystem * @range: fstrim_range structure * * start: First Byte to trim * len: number of Bytes to trim from start * minlen: minimum extent length in Bytes * ext4_trim_fs goes through all allocation groups containing Bytes from * start to start+len. For each such a group ext4_trim_all_free function * is invoked to trim all free space. */ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) { unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev); struct ext4_group_info *grp; ext4_group_t group, first_group, last_group; ext4_grpblk_t cnt = 0, first_cluster, last_cluster; uint64_t start, end, minlen, trimmed = 0; ext4_fsblk_t first_data_blk = le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es); int ret = 0; start = range->start >> sb->s_blocksize_bits; end = start + (range->len >> sb->s_blocksize_bits) - 1; minlen = EXT4_NUM_B2C(EXT4_SB(sb), range->minlen >> sb->s_blocksize_bits); if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) || start >= max_blks || range->len < sb->s_blocksize) return -EINVAL; /* No point to try to trim less than discard granularity */ if (range->minlen < discard_granularity) { minlen = EXT4_NUM_B2C(EXT4_SB(sb), discard_granularity >> sb->s_blocksize_bits); if (minlen > EXT4_CLUSTERS_PER_GROUP(sb)) goto out; } if (end >= max_blks - 1) end = max_blks - 1; if (end <= first_data_blk) goto out; if (start < first_data_blk) start = first_data_blk; /* Determine first and last group to examine based on start and end */ ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start, &first_group, &first_cluster); ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end, &last_group, &last_cluster); /* end now represents the last cluster to discard in this group */ end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; for (group = first_group; group <= last_group; group++) { if (ext4_trim_interrupted()) break; grp = ext4_get_group_info(sb, group); if (!grp) continue; /* We only do this if the grp has never been initialized */ if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { ret = ext4_mb_init_group(sb, group, GFP_NOFS); if (ret) break; } /* * For all the groups except the last one, last cluster will * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to * change it for the last group, note that last_cluster is * already computed earlier by ext4_get_group_no_and_offset() */ if (group == last_group) end = last_cluster; if (grp->bb_free >= minlen) { cnt = ext4_trim_all_free(sb, group, first_cluster, end, minlen); if (cnt < 0) { ret = cnt; break; } trimmed += cnt; } /* * For every group except the first one, we are sure * that the first cluster to discard will be cluster #0. 
*/ first_cluster = 0; } if (!ret) EXT4_SB(sb)->s_last_trim_minblks = minlen; out: range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits; return ret; } /* Iterate all the free extents in the group. */ int ext4_mballoc_query_range( struct super_block *sb, ext4_group_t group, ext4_grpblk_t first, ext4_grpblk_t end, ext4_mballoc_query_range_fn meta_formatter, ext4_mballoc_query_range_fn formatter, void *priv) { void *bitmap; ext4_grpblk_t start, next; struct ext4_buddy e4b; int error; error = ext4_mb_load_buddy(sb, group, &e4b); if (error) return error; bitmap = e4b.bd_bitmap; ext4_lock_group(sb, group); start = max(e4b.bd_info->bb_first_free, first); if (end >= EXT4_CLUSTERS_PER_GROUP(sb)) end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; if (meta_formatter && start != first) { if (start > end) start = end; ext4_unlock_group(sb, group); error = meta_formatter(sb, group, first, start - first, priv); if (error) goto out_unload; ext4_lock_group(sb, group); } while (start <= end) { start = mb_find_next_zero_bit(bitmap, end + 1, start); if (start > end) break; next = mb_find_next_bit(bitmap, end + 1, start); ext4_unlock_group(sb, group); error = formatter(sb, group, start, next - start, priv); if (error) goto out_unload; ext4_lock_group(sb, group); start = next + 1; } ext4_unlock_group(sb, group); out_unload: ext4_mb_unload_buddy(&e4b); return error; } #ifdef CONFIG_EXT4_KUNIT_TESTS #include "mballoc-test.c" #endif |
/* * fs/nfs/idmap.c * * UID and GID to name mapping for clients. * * Copyright (c) 2002 The Regents of the University of Michigan. * All rights reserved. * * Marius Aamodt Eriksen <marius@umich.edu> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1.
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/types.h> #include <linux/parser.h> #include <linux/fs.h> #include <net/net_namespace.h> #include <linux/sunrpc/rpc_pipe_fs.h> #include <linux/nfs_fs.h> #include <linux/nfs_fs_sb.h> #include <linux/key.h> #include <linux/keyctl.h> #include <linux/key-type.h> #include <keys/user-type.h> #include <keys/request_key_auth-type.h> #include <linux/module.h> #include <linux/user_namespace.h> #include "internal.h" #include "netns.h" #include "nfs4idmap.h" #include "nfs4trace.h" #define NFS_UINT_MAXLEN 11 static const struct cred *id_resolver_cache; static struct key_type key_type_id_resolver_legacy; struct idmap_legacy_upcalldata { struct rpc_pipe_msg pipe_msg; struct idmap_msg idmap_msg; struct key *authkey; struct idmap *idmap; }; struct idmap { struct rpc_pipe_dir_object idmap_pdo; struct rpc_pipe *idmap_pipe; struct idmap_legacy_upcalldata *idmap_upcall_data; struct mutex idmap_mutex; struct user_namespace *user_ns; }; static struct user_namespace *idmap_userns(const struct idmap *idmap) { if (idmap && idmap->user_ns) return idmap->user_ns; return &init_user_ns; } /** * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields * @fattr: fully initialised struct nfs_fattr * @owner_name: owner name string cache * @group_name: group name string cache */ void nfs_fattr_init_names(struct nfs_fattr *fattr, struct nfs4_string *owner_name, struct nfs4_string *group_name) { fattr->owner_name = owner_name; fattr->group_name = group_name; } static void nfs_fattr_free_owner_name(struct nfs_fattr *fattr) { fattr->valid &= ~NFS_ATTR_FATTR_OWNER_NAME; kfree(fattr->owner_name->data); } static void nfs_fattr_free_group_name(struct nfs_fattr *fattr) { fattr->valid &= ~NFS_ATTR_FATTR_GROUP_NAME; kfree(fattr->group_name->data); } static bool nfs_fattr_map_owner_name(struct nfs_server *server, struct nfs_fattr *fattr) { struct nfs4_string *owner = fattr->owner_name; kuid_t uid; if (!(fattr->valid & NFS_ATTR_FATTR_OWNER_NAME)) return false; if (nfs_map_name_to_uid(server, owner->data, owner->len, &uid) == 0) { fattr->uid = uid; fattr->valid |= NFS_ATTR_FATTR_OWNER; } return true; } static bool nfs_fattr_map_group_name(struct nfs_server *server, struct nfs_fattr *fattr) { struct nfs4_string *group = fattr->group_name; kgid_t gid; if 
(!(fattr->valid & NFS_ATTR_FATTR_GROUP_NAME)) return false; if (nfs_map_group_to_gid(server, group->data, group->len, &gid) == 0) { fattr->gid = gid; fattr->valid |= NFS_ATTR_FATTR_GROUP; } return true; } /** * nfs_fattr_free_names - free up the NFSv4 owner and group strings * @fattr: a fully initialised nfs_fattr structure */ void nfs_fattr_free_names(struct nfs_fattr *fattr) { if (fattr->valid & NFS_ATTR_FATTR_OWNER_NAME) nfs_fattr_free_owner_name(fattr); if (fattr->valid & NFS_ATTR_FATTR_GROUP_NAME) nfs_fattr_free_group_name(fattr); } /** * nfs_fattr_map_and_free_names - map owner/group strings into uid/gid and free * @server: pointer to the filesystem nfs_server structure * @fattr: a fully initialised nfs_fattr structure * * This helper maps the cached NFSv4 owner/group strings in fattr into * their numeric uid/gid equivalents, and then frees the cached strings. */ void nfs_fattr_map_and_free_names(struct nfs_server *server, struct nfs_fattr *fattr) { if (nfs_fattr_map_owner_name(server, fattr)) nfs_fattr_free_owner_name(fattr); if (nfs_fattr_map_group_name(server, fattr)) nfs_fattr_free_group_name(fattr); } int nfs_map_string_to_numeric(const char *name, size_t namelen, __u32 *res) { unsigned long val; char buf[16]; if (memchr(name, '@', namelen) != NULL || namelen >= sizeof(buf)) return 0; memcpy(buf, name, namelen); buf[namelen] = '\0'; if (kstrtoul(buf, 0, &val) != 0) return 0; *res = val; return 1; } EXPORT_SYMBOL_GPL(nfs_map_string_to_numeric); static int nfs_map_numeric_to_string(__u32 id, char *buf, size_t buflen) { return snprintf(buf, buflen, "%u", id); } static struct key_type key_type_id_resolver = { .name = "id_resolver", .preparse = user_preparse, .free_preparse = user_free_preparse, .instantiate = generic_key_instantiate, .revoke = user_revoke, .destroy = user_destroy, .describe = user_describe, .read = user_read, }; int nfs_idmap_init(void) { struct cred *cred; struct key *keyring; int ret = 0; printk(KERN_NOTICE "NFS: Registering the %s key type\n", key_type_id_resolver.name); cred = prepare_kernel_cred(&init_task); if (!cred) return -ENOMEM; keyring = keyring_alloc(".id_resolver", GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred, (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ, KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto failed_put_cred; } ret = register_key_type(&key_type_id_resolver); if (ret < 0) goto failed_put_key; ret = register_key_type(&key_type_id_resolver_legacy); if (ret < 0) goto failed_reg_legacy; set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags); cred->thread_keyring = keyring; cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING; id_resolver_cache = cred; return 0; failed_reg_legacy: unregister_key_type(&key_type_id_resolver); failed_put_key: key_put(keyring); failed_put_cred: put_cred(cred); return ret; } void nfs_idmap_quit(void) { key_revoke(id_resolver_cache->thread_keyring); unregister_key_type(&key_type_id_resolver); unregister_key_type(&key_type_id_resolver_legacy); put_cred(id_resolver_cache); } /* * Assemble the description to pass to request_key() * This function will allocate a new string and update dest to point * at it. The caller is responsible for freeing dest. * * On error 0 is returned. Otherwise, the length of dest is returned. 
*/ static ssize_t nfs_idmap_get_desc(const char *name, size_t namelen, const char *type, size_t typelen, char **desc) { char *cp; size_t desclen = typelen + namelen + 2; *desc = kmalloc(desclen, GFP_KERNEL); if (!*desc) return -ENOMEM; cp = *desc; memcpy(cp, type, typelen); cp += typelen; *cp++ = ':'; memcpy(cp, name, namelen); cp += namelen; *cp = '\0'; return desclen; } static struct key *nfs_idmap_request_key(const char *name, size_t namelen, const char *type, struct idmap *idmap) { char *desc; struct key *rkey = ERR_PTR(-EAGAIN); ssize_t ret; ret = nfs_idmap_get_desc(name, namelen, type, strlen(type), &desc); if (ret < 0) return ERR_PTR(ret); if (!idmap->user_ns || idmap->user_ns == &init_user_ns) rkey = request_key(&key_type_id_resolver, desc, ""); if (IS_ERR(rkey)) { mutex_lock(&idmap->idmap_mutex); rkey = request_key_with_auxdata(&key_type_id_resolver_legacy, desc, NULL, "", 0, idmap); mutex_unlock(&idmap->idmap_mutex); } if (!IS_ERR(rkey)) set_bit(KEY_FLAG_ROOT_CAN_INVAL, &rkey->flags); kfree(desc); return rkey; } static ssize_t nfs_idmap_get_key(const char *name, size_t namelen, const char *type, void *data, size_t data_size, struct idmap *idmap) { const struct cred *saved_cred; struct key *rkey; const struct user_key_payload *payload; ssize_t ret; saved_cred = override_creds(id_resolver_cache); rkey = nfs_idmap_request_key(name, namelen, type, idmap); revert_creds(saved_cred); if (IS_ERR(rkey)) { ret = PTR_ERR(rkey); goto out; } rcu_read_lock(); rkey->perm |= KEY_USR_VIEW; ret = key_validate(rkey); if (ret < 0) goto out_up; payload = user_key_payload_rcu(rkey); if (IS_ERR_OR_NULL(payload)) { ret = PTR_ERR(payload); goto out_up; } ret = payload->datalen; if (ret > 0 && ret <= data_size) memcpy(data, payload->data, ret); else ret = -EINVAL; out_up: rcu_read_unlock(); key_put(rkey); out: return ret; } /* ID -> Name */ static ssize_t nfs_idmap_lookup_name(__u32 id, const char *type, char *buf, size_t buflen, struct idmap *idmap) { char id_str[NFS_UINT_MAXLEN]; int id_len; ssize_t ret; id_len = nfs_map_numeric_to_string(id, id_str, sizeof(id_str)); ret = nfs_idmap_get_key(id_str, id_len, type, buf, buflen, idmap); if (ret < 0) return -EINVAL; return ret; } /* Name -> ID */ static int nfs_idmap_lookup_id(const char *name, size_t namelen, const char *type, __u32 *id, struct idmap *idmap) { char id_str[NFS_UINT_MAXLEN]; long id_long; ssize_t data_size; int ret = 0; data_size = nfs_idmap_get_key(name, namelen, type, id_str, NFS_UINT_MAXLEN, idmap); if (data_size <= 0) { ret = -EINVAL; } else { ret = kstrtol(id_str, 10, &id_long); if (!ret) *id = (__u32)id_long; } return ret; } /* idmap classic begins here */ enum { Opt_find_uid, Opt_find_gid, Opt_find_user, Opt_find_group, Opt_find_err }; static const match_table_t nfs_idmap_tokens = { { Opt_find_uid, "uid:%s" }, { Opt_find_gid, "gid:%s" }, { Opt_find_user, "user:%s" }, { Opt_find_group, "group:%s" }, { Opt_find_err, NULL } }; static int nfs_idmap_legacy_upcall(struct key *, void *); static ssize_t idmap_pipe_downcall(struct file *, const char __user *, size_t); static void idmap_release_pipe(struct inode *); static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *); static const struct rpc_pipe_ops idmap_upcall_ops = { .upcall = rpc_pipe_generic_upcall, .downcall = idmap_pipe_downcall, .release_pipe = idmap_release_pipe, .destroy_msg = idmap_pipe_destroy_msg, }; static struct key_type key_type_id_resolver_legacy = { .name = "id_legacy", .preparse = user_preparse, .free_preparse = user_free_preparse, .instantiate = 
generic_key_instantiate, .revoke = user_revoke, .destroy = user_destroy, .describe = user_describe, .read = user_read, .request_key = nfs_idmap_legacy_upcall, }; static void nfs_idmap_pipe_destroy(struct dentry *dir, struct rpc_pipe_dir_object *pdo) { struct idmap *idmap = pdo->pdo_data; struct rpc_pipe *pipe = idmap->idmap_pipe; if (pipe->dentry) { rpc_unlink(pipe->dentry); pipe->dentry = NULL; } } static int nfs_idmap_pipe_create(struct dentry *dir, struct rpc_pipe_dir_object *pdo) { struct idmap *idmap = pdo->pdo_data; struct rpc_pipe *pipe = idmap->idmap_pipe; struct dentry *dentry; dentry = rpc_mkpipe_dentry(dir, "idmap", idmap, pipe); if (IS_ERR(dentry)) return PTR_ERR(dentry); pipe->dentry = dentry; return 0; } static const struct rpc_pipe_dir_object_ops nfs_idmap_pipe_dir_object_ops = { .create = nfs_idmap_pipe_create, .destroy = nfs_idmap_pipe_destroy, }; int nfs_idmap_new(struct nfs_client *clp) { struct idmap *idmap; struct rpc_pipe *pipe; int error; idmap = kzalloc(sizeof(*idmap), GFP_KERNEL); if (idmap == NULL) return -ENOMEM; mutex_init(&idmap->idmap_mutex); idmap->user_ns = get_user_ns(clp->cl_rpcclient->cl_cred->user_ns); rpc_init_pipe_dir_object(&idmap->idmap_pdo, &nfs_idmap_pipe_dir_object_ops, idmap); pipe = rpc_mkpipe_data(&idmap_upcall_ops, 0); if (IS_ERR(pipe)) { error = PTR_ERR(pipe); goto err; } idmap->idmap_pipe = pipe; error = rpc_add_pipe_dir_object(clp->cl_net, &clp->cl_rpcclient->cl_pipedir_objects, &idmap->idmap_pdo); if (error) goto err_destroy_pipe; clp->cl_idmap = idmap; return 0; err_destroy_pipe: rpc_destroy_pipe_data(idmap->idmap_pipe); err: put_user_ns(idmap->user_ns); kfree(idmap); return error; } void nfs_idmap_delete(struct nfs_client *clp) { struct idmap *idmap = clp->cl_idmap; if (!idmap) return; clp->cl_idmap = NULL; rpc_remove_pipe_dir_object(clp->cl_net, &clp->cl_rpcclient->cl_pipedir_objects, &idmap->idmap_pdo); rpc_destroy_pipe_data(idmap->idmap_pipe); put_user_ns(idmap->user_ns); kfree(idmap); } static int nfs_idmap_prepare_message(char *desc, struct idmap *idmap, struct idmap_msg *im, struct rpc_pipe_msg *msg) { substring_t substr; int token, ret; im->im_type = IDMAP_TYPE_GROUP; token = match_token(desc, nfs_idmap_tokens, &substr); switch (token) { case Opt_find_uid: im->im_type = IDMAP_TYPE_USER; fallthrough; case Opt_find_gid: im->im_conv = IDMAP_CONV_NAMETOID; ret = match_strlcpy(im->im_name, &substr, IDMAP_NAMESZ); break; case Opt_find_user: im->im_type = IDMAP_TYPE_USER; fallthrough; case Opt_find_group: im->im_conv = IDMAP_CONV_IDTONAME; ret = match_int(&substr, &im->im_id); if (ret) goto out; break; default: ret = -EINVAL; goto out; } msg->data = im; msg->len = sizeof(struct idmap_msg); out: return ret; } static bool nfs_idmap_prepare_pipe_upcall(struct idmap *idmap, struct idmap_legacy_upcalldata *data) { if (idmap->idmap_upcall_data != NULL) { WARN_ON_ONCE(1); return false; } idmap->idmap_upcall_data = data; return true; } static void nfs_idmap_complete_pipe_upcall(struct idmap_legacy_upcalldata *data, int ret) { complete_request_key(data->authkey, ret); key_put(data->authkey); kfree(data); } static void nfs_idmap_abort_pipe_upcall(struct idmap *idmap, struct idmap_legacy_upcalldata *data, int ret) { if (cmpxchg(&idmap->idmap_upcall_data, data, NULL) == data) nfs_idmap_complete_pipe_upcall(data, ret); } static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux) { struct idmap_legacy_upcalldata *data; struct request_key_auth *rka = get_request_key_auth(authkey); struct rpc_pipe_msg *msg; struct idmap_msg *im; struct idmap 
*idmap = aux; struct key *key = rka->target_key; int ret = -ENOKEY; if (!aux) goto out1; /* msg and im are freed in idmap_pipe_destroy_msg */ ret = -ENOMEM; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) goto out1; msg = &data->pipe_msg; im = &data->idmap_msg; data->idmap = idmap; data->authkey = key_get(authkey); ret = nfs_idmap_prepare_message(key->description, idmap, im, msg); if (ret < 0) goto out2; ret = -EAGAIN; if (!nfs_idmap_prepare_pipe_upcall(idmap, data)) goto out2; ret = rpc_queue_upcall(idmap->idmap_pipe, msg); if (ret < 0) nfs_idmap_abort_pipe_upcall(idmap, data, ret); return ret; out2: kfree(data); out1: complete_request_key(authkey, ret); return ret; } static int nfs_idmap_instantiate(struct key *key, struct key *authkey, char *data, size_t datalen) { return key_instantiate_and_link(key, data, datalen, id_resolver_cache->thread_keyring, authkey); } static int nfs_idmap_read_and_verify_message(struct idmap_msg *im, struct idmap_msg *upcall, struct key *key, struct key *authkey) { char id_str[NFS_UINT_MAXLEN]; size_t len; int ret = -ENOKEY; /* ret = -ENOKEY */ if (upcall->im_type != im->im_type || upcall->im_conv != im->im_conv) goto out; switch (im->im_conv) { case IDMAP_CONV_NAMETOID: if (strcmp(upcall->im_name, im->im_name) != 0) break; /* Note: here we store the NUL terminator too */ len = 1 + nfs_map_numeric_to_string(im->im_id, id_str, sizeof(id_str)); ret = nfs_idmap_instantiate(key, authkey, id_str, len); break; case IDMAP_CONV_IDTONAME: if (upcall->im_id != im->im_id) break; len = strlen(im->im_name); ret = nfs_idmap_instantiate(key, authkey, im->im_name, len); break; default: ret = -EINVAL; } out: return ret; } static ssize_t idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) { struct request_key_auth *rka; struct rpc_inode *rpci = RPC_I(file_inode(filp)); struct idmap *idmap = (struct idmap *)rpci->private; struct idmap_legacy_upcalldata *data; struct key *authkey; struct idmap_msg im; size_t namelen_in; int ret = -ENOKEY; /* If instantiation is successful, anyone waiting for key construction * will have been woken up and someone else may now have used * idmap_key_cons - so after this point we may no longer touch it. 
*/ data = xchg(&idmap->idmap_upcall_data, NULL); if (data == NULL) goto out_noupcall; authkey = data->authkey; rka = get_request_key_auth(authkey); if (mlen != sizeof(im)) { ret = -ENOSPC; goto out; } if (copy_from_user(&im, src, mlen) != 0) { ret = -EFAULT; goto out; } if (!(im.im_status & IDMAP_STATUS_SUCCESS)) { ret = -ENOKEY; goto out; } namelen_in = strnlen(im.im_name, IDMAP_NAMESZ); if (namelen_in == 0 || namelen_in == IDMAP_NAMESZ) { ret = -EINVAL; goto out; } ret = nfs_idmap_read_and_verify_message(&im, &data->idmap_msg, rka->target_key, authkey); if (ret >= 0) { key_set_timeout(rka->target_key, nfs_idmap_cache_timeout); ret = mlen; } out: nfs_idmap_complete_pipe_upcall(data, ret); out_noupcall: return ret; } static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg) { struct idmap_legacy_upcalldata *data = container_of(msg, struct idmap_legacy_upcalldata, pipe_msg); struct idmap *idmap = data->idmap; if (msg->errno) nfs_idmap_abort_pipe_upcall(idmap, data, msg->errno); } static void idmap_release_pipe(struct inode *inode) { struct rpc_inode *rpci = RPC_I(inode); struct idmap *idmap = (struct idmap *)rpci->private; struct idmap_legacy_upcalldata *data; data = xchg(&idmap->idmap_upcall_data, NULL); if (data) nfs_idmap_complete_pipe_upcall(data, -EPIPE); } int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, kuid_t *uid) { struct idmap *idmap = server->nfs_client->cl_idmap; __u32 id = -1; int ret = 0; if (!nfs_map_string_to_numeric(name, namelen, &id)) ret = nfs_idmap_lookup_id(name, namelen, "uid", &id, idmap); if (ret == 0) { *uid = make_kuid(idmap_userns(idmap), id); if (!uid_valid(*uid)) ret = -ERANGE; } trace_nfs4_map_name_to_uid(name, namelen, id, ret); return ret; } int nfs_map_group_to_gid(const struct nfs_server *server, const char *name, size_t namelen, kgid_t *gid) { struct idmap *idmap = server->nfs_client->cl_idmap; __u32 id = -1; int ret = 0; if (!nfs_map_string_to_numeric(name, namelen, &id)) ret = nfs_idmap_lookup_id(name, namelen, "gid", &id, idmap); if (ret == 0) { *gid = make_kgid(idmap_userns(idmap), id); if (!gid_valid(*gid)) ret = -ERANGE; } trace_nfs4_map_group_to_gid(name, namelen, id, ret); return ret; } int nfs_map_uid_to_name(const struct nfs_server *server, kuid_t uid, char *buf, size_t buflen) { struct idmap *idmap = server->nfs_client->cl_idmap; int ret = -EINVAL; __u32 id; id = from_kuid_munged(idmap_userns(idmap), uid); if (!(server->caps & NFS_CAP_UIDGID_NOMAP)) ret = nfs_idmap_lookup_name(id, "user", buf, buflen, idmap); if (ret < 0) ret = nfs_map_numeric_to_string(id, buf, buflen); trace_nfs4_map_uid_to_name(buf, ret, id, ret); return ret; } int nfs_map_gid_to_group(const struct nfs_server *server, kgid_t gid, char *buf, size_t buflen) { struct idmap *idmap = server->nfs_client->cl_idmap; int ret = -EINVAL; __u32 id; id = from_kgid_munged(idmap_userns(idmap), gid); if (!(server->caps & NFS_CAP_UIDGID_NOMAP)) ret = nfs_idmap_lookup_name(id, "group", buf, buflen, idmap); if (ret < 0) ret = nfs_map_numeric_to_string(id, buf, buflen); trace_nfs4_map_gid_to_group(buf, ret, id, ret); return ret; } |
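/*
 * Illustrative sketch: the numeric fast path of nfs_map_string_to_numeric(),
 * redone as a self-contained userspace helper so the rule is easy to see.
 * A candidate owner string is treated as a raw uid/gid only when it contains
 * no '@' (i.e. it is not a "user@domain" name) and fits the scratch buffer;
 * anything else falls back to the idmapper upcall.  strtoul() stands in for
 * kstrtoul() here.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int map_string_to_numeric(const char *name, size_t namelen,
				 unsigned int *res)
{
	char buf[16];
	char *end;
	unsigned long val;

	if (memchr(name, '@', namelen) != NULL || namelen >= sizeof(buf))
		return 0;			/* must go through the idmapper */
	memcpy(buf, name, namelen);
	buf[namelen] = '\0';
	val = strtoul(buf, &end, 0);
	if (end == buf || *end != '\0')
		return 0;
	*res = (unsigned int)val;
	return 1;				/* mapped without an upcall */
}

int main(void)
{
	unsigned int id = 0;
	int ok;

	ok = map_string_to_numeric("1000", 4, &id);
	printf("\"1000\"      -> ok=%d id=%u\n", ok, id);
	ok = map_string_to_numeric("bob@local", 9, &id);
	printf("\"bob@local\" -> ok=%d (needs the upcall path)\n", ok);
	return 0;
}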
1 1 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 | // SPDX-License-Identifier: GPL-2.0-only /* * stack_o2cb.c * * Code which interfaces ocfs2 with the o2cb stack. * * Copyright (C) 2007 Oracle. All rights reserved. */ #include <linux/kernel.h> #include <linux/crc32.h> #include <linux/slab.h> #include <linux/module.h> /* Needed for AOP_TRUNCATED_PAGE in mlog_errno() */ #include <linux/fs.h> #include "cluster/masklog.h" #include "cluster/nodemanager.h" #include "cluster/heartbeat.h" #include "cluster/tcp.h" #include "stackglue.h" struct o2dlm_private { struct dlm_eviction_cb op_eviction_cb; }; static struct ocfs2_stack_plugin o2cb_stack; /* These should be identical */ #if (DLM_LOCK_IV != LKM_IVMODE) # error Lock modes do not match #endif #if (DLM_LOCK_NL != LKM_NLMODE) # error Lock modes do not match #endif #if (DLM_LOCK_CR != LKM_CRMODE) # error Lock modes do not match #endif #if (DLM_LOCK_CW != LKM_CWMODE) # error Lock modes do not match #endif #if (DLM_LOCK_PR != LKM_PRMODE) # error Lock modes do not match #endif #if (DLM_LOCK_PW != LKM_PWMODE) # error Lock modes do not match #endif #if (DLM_LOCK_EX != LKM_EXMODE) # error Lock modes do not match #endif static inline int mode_to_o2dlm(int mode) { BUG_ON(mode > LKM_MAXMODE); return mode; } static int flags_to_o2dlm(u32 flags) { int o2dlm_flags = 0; if (flags & DLM_LKF_NOQUEUE) o2dlm_flags |= LKM_NOQUEUE; if (flags & DLM_LKF_CANCEL) o2dlm_flags |= LKM_CANCEL; if (flags & DLM_LKF_CONVERT) o2dlm_flags |= LKM_CONVERT; if (flags & DLM_LKF_VALBLK) o2dlm_flags |= LKM_VALBLK; if (flags & DLM_LKF_IVVALBLK) o2dlm_flags |= LKM_INVVALBLK; if (flags & DLM_LKF_ORPHAN) o2dlm_flags |= LKM_ORPHAN; if (flags & DLM_LKF_FORCEUNLOCK) o2dlm_flags |= LKM_FORCE; if (flags & DLM_LKF_TIMEOUT) o2dlm_flags |= LKM_TIMEOUT; if (flags & DLM_LKF_LOCAL) o2dlm_flags |= LKM_LOCAL; return o2dlm_flags; } /* * Map an o2dlm status to standard errno values. 
* * o2dlm only uses a handful of these, and returns even fewer to the * caller. Still, we try to assign sane values to each error. * * The following value pairs have special meanings to dlmglue, thus * the right hand side needs to stay unique - never duplicate the * mapping elsewhere in the table! * * DLM_NORMAL: 0 * DLM_NOTQUEUED: -EAGAIN * DLM_CANCELGRANT: -EBUSY * DLM_CANCEL: -DLM_ECANCEL */ /* Keep in sync with dlmapi.h */ static int status_map[] = { [DLM_NORMAL] = 0, /* Success */ [DLM_GRANTED] = -EINVAL, [DLM_DENIED] = -EACCES, [DLM_DENIED_NOLOCKS] = -EACCES, [DLM_WORKING] = -EACCES, [DLM_BLOCKED] = -EINVAL, [DLM_BLOCKED_ORPHAN] = -EINVAL, [DLM_DENIED_GRACE_PERIOD] = -EACCES, [DLM_SYSERR] = -ENOMEM, /* It is what it is */ [DLM_NOSUPPORT] = -EPROTO, [DLM_CANCELGRANT] = -EBUSY, /* Cancel after grant */ [DLM_IVLOCKID] = -EINVAL, [DLM_SYNC] = -EINVAL, [DLM_BADTYPE] = -EINVAL, [DLM_BADRESOURCE] = -EINVAL, [DLM_MAXHANDLES] = -ENOMEM, [DLM_NOCLINFO] = -EINVAL, [DLM_NOLOCKMGR] = -EINVAL, [DLM_NOPURGED] = -EINVAL, [DLM_BADARGS] = -EINVAL, [DLM_VOID] = -EINVAL, [DLM_NOTQUEUED] = -EAGAIN, /* Trylock failed */ [DLM_IVBUFLEN] = -EINVAL, [DLM_CVTUNGRANT] = -EPERM, [DLM_BADPARAM] = -EINVAL, [DLM_VALNOTVALID] = -EINVAL, [DLM_REJECTED] = -EPERM, [DLM_ABORT] = -EINVAL, [DLM_CANCEL] = -DLM_ECANCEL, /* Successful cancel */ [DLM_IVRESHANDLE] = -EINVAL, [DLM_DEADLOCK] = -EDEADLK, [DLM_DENIED_NOASTS] = -EINVAL, [DLM_FORWARD] = -EINVAL, [DLM_TIMEOUT] = -ETIMEDOUT, [DLM_IVGROUPID] = -EINVAL, [DLM_VERS_CONFLICT] = -EOPNOTSUPP, [DLM_BAD_DEVICE_PATH] = -ENOENT, [DLM_NO_DEVICE_PERMISSION] = -EPERM, [DLM_NO_CONTROL_DEVICE] = -ENOENT, [DLM_RECOVERING] = -ENOTCONN, [DLM_MIGRATING] = -ERESTART, [DLM_MAXSTATS] = -EINVAL, }; static int dlm_status_to_errno(enum dlm_status status) { BUG_ON(status < 0 || status >= ARRAY_SIZE(status_map)); return status_map[status]; } static void o2dlm_lock_ast_wrapper(void *astarg) { struct ocfs2_dlm_lksb *lksb = astarg; lksb->lksb_conn->cc_proto->lp_lock_ast(lksb); } static void o2dlm_blocking_ast_wrapper(void *astarg, int level) { struct ocfs2_dlm_lksb *lksb = astarg; lksb->lksb_conn->cc_proto->lp_blocking_ast(lksb, level); } static void o2dlm_unlock_ast_wrapper(void *astarg, enum dlm_status status) { struct ocfs2_dlm_lksb *lksb = astarg; int error = dlm_status_to_errno(status); /* * In o2dlm, you can get both the lock_ast() for the lock being * granted and the unlock_ast() for the CANCEL failing. A * successful cancel sends DLM_NORMAL here. If the * lock grant happened before the cancel arrived, you get * DLM_CANCELGRANT. * * There's no need for the double-ast. If we see DLM_CANCELGRANT, * we just ignore it. We expect the lock_ast() to handle the * granted lock. 
*/ if (status == DLM_CANCELGRANT) return; lksb->lksb_conn->cc_proto->lp_unlock_ast(lksb, error); } static int o2cb_dlm_lock(struct ocfs2_cluster_connection *conn, int mode, struct ocfs2_dlm_lksb *lksb, u32 flags, void *name, unsigned int namelen) { enum dlm_status status; int o2dlm_mode = mode_to_o2dlm(mode); int o2dlm_flags = flags_to_o2dlm(flags); int ret; status = dlmlock(conn->cc_lockspace, o2dlm_mode, &lksb->lksb_o2dlm, o2dlm_flags, name, namelen, o2dlm_lock_ast_wrapper, lksb, o2dlm_blocking_ast_wrapper); ret = dlm_status_to_errno(status); return ret; } static int o2cb_dlm_unlock(struct ocfs2_cluster_connection *conn, struct ocfs2_dlm_lksb *lksb, u32 flags) { enum dlm_status status; int o2dlm_flags = flags_to_o2dlm(flags); int ret; status = dlmunlock(conn->cc_lockspace, &lksb->lksb_o2dlm, o2dlm_flags, o2dlm_unlock_ast_wrapper, lksb); ret = dlm_status_to_errno(status); return ret; } static int o2cb_dlm_lock_status(struct ocfs2_dlm_lksb *lksb) { return dlm_status_to_errno(lksb->lksb_o2dlm.status); } /* * o2dlm always has a "valid" LVB. If the dlm loses track of the LVB * contents, it will zero out the LVB. Thus the caller can always trust * the contents. */ static int o2cb_dlm_lvb_valid(struct ocfs2_dlm_lksb *lksb) { return 1; } static void *o2cb_dlm_lvb(struct ocfs2_dlm_lksb *lksb) { return (void *)(lksb->lksb_o2dlm.lvb); } static void o2cb_dump_lksb(struct ocfs2_dlm_lksb *lksb) { dlm_print_one_lock(lksb->lksb_o2dlm.lockid); } /* * Check if this node is heartbeating and is connected to all other * heartbeating nodes. */ static int o2cb_cluster_check(void) { u8 node_num; int i; unsigned long hbmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; unsigned long netmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; node_num = o2nm_this_node(); if (node_num == O2NM_MAX_NODES) { printk(KERN_ERR "o2cb: This node has not been configured.\n"); return -EINVAL; } /* * o2dlm expects o2net sockets to be created. If not, then * dlm_join_domain() fails with a stack of errors which are both cryptic * and incomplete. The idea here is to detect upfront whether we have * managed to connect to all nodes or not. If not, then list the nodes * to allow the user to check the configuration (incorrect IP, firewall, * etc.) Yes, this is racy. But its not the end of the world. */ #define O2CB_MAP_STABILIZE_COUNT 60 for (i = 0; i < O2CB_MAP_STABILIZE_COUNT; ++i) { o2hb_fill_node_map(hbmap, O2NM_MAX_NODES); if (!test_bit(node_num, hbmap)) { printk(KERN_ERR "o2cb: %s heartbeat has not been " "started.\n", (o2hb_global_heartbeat_active() ? "Global" : "Local")); return -EINVAL; } o2net_fill_node_map(netmap, O2NM_MAX_NODES); /* Force set the current node to allow easy compare */ set_bit(node_num, netmap); if (bitmap_equal(hbmap, netmap, O2NM_MAX_NODES)) return 0; if (i < O2CB_MAP_STABILIZE_COUNT - 1) msleep(1000); } printk(KERN_ERR "o2cb: This node could not connect to nodes:"); i = -1; while ((i = find_next_bit(hbmap, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) { if (!test_bit(i, netmap)) printk(" %u", i); } printk(".\n"); return -ENOTCONN; } /* * Called from the dlm when it's about to evict a node. This is how the * classic stack signals node death. 
*/ static void o2dlm_eviction_cb(int node_num, void *data) { struct ocfs2_cluster_connection *conn = data; printk(KERN_NOTICE "o2cb: o2dlm has evicted node %d from domain %.*s\n", node_num, conn->cc_namelen, conn->cc_name); conn->cc_recovery_handler(node_num, conn->cc_recovery_data); } static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn) { int rc = 0; u32 dlm_key; struct dlm_ctxt *dlm; struct o2dlm_private *priv; struct dlm_protocol_version fs_version; BUG_ON(conn == NULL); BUG_ON(conn->cc_proto == NULL); /* Ensure cluster stack is up and all nodes are connected */ rc = o2cb_cluster_check(); if (rc) { printk(KERN_ERR "o2cb: Cluster check failed. Fix errors " "before retrying.\n"); goto out; } priv = kzalloc(sizeof(struct o2dlm_private), GFP_KERNEL); if (!priv) { rc = -ENOMEM; goto out_free; } /* This just fills the structure in. It is safe to pass conn. */ dlm_setup_eviction_cb(&priv->op_eviction_cb, o2dlm_eviction_cb, conn); conn->cc_private = priv; /* used by the dlm code to make message headers unique, each * node in this domain must agree on this. */ dlm_key = crc32_le(0, conn->cc_name, conn->cc_namelen); fs_version.pv_major = conn->cc_version.pv_major; fs_version.pv_minor = conn->cc_version.pv_minor; dlm = dlm_register_domain(conn->cc_name, dlm_key, &fs_version); if (IS_ERR(dlm)) { rc = PTR_ERR(dlm); mlog_errno(rc); goto out_free; } conn->cc_version.pv_major = fs_version.pv_major; conn->cc_version.pv_minor = fs_version.pv_minor; conn->cc_lockspace = dlm; dlm_register_eviction_cb(dlm, &priv->op_eviction_cb); out_free: if (rc) kfree(conn->cc_private); out: return rc; } static int o2cb_cluster_disconnect(struct ocfs2_cluster_connection *conn) { struct dlm_ctxt *dlm = conn->cc_lockspace; struct o2dlm_private *priv = conn->cc_private; dlm_unregister_eviction_cb(&priv->op_eviction_cb); conn->cc_private = NULL; kfree(priv); dlm_unregister_domain(dlm); conn->cc_lockspace = NULL; return 0; } static int o2cb_cluster_this_node(struct ocfs2_cluster_connection *conn, unsigned int *node) { int node_num; node_num = o2nm_this_node(); if (node_num == O2NM_INVALID_NODE_NUM) return -ENOENT; if (node_num >= O2NM_MAX_NODES) return -EOVERFLOW; *node = node_num; return 0; } static const struct ocfs2_stack_operations o2cb_stack_ops = { .connect = o2cb_cluster_connect, .disconnect = o2cb_cluster_disconnect, .this_node = o2cb_cluster_this_node, .dlm_lock = o2cb_dlm_lock, .dlm_unlock = o2cb_dlm_unlock, .lock_status = o2cb_dlm_lock_status, .lvb_valid = o2cb_dlm_lvb_valid, .lock_lvb = o2cb_dlm_lvb, .dump_lksb = o2cb_dump_lksb, }; static struct ocfs2_stack_plugin o2cb_stack = { .sp_name = "o2cb", .sp_ops = &o2cb_stack_ops, .sp_owner = THIS_MODULE, }; static int __init o2cb_stack_init(void) { return ocfs2_stack_glue_register(&o2cb_stack); } static void __exit o2cb_stack_exit(void) { ocfs2_stack_glue_unregister(&o2cb_stack); } MODULE_AUTHOR("Oracle"); MODULE_DESCRIPTION("ocfs2 driver for the classic o2cb stack"); MODULE_LICENSE("GPL"); module_init(o2cb_stack_init); module_exit(o2cb_stack_exit); |
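/*
 * Illustrative sketch: the designated-initializer lookup table used by
 * dlm_status_to_errno(), reduced to a tiny standalone example.  The enum and
 * the errno values chosen here are stand-ins invented for the sketch; only
 * the table technique and the bounds check mirror the code above.
 */
#include <errno.h>
#include <stdio.h>

enum demo_status { DEMO_OK, DEMO_BUSY, DEMO_TRYAGAIN, DEMO_MAX };

static const int demo_status_map[] = {
	[DEMO_OK]	= 0,
	[DEMO_BUSY]	= -EBUSY,
	[DEMO_TRYAGAIN]	= -EAGAIN,
};

static int demo_status_to_errno(enum demo_status status)
{
	/* mirrors the BUG_ON() bounds check in dlm_status_to_errno() */
	if ((int)status < 0 || status >= DEMO_MAX)
		return -EINVAL;
	return demo_status_map[status];
}

int main(void)
{
	printf("DEMO_TRYAGAIN -> %d\n", demo_status_to_errno(DEMO_TRYAGAIN));
	return 0;
}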
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi)
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/timer.h>
#include <net/ax25.h>
#include <linux/skbuff.h>
#include <net/netrom.h>
#include <linux/init.h>

static void nr_loopback_timer(struct timer_list *);

static struct sk_buff_head loopback_queue;
static DEFINE_TIMER(loopback_timer, nr_loopback_timer);

void __init nr_loopback_init(void)
{
	skb_queue_head_init(&loopback_queue);
}

static inline int nr_loopback_running(void)
{
	return timer_pending(&loopback_timer);
}

int nr_loopback_queue(struct sk_buff *skb)
{
	struct sk_buff *skbn;

	if ((skbn = alloc_skb(skb->len, GFP_ATOMIC)) != NULL) {
		skb_copy_from_linear_data(skb, skb_put(skbn, skb->len), skb->len);
		skb_reset_transport_header(skbn);

		skb_queue_tail(&loopback_queue, skbn);

		if (!nr_loopback_running())
			mod_timer(&loopback_timer, jiffies + 10);
	}

	kfree_skb(skb);
	return 1;
}

static void nr_loopback_timer(struct timer_list *unused)
{
	struct sk_buff *skb;
	ax25_address *nr_dest;
	struct net_device *dev;

	if ((skb = skb_dequeue(&loopback_queue)) != NULL) {
		nr_dest = (ax25_address *)(skb->data + 7);

		dev = nr_dev_get(nr_dest);

		if (dev == NULL || nr_rx_frame(skb, dev) == 0)
			kfree_skb(skb);

		dev_put(dev);

		if (!skb_queue_empty(&loopback_queue) && !nr_loopback_running())
			mod_timer(&loopback_timer, jiffies + 10);
	}
}

void nr_loopback_clear(void)
{
	timer_delete_sync(&loopback_timer);
	skb_queue_purge(&loopback_queue);
}
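/*
 * Illustrative sketch: the "queue the work, arm one timer only if it is not
 * already pending" idiom used by nr_loopback_queue()/nr_loopback_timer(),
 * reduced to plain C.  A counter stands in for the skb queue and a flag
 * stands in for timer_pending(); both names are invented for the example.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int queued;		/* stands in for loopback_queue */
static bool timer_armed;		/* stands in for timer_pending() */

static void demo_arm_timer(void)
{
	timer_armed = true;		/* think mod_timer(..., jiffies + 10) */
}

static void demo_enqueue(void)
{
	queued++;
	if (!timer_armed)		/* at most one pending timer */
		demo_arm_timer();
}

static void demo_timer_fires(void)
{
	timer_armed = false;
	if (queued) {
		queued--;		/* process one item per expiry */
		printf("processed one frame, %u left\n", queued);
	}
	if (queued && !timer_armed)	/* re-arm while a backlog remains */
		demo_arm_timer();
}

int main(void)
{
	demo_enqueue();
	demo_enqueue();
	demo_timer_fires();
	demo_timer_fires();
	return 0;
}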
90 90 1 90 90 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 | // SPDX-License-Identifier: GPL-2.0-or-later /* * Linux I2C core OF support code * * Copyright (C) 2008 Jochen Friedrich <jochen@scram.de> * based on a previous patch from Jon Smirl <jonsmirl@gmail.com> * * Copyright (C) 2013, 2018 Wolfram Sang <wsa@kernel.org> */ #include <dt-bindings/i2c/i2c.h> #include <linux/device.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/sysfs.h> #include "i2c-core.h" int of_i2c_get_board_info(struct device *dev, struct device_node *node, struct i2c_board_info *info) { u32 addr; int ret; memset(info, 0, sizeof(*info)); if (of_alias_from_compatible(node, info->type, sizeof(info->type)) < 0) { dev_err(dev, "of_i2c: modalias failure on %pOF\n", node); return -EINVAL; } ret = of_property_read_u32(node, "reg", &addr); if (ret) { dev_err(dev, "of_i2c: invalid reg on %pOF\n", node); return ret; } if (addr & I2C_TEN_BIT_ADDRESS) { addr &= ~I2C_TEN_BIT_ADDRESS; info->flags |= I2C_CLIENT_TEN; } if (addr & I2C_OWN_SLAVE_ADDRESS) { addr &= ~I2C_OWN_SLAVE_ADDRESS; info->flags |= I2C_CLIENT_SLAVE; } info->addr = addr; info->of_node = node; info->fwnode = of_fwnode_handle(node); if (of_property_read_bool(node, "host-notify")) info->flags |= I2C_CLIENT_HOST_NOTIFY; if (of_property_read_bool(node, "wakeup-source")) info->flags |= I2C_CLIENT_WAKE; return 0; } EXPORT_SYMBOL_GPL(of_i2c_get_board_info); static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap, struct device_node *node) { struct i2c_client *client; struct i2c_board_info info; int ret; dev_dbg(&adap->dev, "of_i2c: register %pOF\n", node); ret = of_i2c_get_board_info(&adap->dev, node, &info); if (ret) return ERR_PTR(ret); client = i2c_new_client_device(adap, &info); if (IS_ERR(client)) dev_err(&adap->dev, "of_i2c: Failure registering %pOF\n", node); return client; } void of_i2c_register_devices(struct i2c_adapter *adap) { struct device_node *bus, *node; struct i2c_client *client; /* Only register child devices if the adapter has a node pointer set */ if (!adap->dev.of_node) return; dev_dbg(&adap->dev, "of_i2c: walking child nodes\n"); bus = of_get_child_by_name(adap->dev.of_node, "i2c-bus"); if (!bus) bus = of_node_get(adap->dev.of_node); for_each_available_child_of_node(bus, node) { if (of_node_test_and_set_flag(node, OF_POPULATED)) continue; client = of_i2c_register_device(adap, node); if (IS_ERR(client)) { dev_err(&adap->dev, "Failed to create I2C device for %pOF\n", node); of_node_clear_flag(node, OF_POPULATED); } } of_node_put(bus); } static const struct of_device_id* i2c_of_match_device_sysfs(const struct of_device_id *matches, struct i2c_client *client) { const char *name; for (; matches->compatible[0]; matches++) { /* * Adding 
devices through the i2c sysfs interface provides us * a string to match which may be compatible with the device * tree compatible strings, however with no actual of_node the * of_match_device() will not match */ if (sysfs_streq(client->name, matches->compatible)) return matches; name = strchr(matches->compatible, ','); if (!name) name = matches->compatible; else name++; if (sysfs_streq(client->name, name)) return matches; } return NULL; } const struct of_device_id *i2c_of_match_device(const struct of_device_id *matches, struct i2c_client *client) { const struct of_device_id *match; if (!(client && matches)) return NULL; match = of_match_device(matches, &client->dev); if (match) return match; return i2c_of_match_device_sysfs(matches, client); } #if IS_ENABLED(CONFIG_OF_DYNAMIC) static int of_i2c_notify(struct notifier_block *nb, unsigned long action, void *arg) { struct of_reconfig_data *rd = arg; struct i2c_adapter *adap; struct i2c_client *client; switch (of_reconfig_get_state_change(action, rd)) { case OF_RECONFIG_CHANGE_ADD: adap = of_find_i2c_adapter_by_node(rd->dn->parent); if (adap == NULL) return NOTIFY_OK; /* not for us */ if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { put_device(&adap->dev); return NOTIFY_OK; } /* * Clear the flag before adding the device so that fw_devlink * doesn't skip adding consumers to this device. */ rd->dn->fwnode.flags &= ~FWNODE_FLAG_NOT_DEVICE; client = of_i2c_register_device(adap, rd->dn); if (IS_ERR(client)) { dev_err(&adap->dev, "failed to create client for '%pOF'\n", rd->dn); put_device(&adap->dev); of_node_clear_flag(rd->dn, OF_POPULATED); return notifier_from_errno(PTR_ERR(client)); } put_device(&adap->dev); break; case OF_RECONFIG_CHANGE_REMOVE: /* already depopulated? */ if (!of_node_check_flag(rd->dn, OF_POPULATED)) return NOTIFY_OK; /* find our device by node */ client = of_find_i2c_device_by_node(rd->dn); if (client == NULL) return NOTIFY_OK; /* no? not meant for us */ /* unregister takes one ref away */ i2c_unregister_device(client); /* and put the reference of the find */ put_device(&client->dev); break; } return NOTIFY_OK; } struct notifier_block i2c_of_notifier = { .notifier_call = of_i2c_notify, }; #endif /* CONFIG_OF_DYNAMIC */ |
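/*
 * Illustrative sketch: the vendor-prefix handling that
 * i2c_of_match_device_sysfs() applies when matching a sysfs-instantiated
 * client name against OF compatible strings.  A compatible of the form
 * "vendor,device" matches either the full string or the part after the
 * comma.  strcmp() is a simplification of sysfs_streq() (which also
 * tolerates a trailing newline), and the device names in main() are just
 * example strings.
 */
#include <stdio.h>
#include <string.h>

/* returns 1 when @name matches @compatible with or without the vendor */
static int demo_compat_match(const char *name, const char *compatible)
{
	const char *dev = strchr(compatible, ',');

	if (strcmp(name, compatible) == 0)
		return 1;
	dev = dev ? dev + 1 : compatible;	/* strip "vendor," if present */
	return strcmp(name, dev) == 0;
}

int main(void)
{
	printf("%d\n", demo_compat_match("pcf8563", "nxp,pcf8563"));	 /* 1 */
	printf("%d\n", demo_compat_match("nxp,pcf8563", "nxp,pcf8563")); /* 1 */
	printf("%d\n", demo_compat_match("ds1307", "nxp,pcf8563"));	 /* 0 */
	return 0;
}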
22 22 22 22 22 5 17 5 5 5 5 5 5 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 | /* * llc_s_ev.c - Defines SAP component events * * The followed event functions are SAP component events which are described * in 802.2 LLC protocol standard document. * * Copyright (c) 1997 by Procom Technology, Inc. * 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program can be redistributed or modified under the terms of the * GNU General Public License as published by the Free Software Foundation. * This program is distributed without any warranty or implied warranty * of merchantability or fitness for a particular purpose. * * See the GNU General Public License for more details. */ #include <linux/socket.h> #include <net/sock.h> #include <net/llc_if.h> #include <net/llc_s_ev.h> #include <net/llc_pdu.h> int llc_sap_ev_activation_req(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); return ev->type == LLC_SAP_EV_TYPE_SIMPLE && ev->prim_type == LLC_SAP_EV_ACTIVATION_REQ ? 0 : 1; } int llc_sap_ev_rx_ui(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); return ev->type == LLC_SAP_EV_TYPE_PDU && LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_UI ? 0 : 1; } int llc_sap_ev_unitdata_req(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); return ev->type == LLC_SAP_EV_TYPE_PRIM && ev->prim == LLC_DATAUNIT_PRIM && ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1; } int llc_sap_ev_xid_req(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); return ev->type == LLC_SAP_EV_TYPE_PRIM && ev->prim == LLC_XID_PRIM && ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1; } int llc_sap_ev_rx_xid_c(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); return ev->type == LLC_SAP_EV_TYPE_PDU && LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_XID ? 0 : 1; } int llc_sap_ev_rx_xid_r(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); return ev->type == LLC_SAP_EV_TYPE_PDU && LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_RSP(pdu) == LLC_1_PDU_CMD_XID ? 0 : 1; } int llc_sap_ev_test_req(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); return ev->type == LLC_SAP_EV_TYPE_PRIM && ev->prim == LLC_TEST_PRIM && ev->prim_type == LLC_PRIM_TYPE_REQ ? 0 : 1; } int llc_sap_ev_rx_test_c(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); return ev->type == LLC_SAP_EV_TYPE_PDU && LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_TEST ? 
0 : 1; } int llc_sap_ev_rx_test_r(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); return ev->type == LLC_SAP_EV_TYPE_PDU && LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_RSP(pdu) == LLC_1_PDU_CMD_TEST ? 0 : 1; } int llc_sap_ev_deactivation_req(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); return ev->type == LLC_SAP_EV_TYPE_SIMPLE && ev->prim_type == LLC_SAP_EV_DEACTIVATION_REQ ? 0 : 1; } |
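/*
 * Illustrative sketch: the convention the recognizers above follow - each
 * returns 0 when the event matches and 1 otherwise - shown with a toy event
 * and a table walk like the one the SAP state machine performs.  The event
 * layout, the type/prim values, and the function names are invented for the
 * example.
 */
#include <stdio.h>

struct demo_ev { int type; int prim; };	/* stand-in for llc_sap_state_ev */

static int ev_is_activation(const struct demo_ev *ev)
{
	return ev->type == 1 && ev->prim == 10 ? 0 : 1;
}

static int ev_is_unitdata(const struct demo_ev *ev)
{
	return ev->type == 2 && ev->prim == 20 ? 0 : 1;
}

static int (*const recognizers[])(const struct demo_ev *) = {
	ev_is_activation,
	ev_is_unitdata,
};

int main(void)
{
	struct demo_ev ev = { .type = 2, .prim = 20 };
	unsigned int i;

	for (i = 0; i < sizeof(recognizers) / sizeof(recognizers[0]); i++)
		if (!recognizers[i](&ev))	/* 0 means "this is my event" */
			printf("event recognized by entry %u\n", i);
	return 0;
}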
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 | // SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2010 Daniel Mack <daniel@caiaq.de> * * This file holds USB constants and structures defined * by the USB Device Class Definition for Audio Devices in version 2.0. * Comments below reference relevant sections of the documents contained * in http://www.usb.org/developers/devclass_docs/Audio2.0_final.zip */ #ifndef __LINUX_USB_AUDIO_V2_H #define __LINUX_USB_AUDIO_V2_H #include <linux/types.h> /* v1.0 and v2.0 of this standard have many things in common. For the rest * of the definitions, please refer to audio.h */ /* * bmControl field decoders * * From the USB Audio spec v2.0: * * bmaControls() is a (ch+1)-element array of 4-byte bitmaps, * each containing a set of bit pairs. If a Control is present, * it must be Host readable. If a certain Control is not * present then the bit pair must be set to 0b00. * If a Control is present but read-only, the bit pair must be * set to 0b01. If a Control is also Host programmable, the bit * pair must be set to 0b11. The value 0b10 is not allowed. * */ static inline bool uac_v2v3_control_is_readable(u32 bmControls, u8 control) { return (bmControls >> ((control - 1) * 2)) & 0x1; } static inline bool uac_v2v3_control_is_writeable(u32 bmControls, u8 control) { return (bmControls >> ((control - 1) * 2)) & 0x2; } /* 4.7.2 Class-Specific AC Interface Descriptor */ struct uac2_ac_header_descriptor { __u8 bLength; /* 9 */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* UAC_MS_HEADER */ __le16 bcdADC; /* 0x0200 */ __u8 bCategory; __le16 wTotalLength; /* includes Unit and Terminal desc. 
*/ __u8 bmControls; } __packed; /* 2.3.1.6 Type I Format Type Descriptor (Frmts20 final.pdf)*/ struct uac2_format_type_i_descriptor { __u8 bLength; /* in bytes: 6 */ __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ __u8 bDescriptorSubtype; /* FORMAT_TYPE */ __u8 bFormatType; /* FORMAT_TYPE_1 */ __u8 bSubslotSize; /* {1,2,3,4} */ __u8 bBitResolution; } __packed; /* 4.7.2.1 Clock Source Descriptor */ struct uac_clock_source_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bClockID; __u8 bmAttributes; __u8 bmControls; __u8 bAssocTerminal; __u8 iClockSource; } __attribute__((packed)); /* bmAttribute fields */ #define UAC_CLOCK_SOURCE_TYPE_EXT 0x0 #define UAC_CLOCK_SOURCE_TYPE_INT_FIXED 0x1 #define UAC_CLOCK_SOURCE_TYPE_INT_VAR 0x2 #define UAC_CLOCK_SOURCE_TYPE_INT_PROG 0x3 #define UAC_CLOCK_SOURCE_SYNCED_TO_SOF (1 << 2) /* 4.7.2.2 Clock Selector Descriptor */ struct uac_clock_selector_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bClockID; __u8 bNrInPins; __u8 baCSourceID[]; /* bmControls and iClockSelector omitted */ } __attribute__((packed)); /* 4.7.2.3 Clock Multiplier Descriptor */ struct uac_clock_multiplier_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bClockID; __u8 bCSourceID; __u8 bmControls; __u8 iClockMultiplier; } __attribute__((packed)); /* 4.7.2.4 Input terminal descriptor */ struct uac2_input_terminal_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalID; __le16 wTerminalType; __u8 bAssocTerminal; __u8 bCSourceID; __u8 bNrChannels; __le32 bmChannelConfig; __u8 iChannelNames; __le16 bmControls; __u8 iTerminal; } __attribute__((packed)); /* 4.7.2.5 Output terminal descriptor */ struct uac2_output_terminal_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalID; __le16 wTerminalType; __u8 bAssocTerminal; __u8 bSourceID; __u8 bCSourceID; __le16 bmControls; __u8 iTerminal; } __attribute__((packed)); /* 4.7.2.8 Feature Unit Descriptor */ struct uac2_feature_unit_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bUnitID; __u8 bSourceID; /* bmaControls is actually u32, * but u8 is needed for the hybrid parser */ __u8 bmaControls[]; /* variable length */ } __attribute__((packed)); #define UAC2_DT_FEATURE_UNIT_SIZE(ch) (6 + ((ch) + 1) * 4) /* As above, but more useful for defining your own descriptors: */ #define DECLARE_UAC2_FEATURE_UNIT_DESCRIPTOR(ch) \ struct uac2_feature_unit_descriptor_##ch { \ __u8 bLength; \ __u8 bDescriptorType; \ __u8 bDescriptorSubtype; \ __u8 bUnitID; \ __u8 bSourceID; \ __le32 bmaControls[ch + 1]; \ __u8 iFeature; \ } __packed /* 4.7.2.10 Effect Unit Descriptor */ struct uac2_effect_unit_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bUnitID; __le16 wEffectType; __u8 bSourceID; __u8 bmaControls[]; /* variable length */ } __attribute__((packed)); /* 4.9.2 Class-Specific AS Interface Descriptor */ struct uac2_as_header_descriptor { __u8 bLength; __u8 bDescriptorType; __u8 bDescriptorSubtype; __u8 bTerminalLink; __u8 bmControls; __u8 bFormatType; __le32 bmFormats; __u8 bNrChannels; __le32 bmChannelConfig; __u8 iChannelNames; } __attribute__((packed)); #define UAC2_FORMAT_TYPE_I_RAW_DATA (1 << 31) /* 4.10.1.2 Class-Specific AS Isochronous Audio Data Endpoint Descriptor */ struct uac2_iso_endpoint_descriptor { __u8 bLength; /* in bytes: 8 */ __u8 bDescriptorType; /* USB_DT_CS_ENDPOINT */ __u8 bDescriptorSubtype; /* EP_GENERAL */ __u8 
bmAttributes; __u8 bmControls; __u8 bLockDelayUnits; __le16 wLockDelay; } __attribute__((packed)); #define UAC2_CONTROL_PITCH (3 << 0) #define UAC2_CONTROL_DATA_OVERRUN (3 << 2) #define UAC2_CONTROL_DATA_UNDERRUN (3 << 4) /* 5.2.5.4.2 Connector Control Parameter Block */ struct uac2_connectors_ctl_blk { __u8 bNrChannels; __le32 bmChannelConfig; __u8 iChannelNames; } __attribute__((packed)); /* 6.1 Interrupt Data Message */ #define UAC2_INTERRUPT_DATA_MSG_VENDOR (1 << 0) #define UAC2_INTERRUPT_DATA_MSG_EP (1 << 1) struct uac2_interrupt_data_msg { __u8 bInfo; __u8 bAttribute; __le16 wValue; __le16 wIndex; } __attribute__((packed)); /* A.7 Audio Function Category Codes */ #define UAC2_FUNCTION_SUBCLASS_UNDEFINED 0x00 #define UAC2_FUNCTION_DESKTOP_SPEAKER 0x01 #define UAC2_FUNCTION_HOME_THEATER 0x02 #define UAC2_FUNCTION_MICROPHONE 0x03 #define UAC2_FUNCTION_HEADSET 0x04 #define UAC2_FUNCTION_TELEPHONE 0x05 #define UAC2_FUNCTION_CONVERTER 0x06 #define UAC2_FUNCTION_SOUND_RECORDER 0x07 #define UAC2_FUNCTION_IO_BOX 0x08 #define UAC2_FUNCTION_MUSICAL_INSTRUMENT 0x09 #define UAC2_FUNCTION_PRO_AUDIO 0x0a #define UAC2_FUNCTION_AUDIO_VIDEO 0x0b #define UAC2_FUNCTION_CONTROL_PANEL 0x0c #define UAC2_FUNCTION_OTHER 0xff /* A.9 Audio Class-Specific AC Interface Descriptor Subtypes */ /* see audio.h for the rest, which is identical to v1 */ #define UAC2_EFFECT_UNIT 0x07 #define UAC2_PROCESSING_UNIT_V2 0x08 #define UAC2_EXTENSION_UNIT_V2 0x09 #define UAC2_CLOCK_SOURCE 0x0a #define UAC2_CLOCK_SELECTOR 0x0b #define UAC2_CLOCK_MULTIPLIER 0x0c #define UAC2_SAMPLE_RATE_CONVERTER 0x0d /* A.10 Audio Class-Specific AS Interface Descriptor Subtypes */ /* see audio.h for the rest, which is identical to v1 */ #define UAC2_ENCODER 0x03 #define UAC2_DECODER 0x04 /* A.11 Effect Unit Effect Types */ #define UAC2_EFFECT_UNDEFINED 0x00 #define UAC2_EFFECT_PARAM_EQ 0x01 #define UAC2_EFFECT_REVERB 0x02 #define UAC2_EFFECT_MOD_DELAY 0x03 #define UAC2_EFFECT_DYN_RANGE_COMP 0x04 /* A.12 Processing Unit Process Types */ #define UAC2_PROCESS_UNDEFINED 0x00 #define UAC2_PROCESS_UP_DOWNMIX 0x01 #define UAC2_PROCESS_DOLBY_PROLOCIC 0x02 #define UAC2_PROCESS_STEREO_EXTENDER 0x03 /* A.14 Audio Class-Specific Request Codes */ #define UAC2_CS_CUR 0x01 #define UAC2_CS_RANGE 0x02 #define UAC2_CS_MEM 0x03 /* A.15 Encoder Type Codes */ #define UAC2_ENCODER_UNDEFINED 0x00 #define UAC2_ENCODER_OTHER 0x01 #define UAC2_ENCODER_MPEG 0x02 #define UAC2_ENCODER_AC3 0x03 #define UAC2_ENCODER_WMA 0x04 #define UAC2_ENCODER_DTS 0x05 /* A.16 Decoder Type Codes */ #define UAC2_DECODER_UNDEFINED 0x00 #define UAC2_DECODER_OTHER 0x01 #define UAC2_DECODER_MPEG 0x02 #define UAC2_DECODER_AC3 0x03 #define UAC2_DECODER_WMA 0x04 #define UAC2_DECODER_DTS 0x05 /* A.17.1 Clock Source Control Selectors */ #define UAC2_CS_UNDEFINED 0x00 #define UAC2_CS_CONTROL_SAM_FREQ 0x01 #define UAC2_CS_CONTROL_CLOCK_VALID 0x02 /* A.17.2 Clock Selector Control Selectors */ #define UAC2_CX_UNDEFINED 0x00 #define UAC2_CX_CLOCK_SELECTOR 0x01 /* A.17.3 Clock Multiplier Control Selectors */ #define UAC2_CM_UNDEFINED 0x00 #define UAC2_CM_NUMERATOR 0x01 #define UAC2_CM_DENOMINTATOR 0x02 /* A.17.4 Terminal Control Selectors */ #define UAC2_TE_UNDEFINED 0x00 #define UAC2_TE_COPY_PROTECT 0x01 #define UAC2_TE_CONNECTOR 0x02 #define UAC2_TE_OVERLOAD 0x03 #define UAC2_TE_CLUSTER 0x04 #define UAC2_TE_UNDERFLOW 0x05 #define UAC2_TE_OVERFLOW 0x06 #define UAC2_TE_LATENCY 0x07 /* A.17.5 Mixer Control Selectors */ #define UAC2_MU_UNDEFINED 0x00 #define UAC2_MU_MIXER 0x01 #define UAC2_MU_CLUSTER 0x02 
#define UAC2_MU_UNDERFLOW 0x03 #define UAC2_MU_OVERFLOW 0x04 #define UAC2_MU_LATENCY 0x05 /* A.17.6 Selector Control Selectors */ #define UAC2_SU_UNDEFINED 0x00 #define UAC2_SU_SELECTOR 0x01 #define UAC2_SU_LATENCY 0x02 /* A.17.7 Feature Unit Control Selectors */ /* see audio.h for the rest, which is identical to v1 */ #define UAC2_FU_INPUT_GAIN 0x0b #define UAC2_FU_INPUT_GAIN_PAD 0x0c #define UAC2_FU_PHASE_INVERTER 0x0d #define UAC2_FU_UNDERFLOW 0x0e #define UAC2_FU_OVERFLOW 0x0f #define UAC2_FU_LATENCY 0x10 /* A.17.8.1 Parametric Equalizer Section Effect Unit Control Selectors */ #define UAC2_PE_UNDEFINED 0x00 #define UAC2_PE_ENABLE 0x01 #define UAC2_PE_CENTERFREQ 0x02 #define UAC2_PE_QFACTOR 0x03 #define UAC2_PE_GAIN 0x04 #define UAC2_PE_UNDERFLOW 0x05 #define UAC2_PE_OVERFLOW 0x06 #define UAC2_PE_LATENCY 0x07 /* A.17.8.2 Reverberation Effect Unit Control Selectors */ #define UAC2_RV_UNDEFINED 0x00 #define UAC2_RV_ENABLE 0x01 #define UAC2_RV_TYPE 0x02 #define UAC2_RV_LEVEL 0x03 #define UAC2_RV_TIME 0x04 #define UAC2_RV_FEEDBACK 0x05 #define UAC2_RV_PREDELAY 0x06 #define UAC2_RV_DENSITY 0x07 #define UAC2_RV_HIFREQ_ROLLOFF 0x08 #define UAC2_RV_UNDERFLOW 0x09 #define UAC2_RV_OVERFLOW 0x0a #define UAC2_RV_LATENCY 0x0b /* A.17.8.3 Modulation Delay Effect Control Selectors */ #define UAC2_MD_UNDEFINED 0x00 #define UAC2_MD_ENABLE 0x01 #define UAC2_MD_BALANCE 0x02 #define UAC2_MD_RATE 0x03 #define UAC2_MD_DEPTH 0x04 #define UAC2_MD_TIME 0x05 #define UAC2_MD_FEEDBACK 0x06 #define UAC2_MD_UNDERFLOW 0x07 #define UAC2_MD_OVERFLOW 0x08 #define UAC2_MD_LATENCY 0x09 /* A.17.8.4 Dynamic Range Compressor Effect Unit Control Selectors */ #define UAC2_DR_UNDEFINED 0x00 #define UAC2_DR_ENABLE 0x01 #define UAC2_DR_COMPRESSION_RATE 0x02 #define UAC2_DR_MAXAMPL 0x03 #define UAC2_DR_THRESHOLD 0x04 #define UAC2_DR_ATTACK_TIME 0x05 #define UAC2_DR_RELEASE_TIME 0x06 #define UAC2_DR_UNDEFLOW 0x07 #define UAC2_DR_OVERFLOW 0x08 #define UAC2_DR_LATENCY 0x09 /* A.17.9.1 Up/Down-mix Processing Unit Control Selectors */ #define UAC2_UD_UNDEFINED 0x00 #define UAC2_UD_ENABLE 0x01 #define UAC2_UD_MODE_SELECT 0x02 #define UAC2_UD_CLUSTER 0x03 #define UAC2_UD_UNDERFLOW 0x04 #define UAC2_UD_OVERFLOW 0x05 #define UAC2_UD_LATENCY 0x06 /* A.17.9.2 Dolby Prologic[tm] Processing Unit Control Selectors */ #define UAC2_DP_UNDEFINED 0x00 #define UAC2_DP_ENABLE 0x01 #define UAC2_DP_MODE_SELECT 0x02 #define UAC2_DP_CLUSTER 0x03 #define UAC2_DP_UNDERFFLOW 0x04 #define UAC2_DP_OVERFLOW 0x05 #define UAC2_DP_LATENCY 0x06 /* A.17.9.3 Stereo Expander Processing Unit Control Selectors */ #define UAC2_ST_EXT_UNDEFINED 0x00 #define UAC2_ST_EXT_ENABLE 0x01 #define UAC2_ST_EXT_WIDTH 0x02 #define UAC2_ST_EXT_UNDEFLOW 0x03 #define UAC2_ST_EXT_OVERFLOW 0x04 #define UAC2_ST_EXT_LATENCY 0x05 /* A.17.10 Extension Unit Control Selectors */ #define UAC2_XU_UNDEFINED 0x00 #define UAC2_XU_ENABLE 0x01 #define UAC2_XU_CLUSTER 0x02 #define UAC2_XU_UNDERFLOW 0x03 #define UAC2_XU_OVERFLOW 0x04 #define UAC2_XU_LATENCY 0x05 /* A.17.11 AudioStreaming Interface Control Selectors */ #define UAC2_AS_UNDEFINED 0x00 #define UAC2_AS_ACT_ALT_SETTING 0x01 #define UAC2_AS_VAL_ALT_SETTINGS 0x02 #define UAC2_AS_AUDIO_DATA_FORMAT 0x03 /* A.17.12 Encoder Control Selectors */ #define UAC2_EN_UNDEFINED 0x00 #define UAC2_EN_BIT_RATE 0x01 #define UAC2_EN_QUALITY 0x02 #define UAC2_EN_VBR 0x03 #define UAC2_EN_TYPE 0x04 #define UAC2_EN_UNDERFLOW 0x05 #define UAC2_EN_OVERFLOW 0x06 #define UAC2_EN_ENCODER_ERROR 0x07 #define UAC2_EN_PARAM1 0x08 #define UAC2_EN_PARAM2 0x09 #define 
UAC2_EN_PARAM3 0x0a #define UAC2_EN_PARAM4 0x0b #define UAC2_EN_PARAM5 0x0c #define UAC2_EN_PARAM6 0x0d #define UAC2_EN_PARAM7 0x0e #define UAC2_EN_PARAM8 0x0f /* A.17.13.1 MPEG Decoder Control Selectors */ #define UAC2_MPEG_UNDEFINED 0x00 #define UAC2_MPEG_DUAL_CHANNEL 0x01 #define UAC2_MPEG_SECOND_STEREO 0x02 #define UAC2_MPEG_MULTILINGUAL 0x03 #define UAC2_MPEG_DYN_RANGE 0x04 #define UAC2_MPEG_SCALING 0x05 #define UAC2_MPEG_HILO_SCALING 0x06 #define UAC2_MPEG_UNDERFLOW 0x07 #define UAC2_MPEG_OVERFLOW 0x08 #define UAC2_MPEG_DECODER_ERROR 0x09 /* A17.13.2 AC3 Decoder Control Selectors */ #define UAC2_AC3_UNDEFINED 0x00 #define UAC2_AC3_MODE 0x01 #define UAC2_AC3_DYN_RANGE 0x02 #define UAC2_AC3_SCALING 0x03 #define UAC2_AC3_HILO_SCALING 0x04 #define UAC2_AC3_UNDERFLOW 0x05 #define UAC2_AC3_OVERFLOW 0x06 #define UAC2_AC3_DECODER_ERROR 0x07 /* A17.13.3 WMA Decoder Control Selectors */ #define UAC2_WMA_UNDEFINED 0x00 #define UAC2_WMA_UNDERFLOW 0x01 #define UAC2_WMA_OVERFLOW 0x02 #define UAC2_WMA_DECODER_ERROR 0x03 /* A17.13.4 DTS Decoder Control Selectors */ #define UAC2_DTS_UNDEFINED 0x00 #define UAC2_DTS_UNDERFLOW 0x01 #define UAC2_DTS_OVERFLOW 0x02 #define UAC2_DTS_DECODER_ERROR 0x03 /* A17.14 Endpoint Control Selectors */ #define UAC2_EP_CS_UNDEFINED 0x00 #define UAC2_EP_CS_PITCH 0x01 #define UAC2_EP_CS_DATA_OVERRUN 0x02 #define UAC2_EP_CS_DATA_UNDERRUN 0x03 #endif /* __LINUX_USB_AUDIO_V2_H */ |
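/*
 * Editor's note -- illustrative sketch, not part of the UAC2 header above: the
 * request codes (UAC2_CS_CUR/UAC2_CS_RANGE) and control selectors defined here
 * are combined into class-specific control transfers. The helper below shows
 * one plausible use, reading a clock source's current sample rate via
 * UAC2_CS_CONTROL_SAM_FREQ; the interface number and clock entity ID are
 * hypothetical, device-specific parameters.
 */
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/audio-v2.h>

static int uac2_get_sample_rate(struct usb_device *dev, u8 intf_num,
				u8 clock_id, u32 *rate_hz)
{
	__le32 *buf;
	int ret;

	buf = kmalloc(sizeof(*buf), GFP_KERNEL);	/* DMA-safe buffer */
	if (!buf)
		return -ENOMEM;

	/*
	 * wValue: control selector in the high byte, channel number (0 for a
	 * clock source) in the low byte. wIndex: entity ID in the high byte,
	 * AudioControl interface number in the low byte.
	 */
	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
			      USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
			      UAC2_CS_CONTROL_SAM_FREQ << 8,
			      (clock_id << 8) | intf_num,
			      buf, sizeof(*buf), USB_CTRL_GET_TIMEOUT);
	if (ret == sizeof(*buf)) {
		*rate_hz = le32_to_cpu(*buf);
		ret = 0;
	} else if (ret >= 0) {
		ret = -EIO;	/* short transfer */
	}

	kfree(buf);
	return ret;
}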
// SPDX-License-Identifier: GPL-2.0-only /* * Kernel-based Virtual Machine driver for Linux * cpuid support routines * * derived from arch/x86/kvm/x86.c * * Copyright 2011 Red Hat, Inc. and/or its affiliates. * Copyright IBM Corporation, 2008 */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kvm_host.h> #include "linux/lockdep.h" #include <linux/export.h> #include <linux/vmalloc.h> #include <linux/uaccess.h> #include <linux/sched/stat.h> #include <asm/processor.h> #include <asm/user.h> #include <asm/fpu/xstate.h> #include <asm/sgx.h> #include <asm/cpuid.h> #include "cpuid.h" #include "lapic.h" #include "mmu.h" #include "trace.h" #include "pmu.h" #include "xen.h" /* * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be * aligned to sizeof(unsigned long) because it's not accessed via bitops.
*/ u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly; EXPORT_SYMBOL_GPL(kvm_cpu_caps); struct cpuid_xstate_sizes { u32 eax; u32 ebx; u32 ecx; }; static struct cpuid_xstate_sizes xstate_sizes[XFEATURE_MAX] __ro_after_init; void __init kvm_init_xstate_sizes(void) { u32 ign; int i; for (i = XFEATURE_YMM; i < ARRAY_SIZE(xstate_sizes); i++) { struct cpuid_xstate_sizes *xs = &xstate_sizes[i]; cpuid_count(0xD, i, &xs->eax, &xs->ebx, &xs->ecx, &ign); } } u32 xstate_required_size(u64 xstate_bv, bool compacted) { u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; int i; xstate_bv &= XFEATURE_MASK_EXTEND; for (i = XFEATURE_YMM; i < ARRAY_SIZE(xstate_sizes) && xstate_bv; i++) { struct cpuid_xstate_sizes *xs = &xstate_sizes[i]; u32 offset; if (!(xstate_bv & BIT_ULL(i))) continue; /* ECX[1]: 64B alignment in compacted form */ if (compacted) offset = (xs->ecx & 0x2) ? ALIGN(ret, 64) : ret; else offset = xs->ebx; ret = max(ret, offset + xs->eax); xstate_bv &= ~BIT_ULL(i); } return ret; } /* * Magic value used by KVM when querying userspace-provided CPUID entries and * doesn't care about the CPUID index because the index of the function in * question is not significant. Note, this magic value must have at least one * bit set in bits[63:32] and must be consumed as a u64 by cpuid_entry2_find() * to avoid false positives when processing guest CPUID input. */ #define KVM_CPUID_INDEX_NOT_SIGNIFICANT -1ull static struct kvm_cpuid_entry2 *cpuid_entry2_find(struct kvm_vcpu *vcpu, u32 function, u64 index) { struct kvm_cpuid_entry2 *e; int i; /* * KVM has a semi-arbitrary rule that querying the guest's CPUID model * with IRQs disabled is disallowed. The CPUID model can legitimately * have over one hundred entries, i.e. the lookup is slow, and IRQs are * typically disabled in KVM only when KVM is in a performance critical * path, e.g. the core VM-Enter/VM-Exit run loop. Nothing will break * if this rule is violated; this assertion is purely to flag potential * performance issues. If this fires, consider moving the lookup out * of the hotpath, e.g. by caching information during CPUID updates. */ lockdep_assert_irqs_enabled(); for (i = 0; i < vcpu->arch.cpuid_nent; i++) { e = &vcpu->arch.cpuid_entries[i]; if (e->function != function) continue; /* * If the index isn't significant, use the first entry with a * matching function. It's userspace's responsibility to not * provide "duplicate" entries in all cases. */ if (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index) return e; /* * Similarly, use the first matching entry if KVM is doing a * lookup (as opposed to emulating CPUID) for a function that's * architecturally defined as not having a significant index. */ if (index == KVM_CPUID_INDEX_NOT_SIGNIFICANT) { /* * Direct lookups from KVM should not diverge from what * KVM defines internally (the architectural behavior). */ WARN_ON_ONCE(cpuid_function_is_indexed(function)); return e; } } return NULL; } struct kvm_cpuid_entry2 *kvm_find_cpuid_entry_index(struct kvm_vcpu *vcpu, u32 function, u32 index) { return cpuid_entry2_find(vcpu, function, index); } EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry_index); struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, u32 function) { return cpuid_entry2_find(vcpu, function, KVM_CPUID_INDEX_NOT_SIGNIFICANT); } EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry); /* * cpuid_entry2_find() and KVM_CPUID_INDEX_NOT_SIGNIFICANT should never be used * directly outside of kvm_find_cpuid_entry() and kvm_find_cpuid_entry_index().
*/ #undef KVM_CPUID_INDEX_NOT_SIGNIFICANT static int kvm_check_cpuid(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; u64 xfeatures; /* * The existing code assumes virtual address is 48-bit or 57-bit in the * canonical address checks; exit if it is ever changed. */ best = kvm_find_cpuid_entry(vcpu, 0x80000008); if (best) { int vaddr_bits = (best->eax & 0xff00) >> 8; if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0) return -EINVAL; } /* * Exposing dynamic xfeatures to the guest requires additional * enabling in the FPU, e.g. to expand the guest XSAVE state size. */ best = kvm_find_cpuid_entry_index(vcpu, 0xd, 0); if (!best) return 0; xfeatures = best->eax | ((u64)best->edx << 32); xfeatures &= XFEATURE_MASK_USER_DYNAMIC; if (!xfeatures) return 0; return fpu_enable_guest_xfd_features(&vcpu->arch.guest_fpu, xfeatures); } static u32 kvm_apply_cpuid_pv_features_quirk(struct kvm_vcpu *vcpu); static void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu); /* Check whether the supplied CPUID data is equal to what is already set for the vCPU. */ static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2, int nent) { struct kvm_cpuid_entry2 *orig; int i; /* * Apply runtime CPUID updates to the incoming CPUID entries to avoid * false positives due to mismatches on KVM-owned feature flags. * * Note! @e2 and @nent track the _old_ CPUID entries! */ kvm_update_cpuid_runtime(vcpu); kvm_apply_cpuid_pv_features_quirk(vcpu); if (nent != vcpu->arch.cpuid_nent) return -EINVAL; for (i = 0; i < nent; i++) { orig = &vcpu->arch.cpuid_entries[i]; if (e2[i].function != orig->function || e2[i].index != orig->index || e2[i].flags != orig->flags || e2[i].eax != orig->eax || e2[i].ebx != orig->ebx || e2[i].ecx != orig->ecx || e2[i].edx != orig->edx) return -EINVAL; } return 0; } static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu, const char *sig) { struct kvm_hypervisor_cpuid cpuid = {}; struct kvm_cpuid_entry2 *entry; u32 base; for_each_possible_hypervisor_cpuid_base(base) { entry = kvm_find_cpuid_entry(vcpu, base); if (entry) { u32 signature[3]; signature[0] = entry->ebx; signature[1] = entry->ecx; signature[2] = entry->edx; if (!memcmp(signature, sig, sizeof(signature))) { cpuid.base = base; cpuid.limit = entry->eax; break; } } } return cpuid; } static u32 kvm_apply_cpuid_pv_features_quirk(struct kvm_vcpu *vcpu) { struct kvm_hypervisor_cpuid kvm_cpuid; struct kvm_cpuid_entry2 *best; kvm_cpuid = kvm_get_hypervisor_cpuid(vcpu, KVM_SIGNATURE); if (!kvm_cpuid.base) return 0; best = kvm_find_cpuid_entry(vcpu, kvm_cpuid.base | KVM_CPUID_FEATURES); if (!best) return 0; if (kvm_hlt_in_guest(vcpu->kvm)) best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT); return best->eax; } /* * Calculate guest's supported XCR0 taking into account guest CPUID data and * KVM's supported XCR0 (comprised of host's XCR0 and KVM_SUPPORTED_XCR0).
*/ static u64 cpuid_get_supported_xcr0(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; best = kvm_find_cpuid_entry_index(vcpu, 0xd, 0); if (!best) return 0; return (best->eax | ((u64)best->edx << 32)) & kvm_caps.supported_xcr0; } static __always_inline void kvm_update_feature_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entry, unsigned int x86_feature, bool has_feature) { cpuid_entry_change(entry, x86_feature, has_feature); guest_cpu_cap_change(vcpu, x86_feature, has_feature); } static void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; vcpu->arch.cpuid_dynamic_bits_dirty = false; best = kvm_find_cpuid_entry(vcpu, 1); if (best) { kvm_update_feature_runtime(vcpu, best, X86_FEATURE_OSXSAVE, kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)); kvm_update_feature_runtime(vcpu, best, X86_FEATURE_APIC, vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE); if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) kvm_update_feature_runtime(vcpu, best, X86_FEATURE_MWAIT, vcpu->arch.ia32_misc_enable_msr & MSR_IA32_MISC_ENABLE_MWAIT); } best = kvm_find_cpuid_entry_index(vcpu, 7, 0); if (best) kvm_update_feature_runtime(vcpu, best, X86_FEATURE_OSPKE, kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)); best = kvm_find_cpuid_entry_index(vcpu, 0xD, 0); if (best) best->ebx = xstate_required_size(vcpu->arch.xcr0, false); best = kvm_find_cpuid_entry_index(vcpu, 0xD, 1); if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) || cpuid_entry_has(best, X86_FEATURE_XSAVEC))) best->ebx = xstate_required_size(vcpu->arch.xcr0, true); } static bool kvm_cpuid_has_hyperv(struct kvm_vcpu *vcpu) { #ifdef CONFIG_KVM_HYPERV struct kvm_cpuid_entry2 *entry; entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE); return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX; #else return false; #endif } static bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *entry; entry = kvm_find_cpuid_entry(vcpu, 0); if (!entry) return false; return is_guest_vendor_amd(entry->ebx, entry->ecx, entry->edx) || is_guest_vendor_hygon(entry->ebx, entry->ecx, entry->edx); } /* * This isn't truly "unsafe", but except for the cpu_caps initialization code, * all register lookups should use __cpuid_entry_get_reg(), which provides * compile-time validation of the input. */ static u32 cpuid_get_reg_unsafe(struct kvm_cpuid_entry2 *entry, u32 reg) { switch (reg) { case CPUID_EAX: return entry->eax; case CPUID_EBX: return entry->ebx; case CPUID_ECX: return entry->ecx; case CPUID_EDX: return entry->edx; default: WARN_ON_ONCE(1); return 0; } } static int cpuid_func_emulated(struct kvm_cpuid_entry2 *entry, u32 func, bool include_partially_emulated); void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; struct kvm_cpuid_entry2 *best; struct kvm_cpuid_entry2 *entry; bool allow_gbpages; int i; memset(vcpu->arch.cpu_caps, 0, sizeof(vcpu->arch.cpu_caps)); BUILD_BUG_ON(ARRAY_SIZE(reverse_cpuid) != NR_KVM_CPU_CAPS); /* * Reset guest capabilities to userspace's guest CPUID definition, i.e. * honor userspace's definition for features that don't require KVM or * hardware management/support (or that KVM simply doesn't care about). 
*/ for (i = 0; i < NR_KVM_CPU_CAPS; i++) { const struct cpuid_reg cpuid = reverse_cpuid[i]; struct kvm_cpuid_entry2 emulated; if (!cpuid.function) continue; entry = kvm_find_cpuid_entry_index(vcpu, cpuid.function, cpuid.index); if (!entry) continue; cpuid_func_emulated(&emulated, cpuid.function, true); /* * A vCPU has a feature if it's supported by KVM and is enabled * in guest CPUID. Note, this includes features that are * supported by KVM but aren't advertised to userspace! */ vcpu->arch.cpu_caps[i] = kvm_cpu_caps[i] | cpuid_get_reg_unsafe(&emulated, cpuid.reg); vcpu->arch.cpu_caps[i] &= cpuid_get_reg_unsafe(entry, cpuid.reg); } kvm_update_cpuid_runtime(vcpu); /* * If TDP is enabled, let the guest use GBPAGES if they're supported in * hardware. The hardware page walker doesn't let KVM disable GBPAGES, * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA * walk for performance and complexity reasons. Not to mention KVM * _can't_ solve the problem because GVA->GPA walks aren't visible to * KVM once a TDP translation is installed. Mimic hardware behavior so * that KVM's is at least consistent, i.e. doesn't randomly inject #PF. * If TDP is disabled, honor *only* guest CPUID as KVM has full control * and can install smaller shadow pages if the host lacks 1GiB support. */ allow_gbpages = tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) : guest_cpu_cap_has(vcpu, X86_FEATURE_GBPAGES); guest_cpu_cap_change(vcpu, X86_FEATURE_GBPAGES, allow_gbpages); best = kvm_find_cpuid_entry(vcpu, 1); if (best && apic) { if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER)) apic->lapic_timer.timer_mode_mask = 3 << 17; else apic->lapic_timer.timer_mode_mask = 1 << 17; kvm_apic_set_version(vcpu); } vcpu->arch.guest_supported_xcr0 = cpuid_get_supported_xcr0(vcpu); vcpu->arch.pv_cpuid.features = kvm_apply_cpuid_pv_features_quirk(vcpu); vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu); vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu); kvm_pmu_refresh(vcpu); #define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f) vcpu->arch.cr4_guest_rsvd_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_) | __cr4_reserved_bits(guest_cpu_cap_has, vcpu); #undef __kvm_cpu_cap_has kvm_hv_set_cpuid(vcpu, kvm_cpuid_has_hyperv(vcpu)); /* Invoke the vendor callback only after the above state is updated. */ kvm_x86_call(vcpu_after_set_cpuid)(vcpu); /* * Except for the MMU, which needs to do its thing any vendor specific * adjustments to the reserved GPA bits. */ kvm_mmu_after_set_cpuid(vcpu); } int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu) { struct kvm_cpuid_entry2 *best; best = kvm_find_cpuid_entry(vcpu, 0x80000000); if (!best || best->eax < 0x80000008) goto not_found; best = kvm_find_cpuid_entry(vcpu, 0x80000008); if (best) return best->eax & 0xff; not_found: return 36; } /* * This "raw" version returns the reserved GPA bits without any adjustments for * encryption technologies that usurp bits. The raw mask should be used if and * only if hardware does _not_ strip the usurped bits, e.g. in virtual MTRRs. */ u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu) { return rsvd_bits(cpuid_maxphyaddr(vcpu), 63); } static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2, int nent) { u32 vcpu_caps[NR_KVM_CPU_CAPS]; int r; /* * Swap the existing (old) entries with the incoming (new) entries in * order to massage the new entries, e.g. 
to account for dynamic bits * that KVM controls, without clobbering the current guest CPUID, which * KVM needs to preserve in order to unwind on failure. * * Similarly, save the vCPU's current cpu_caps so that the capabilities * can be updated alongside the CPUID entries when performing runtime * updates. Full initialization is done if and only if the vCPU hasn't * run, i.e. only if userspace is potentially changing CPUID features. */ swap(vcpu->arch.cpuid_entries, e2); swap(vcpu->arch.cpuid_nent, nent); memcpy(vcpu_caps, vcpu->arch.cpu_caps, sizeof(vcpu_caps)); BUILD_BUG_ON(sizeof(vcpu_caps) != sizeof(vcpu->arch.cpu_caps)); /* * KVM does not correctly handle changing guest CPUID after KVM_RUN, as * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page * faults due to reusing SPs/SPTEs. In practice no sane VMM mucks with * the core vCPU model on the fly. It would've been better to forbid any * KVM_SET_CPUID{,2} calls after KVM_RUN altogether but unfortunately * some VMMs (e.g. QEMU) reuse vCPU fds for CPU hotplug/unplug and do * KVM_SET_CPUID{,2} again. To support this legacy behavior, check * whether the supplied CPUID data is equal to what's already set. */ if (kvm_vcpu_has_run(vcpu)) { r = kvm_cpuid_check_equal(vcpu, e2, nent); if (r) goto err; goto success; } #ifdef CONFIG_KVM_HYPERV if (kvm_cpuid_has_hyperv(vcpu)) { r = kvm_hv_vcpu_init(vcpu); if (r) goto err; } #endif r = kvm_check_cpuid(vcpu); if (r) goto err; #ifdef CONFIG_KVM_XEN vcpu->arch.xen.cpuid = kvm_get_hypervisor_cpuid(vcpu, XEN_SIGNATURE); #endif kvm_vcpu_after_set_cpuid(vcpu); success: kvfree(e2); return 0; err: memcpy(vcpu->arch.cpu_caps, vcpu_caps, sizeof(vcpu_caps)); swap(vcpu->arch.cpuid_entries, e2); swap(vcpu->arch.cpuid_nent, nent); return r; } /* when an old userspace process fills a new kernel module */ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid *cpuid, struct kvm_cpuid_entry __user *entries) { int r, i; struct kvm_cpuid_entry *e = NULL; struct kvm_cpuid_entry2 *e2 = NULL; if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) return -E2BIG; if (cpuid->nent) { e = vmemdup_array_user(entries, cpuid->nent, sizeof(*e)); if (IS_ERR(e)) return PTR_ERR(e); e2 = kvmalloc_array(cpuid->nent, sizeof(*e2), GFP_KERNEL_ACCOUNT); if (!e2) { r = -ENOMEM; goto out_free_cpuid; } } for (i = 0; i < cpuid->nent; i++) { e2[i].function = e[i].function; e2[i].eax = e[i].eax; e2[i].ebx = e[i].ebx; e2[i].ecx = e[i].ecx; e2[i].edx = e[i].edx; e2[i].index = 0; e2[i].flags = 0; e2[i].padding[0] = 0; e2[i].padding[1] = 0; e2[i].padding[2] = 0; } r = kvm_set_cpuid(vcpu, e2, cpuid->nent); if (r) kvfree(e2); out_free_cpuid: kvfree(e); return r; } int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries) { struct kvm_cpuid_entry2 *e2 = NULL; int r; if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) return -E2BIG; if (cpuid->nent) { e2 = vmemdup_array_user(entries, cpuid->nent, sizeof(*e2)); if (IS_ERR(e2)) return PTR_ERR(e2); } r = kvm_set_cpuid(vcpu, e2, cpuid->nent); if (r) kvfree(e2); return r; } int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries) { if (cpuid->nent < vcpu->arch.cpuid_nent) return -E2BIG; if (vcpu->arch.cpuid_dynamic_bits_dirty) kvm_update_cpuid_runtime(vcpu); if (copy_to_user(entries, vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2))) return -EFAULT; cpuid->nent = 
vcpu->arch.cpuid_nent; return 0; } static __always_inline u32 raw_cpuid_get(struct cpuid_reg cpuid) { struct kvm_cpuid_entry2 entry; u32 base; /* * KVM only supports features defined by Intel (0x0), AMD (0x80000000), * and Centaur (0xc0000000). WARN if a feature for new vendor base is * defined, as this and other code would need to be updated. */ base = cpuid.function & 0xffff0000; if (WARN_ON_ONCE(base && base != 0x80000000 && base != 0xc0000000)) return 0; if (cpuid_eax(base) < cpuid.function) return 0; cpuid_count(cpuid.function, cpuid.index, &entry.eax, &entry.ebx, &entry.ecx, &entry.edx); return *__cpuid_entry_get_reg(&entry, cpuid.reg); } /* * For kernel-defined leafs, mask KVM's supported feature set with the kernel's * capabilities as well as raw CPUID. For KVM-defined leafs, consult only raw * CPUID, as KVM is the one and only authority (in the kernel). */ #define kvm_cpu_cap_init(leaf, feature_initializers...) \ do { \ const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32); \ const u32 __maybe_unused kvm_cpu_cap_init_in_progress = leaf; \ const u32 *kernel_cpu_caps = boot_cpu_data.x86_capability; \ u32 kvm_cpu_cap_passthrough = 0; \ u32 kvm_cpu_cap_synthesized = 0; \ u32 kvm_cpu_cap_emulated = 0; \ u32 kvm_cpu_cap_features = 0; \ \ feature_initializers \ \ kvm_cpu_caps[leaf] = kvm_cpu_cap_features; \ \ if (leaf < NCAPINTS) \ kvm_cpu_caps[leaf] &= kernel_cpu_caps[leaf]; \ \ kvm_cpu_caps[leaf] |= kvm_cpu_cap_passthrough; \ kvm_cpu_caps[leaf] &= (raw_cpuid_get(cpuid) | \ kvm_cpu_cap_synthesized); \ kvm_cpu_caps[leaf] |= kvm_cpu_cap_emulated; \ } while (0) /* * Assert that the feature bit being declared, e.g. via F(), is in the CPUID * word that's being initialized. Exempt 0x8000_0001.EDX usage of 0x1.EDX * features, as AMD duplicated many 0x1.EDX features into 0x8000_0001.EDX. */ #define KVM_VALIDATE_CPU_CAP_USAGE(name) \ do { \ u32 __leaf = __feature_leaf(X86_FEATURE_##name); \ \ BUILD_BUG_ON(__leaf != kvm_cpu_cap_init_in_progress); \ } while (0) #define F(name) \ ({ \ KVM_VALIDATE_CPU_CAP_USAGE(name); \ kvm_cpu_cap_features |= feature_bit(name); \ }) /* Scattered Flag - For features that are scattered by cpufeatures.h. */ #define SCATTERED_F(name) \ ({ \ BUILD_BUG_ON(X86_FEATURE_##name >= MAX_CPU_FEATURES); \ KVM_VALIDATE_CPU_CAP_USAGE(name); \ if (boot_cpu_has(X86_FEATURE_##name)) \ F(name); \ }) /* Features that KVM supports only on 64-bit kernels. */ #define X86_64_F(name) \ ({ \ KVM_VALIDATE_CPU_CAP_USAGE(name); \ if (IS_ENABLED(CONFIG_X86_64)) \ F(name); \ }) /* * Emulated Feature - For features that KVM emulates in software irrespective * of host CPU/kernel support. */ #define EMULATED_F(name) \ ({ \ kvm_cpu_cap_emulated |= feature_bit(name); \ F(name); \ }) /* * Synthesized Feature - For features that are synthesized into boot_cpu_data, * i.e. may not be present in the raw CPUID, but can still be advertised to * userspace. Primarily used for mitigation related feature flags. */ #define SYNTHESIZED_F(name) \ ({ \ kvm_cpu_cap_synthesized |= feature_bit(name); \ F(name); \ }) /* * Passthrough Feature - For features that KVM supports based purely on raw * hardware CPUID, i.e. that KVM virtualizes even if the host kernel doesn't * use the feature. Simply force set the feature in KVM's capabilities, raw * CPUID support will be factored in by kvm_cpu_cap_mask(). 
*/ #define PASSTHROUGH_F(name) \ ({ \ kvm_cpu_cap_passthrough |= feature_bit(name); \ F(name); \ }) /* * Aliased Features - For features in 0x8000_0001.EDX that are duplicates of * identical 0x1.EDX features, and thus are aliased from 0x1 to 0x8000_0001. */ #define ALIASED_1_EDX_F(name) \ ({ \ BUILD_BUG_ON(__feature_leaf(X86_FEATURE_##name) != CPUID_1_EDX); \ BUILD_BUG_ON(kvm_cpu_cap_init_in_progress != CPUID_8000_0001_EDX); \ kvm_cpu_cap_features |= feature_bit(name); \ }) /* * Vendor Features - For features that KVM supports, but are added in later * because they require additional vendor enabling. */ #define VENDOR_F(name) \ ({ \ KVM_VALIDATE_CPU_CAP_USAGE(name); \ }) /* * Runtime Features - For features that KVM dynamically sets/clears at runtime, * e.g. when CR4 changes, but which are never advertised to userspace. */ #define RUNTIME_F(name) \ ({ \ KVM_VALIDATE_CPU_CAP_USAGE(name); \ }) /* * Undefine the MSR bit macro to avoid token concatenation issues when * processing X86_FEATURE_SPEC_CTRL_SSBD. */ #undef SPEC_CTRL_SSBD /* DS is defined by ptrace-abi.h on 32-bit builds. */ #undef DS void kvm_set_cpu_caps(void) { memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps)); BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) > sizeof(boot_cpu_data.x86_capability)); kvm_cpu_cap_init(CPUID_1_ECX, F(XMM3), F(PCLMULQDQ), VENDOR_F(DTES64), /* * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not* * advertised to guests via CPUID! MWAIT is also technically a * runtime flag thanks to IA32_MISC_ENABLES; mark it as such so * that KVM is aware that it's a known, unadvertised flag. */ RUNTIME_F(MWAIT), /* DS-CPL */ VENDOR_F(VMX), /* SMX, EST */ /* TM2 */ F(SSSE3), /* CNXT-ID */ /* Reserved */ F(FMA), F(CX16), /* xTPR Update */ F(PDCM), F(PCID), /* Reserved, DCA */ F(XMM4_1), F(XMM4_2), EMULATED_F(X2APIC), F(MOVBE), F(POPCNT), EMULATED_F(TSC_DEADLINE_TIMER), F(AES), F(XSAVE), RUNTIME_F(OSXSAVE), F(AVX), F(F16C), F(RDRAND), EMULATED_F(HYPERVISOR), ); kvm_cpu_cap_init(CPUID_1_EDX, F(FPU), F(VME), F(DE), F(PSE), F(TSC), F(MSR), F(PAE), F(MCE), F(CX8), F(APIC), /* Reserved */ F(SEP), F(MTRR), F(PGE), F(MCA), F(CMOV), F(PAT), F(PSE36), /* PSN */ F(CLFLUSH), /* Reserved */ VENDOR_F(DS), /* ACPI */ F(MMX), F(FXSR), F(XMM), F(XMM2), F(SELFSNOOP), /* HTT, TM, Reserved, PBE */ ); kvm_cpu_cap_init(CPUID_7_0_EBX, F(FSGSBASE), EMULATED_F(TSC_ADJUST), F(SGX), F(BMI1), F(HLE), F(AVX2), F(FDP_EXCPTN_ONLY), F(SMEP), F(BMI2), F(ERMS), F(INVPCID), F(RTM), F(ZERO_FCS_FDS), VENDOR_F(MPX), F(AVX512F), F(AVX512DQ), F(RDSEED), F(ADX), F(SMAP), F(AVX512IFMA), F(CLFLUSHOPT), F(CLWB), VENDOR_F(INTEL_PT), F(AVX512PF), F(AVX512ER), F(AVX512CD), F(SHA_NI), F(AVX512BW), F(AVX512VL), ); kvm_cpu_cap_init(CPUID_7_ECX, F(AVX512VBMI), PASSTHROUGH_F(LA57), F(PKU), RUNTIME_F(OSPKE), F(RDPID), F(AVX512_VPOPCNTDQ), F(UMIP), F(AVX512_VBMI2), F(GFNI), F(VAES), F(VPCLMULQDQ), F(AVX512_VNNI), F(AVX512_BITALG), F(CLDEMOTE), F(MOVDIRI), F(MOVDIR64B), VENDOR_F(WAITPKG), F(SGX_LC), F(BUS_LOCK_DETECT), ); /* * PKU not yet implemented for shadow paging and requires OSPKE * to be set on the host. 
Clear it if that is not the case */ if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) kvm_cpu_cap_clear(X86_FEATURE_PKU); kvm_cpu_cap_init(CPUID_7_EDX, F(AVX512_4VNNIW), F(AVX512_4FMAPS), F(SPEC_CTRL), F(SPEC_CTRL_SSBD), EMULATED_F(ARCH_CAPABILITIES), F(INTEL_STIBP), F(MD_CLEAR), F(AVX512_VP2INTERSECT), F(FSRM), F(SERIALIZE), F(TSXLDTRK), F(AVX512_FP16), F(AMX_TILE), F(AMX_INT8), F(AMX_BF16), F(FLUSH_L1D), ); if (boot_cpu_has(X86_FEATURE_AMD_IBPB_RET) && boot_cpu_has(X86_FEATURE_AMD_IBPB) && boot_cpu_has(X86_FEATURE_AMD_IBRS)) kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL); if (boot_cpu_has(X86_FEATURE_STIBP)) kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP); if (boot_cpu_has(X86_FEATURE_AMD_SSBD)) kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD); kvm_cpu_cap_init(CPUID_7_1_EAX, F(SHA512), F(SM3), F(SM4), F(AVX_VNNI), F(AVX512_BF16), F(CMPCCXADD), F(FZRM), F(FSRS), F(FSRC), F(AMX_FP16), F(AVX_IFMA), F(LAM), ); kvm_cpu_cap_init(CPUID_7_1_EDX, F(AVX_VNNI_INT8), F(AVX_NE_CONVERT), F(AMX_COMPLEX), F(AVX_VNNI_INT16), F(PREFETCHITI), F(AVX10), ); kvm_cpu_cap_init(CPUID_7_2_EDX, F(INTEL_PSFD), F(IPRED_CTRL), F(RRSBA_CTRL), F(DDPD_U), F(BHI_CTRL), F(MCDT_NO), ); kvm_cpu_cap_init(CPUID_D_1_EAX, F(XSAVEOPT), F(XSAVEC), F(XGETBV1), F(XSAVES), X86_64_F(XFD), ); kvm_cpu_cap_init(CPUID_12_EAX, SCATTERED_F(SGX1), SCATTERED_F(SGX2), SCATTERED_F(SGX_EDECCSSA), ); kvm_cpu_cap_init(CPUID_24_0_EBX, F(AVX10_128), F(AVX10_256), F(AVX10_512), ); kvm_cpu_cap_init(CPUID_8000_0001_ECX, F(LAHF_LM), F(CMP_LEGACY), VENDOR_F(SVM), /* ExtApicSpace */ F(CR8_LEGACY), F(ABM), F(SSE4A), F(MISALIGNSSE), F(3DNOWPREFETCH), F(OSVW), /* IBS */ F(XOP), /* SKINIT, WDT, LWP */ F(FMA4), F(TBM), F(TOPOEXT), VENDOR_F(PERFCTR_CORE), ); kvm_cpu_cap_init(CPUID_8000_0001_EDX, ALIASED_1_EDX_F(FPU), ALIASED_1_EDX_F(VME), ALIASED_1_EDX_F(DE), ALIASED_1_EDX_F(PSE), ALIASED_1_EDX_F(TSC), ALIASED_1_EDX_F(MSR), ALIASED_1_EDX_F(PAE), ALIASED_1_EDX_F(MCE), ALIASED_1_EDX_F(CX8), ALIASED_1_EDX_F(APIC), /* Reserved */ F(SYSCALL), ALIASED_1_EDX_F(MTRR), ALIASED_1_EDX_F(PGE), ALIASED_1_EDX_F(MCA), ALIASED_1_EDX_F(CMOV), ALIASED_1_EDX_F(PAT), ALIASED_1_EDX_F(PSE36), /* Reserved */ F(NX), /* Reserved */ F(MMXEXT), ALIASED_1_EDX_F(MMX), ALIASED_1_EDX_F(FXSR), F(FXSR_OPT), X86_64_F(GBPAGES), F(RDTSCP), /* Reserved */ X86_64_F(LM), F(3DNOWEXT), F(3DNOW), ); if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64)) kvm_cpu_cap_set(X86_FEATURE_GBPAGES); kvm_cpu_cap_init(CPUID_8000_0007_EDX, SCATTERED_F(CONSTANT_TSC), ); kvm_cpu_cap_init(CPUID_8000_0008_EBX, F(CLZERO), F(XSAVEERPTR), F(WBNOINVD), F(AMD_IBPB), F(AMD_IBRS), F(AMD_SSBD), F(VIRT_SSBD), F(AMD_SSB_NO), F(AMD_STIBP), F(AMD_STIBP_ALWAYS_ON), F(AMD_PSFD), F(AMD_IBPB_RET), ); /* * AMD has separate bits for each SPEC_CTRL bit. * arch/x86/kernel/cpu/bugs.c is kind enough to * record that in cpufeatures so use them. */ if (boot_cpu_has(X86_FEATURE_IBPB)) { kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB); if (boot_cpu_has(X86_FEATURE_SPEC_CTRL) && !boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB_RET); } if (boot_cpu_has(X86_FEATURE_IBRS)) kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS); if (boot_cpu_has(X86_FEATURE_STIBP)) kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP); if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD)) kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD); if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO); /* * The preference is to use SPEC CTRL MSR instead of the * VIRT_SPEC MSR. 
*/ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) && !boot_cpu_has(X86_FEATURE_AMD_SSBD)) kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD); /* All SVM features required additional vendor module enabling. */ kvm_cpu_cap_init(CPUID_8000_000A_EDX, VENDOR_F(NPT), VENDOR_F(VMCBCLEAN), VENDOR_F(FLUSHBYASID), VENDOR_F(NRIPS), VENDOR_F(TSCRATEMSR), VENDOR_F(V_VMSAVE_VMLOAD), VENDOR_F(LBRV), VENDOR_F(PAUSEFILTER), VENDOR_F(PFTHRESHOLD), VENDOR_F(VGIF), VENDOR_F(VNMI), VENDOR_F(SVME_ADDR_CHK), ); kvm_cpu_cap_init(CPUID_8000_001F_EAX, VENDOR_F(SME), VENDOR_F(SEV), /* VM_PAGE_FLUSH */ VENDOR_F(SEV_ES), F(SME_COHERENT), ); kvm_cpu_cap_init(CPUID_8000_0021_EAX, F(NO_NESTED_DATA_BP), /* * Synthesize "LFENCE is serializing" into the AMD-defined entry * in KVM's supported CPUID, i.e. if the feature is reported as * supported by the kernel. LFENCE_RDTSC was a Linux-defined * synthetic feature long before AMD joined the bandwagon, e.g. * LFENCE is serializing on most CPUs that support SSE2. On * CPUs that don't support AMD's leaf, ANDing with the raw host * CPUID will drop the flags, and reporting support in AMD's * leaf can make it easier for userspace to detect the feature. */ SYNTHESIZED_F(LFENCE_RDTSC), /* SmmPgCfgLock */ F(NULL_SEL_CLR_BASE), F(AUTOIBRS), EMULATED_F(NO_SMM_CTL_MSR), /* PrefetchCtlMsr */ F(WRMSR_XX_BASE_NS), SYNTHESIZED_F(SBPB), SYNTHESIZED_F(IBPB_BRTYPE), SYNTHESIZED_F(SRSO_NO), F(SRSO_USER_KERNEL_NO), ); kvm_cpu_cap_init(CPUID_8000_0022_EAX, F(PERFMON_V2), ); if (!static_cpu_has_bug(X86_BUG_NULL_SEG)) kvm_cpu_cap_set(X86_FEATURE_NULL_SEL_CLR_BASE); kvm_cpu_cap_init(CPUID_C000_0001_EDX, F(XSTORE), F(XSTORE_EN), F(XCRYPT), F(XCRYPT_EN), F(ACE2), F(ACE2_EN), F(PHE), F(PHE_EN), F(PMM), F(PMM_EN), ); /* * Hide RDTSCP and RDPID if either feature is reported as supported but * probing MSR_TSC_AUX failed. This is purely a sanity check and * should never happen, but the guest will likely crash if RDTSCP or * RDPID is misreported, and KVM has botched MSR_TSC_AUX emulation in * the past. For example, the sanity check may fire if this instance of * KVM is running as L1 on top of an older, broken KVM. */ if (WARN_ON((kvm_cpu_cap_has(X86_FEATURE_RDTSCP) || kvm_cpu_cap_has(X86_FEATURE_RDPID)) && !kvm_is_supported_user_return_msr(MSR_TSC_AUX))) { kvm_cpu_cap_clear(X86_FEATURE_RDTSCP); kvm_cpu_cap_clear(X86_FEATURE_RDPID); } } EXPORT_SYMBOL_GPL(kvm_set_cpu_caps); #undef F #undef SCATTERED_F #undef X86_64_F #undef EMULATED_F #undef SYNTHESIZED_F #undef PASSTHROUGH_F #undef ALIASED_1_EDX_F #undef VENDOR_F #undef RUNTIME_F struct kvm_cpuid_array { struct kvm_cpuid_entry2 *entries; int maxnent; int nent; }; static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array) { if (array->nent >= array->maxnent) return NULL; return &array->entries[array->nent++]; } static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array, u32 function, u32 index) { struct kvm_cpuid_entry2 *entry = get_next_cpuid(array); if (!entry) return NULL; memset(entry, 0, sizeof(*entry)); entry->function = function; entry->index = index; switch (function & 0xC0000000) { case 0x40000000: /* Hypervisor leaves are always synthesized by __do_cpuid_func. */ return entry; case 0x80000000: /* * 0x80000021 is sometimes synthesized by __do_cpuid_func, which * would result in out-of-bounds calls to do_host_cpuid. 
*/ { static int max_cpuid_80000000; if (!READ_ONCE(max_cpuid_80000000)) WRITE_ONCE(max_cpuid_80000000, cpuid_eax(0x80000000)); if (function > READ_ONCE(max_cpuid_80000000)) return entry; } break; default: break; } cpuid_count(entry->function, entry->index, &entry->eax, &entry->ebx, &entry->ecx, &entry->edx); if (cpuid_function_is_indexed(function)) entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; return entry; } static int cpuid_func_emulated(struct kvm_cpuid_entry2 *entry, u32 func, bool include_partially_emulated) { memset(entry, 0, sizeof(*entry)); entry->function = func; entry->index = 0; entry->flags = 0; switch (func) { case 0: entry->eax = 7; return 1; case 1: entry->ecx = feature_bit(MOVBE); /* * KVM allows userspace to enumerate MONITOR+MWAIT support to * the guest, but the MWAIT feature flag is never advertised * to userspace because MONITOR+MWAIT aren't virtualized by * hardware, can't be faithfully emulated in software (KVM * emulates them as NOPs), and allowing the guest to execute * them natively requires enabling a per-VM capability. */ if (include_partially_emulated) entry->ecx |= feature_bit(MWAIT); return 1; case 7: entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; entry->eax = 0; if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) entry->ecx = feature_bit(RDPID); return 1; default: return 0; } } static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func) { if (array->nent >= array->maxnent) return -E2BIG; array->nent += cpuid_func_emulated(&array->entries[array->nent], func, false); return 0; } static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) { struct kvm_cpuid_entry2 *entry; int r, i, max_idx; /* all calls to cpuid_count() should be made on the same cpu */ get_cpu(); r = -E2BIG; entry = do_host_cpuid(array, function, 0); if (!entry) goto out; switch (function) { case 0: /* Limited to the highest leaf implemented in KVM. */ entry->eax = min(entry->eax, 0x24U); break; case 1: cpuid_entry_override(entry, CPUID_1_EDX); cpuid_entry_override(entry, CPUID_1_ECX); break; case 2: /* * On ancient CPUs, function 2 entries are STATEFUL. That is, * CPUID(function=2, index=0) may return different results each * time, with the least-significant byte in EAX enumerating the * number of times software should do CPUID(2, 0). * * Modern CPUs, i.e. every CPU KVM has *ever* run on are less * idiotic. Intel's SDM states that EAX & 0xff "will always * return 01H. Software should ignore this value and not * interpret it as an informational descriptor", while AMD's * APM states that CPUID(2) is reserved. * * WARN if a frankenstein CPU that supports virtualization and * a stateful CPUID.0x2 is encountered. */ WARN_ON_ONCE((entry->eax & 0xff) > 1); break; /* functions 4 and 0x8000001d have additional index. */ case 4: case 0x8000001d: /* * Read entries until the cache type in the previous entry is * zero, i.e. indicates an invalid entry. */ for (i = 1; entry->eax & 0x1f; ++i) { entry = do_host_cpuid(array, function, i); if (!entry) goto out; } break; case 6: /* Thermal management */ entry->eax = 0x4; /* allow ARAT */ entry->ebx = 0; entry->ecx = 0; entry->edx = 0; break; /* function 7 has additional index. */ case 7: max_idx = entry->eax = min(entry->eax, 2u); cpuid_entry_override(entry, CPUID_7_0_EBX); cpuid_entry_override(entry, CPUID_7_ECX); cpuid_entry_override(entry, CPUID_7_EDX); /* KVM only supports up to 0x7.2, capped above via min(). 
*/ if (max_idx >= 1) { entry = do_host_cpuid(array, function, 1); if (!entry) goto out; cpuid_entry_override(entry, CPUID_7_1_EAX); cpuid_entry_override(entry, CPUID_7_1_EDX); entry->ebx = 0; entry->ecx = 0; } if (max_idx >= 2) { entry = do_host_cpuid(array, function, 2); if (!entry) goto out; cpuid_entry_override(entry, CPUID_7_2_EDX); entry->ecx = 0; entry->ebx = 0; entry->eax = 0; } break; case 0xa: { /* Architectural Performance Monitoring */ union cpuid10_eax eax; union cpuid10_edx edx; if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) { entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; } eax.split.version_id = kvm_pmu_cap.version; eax.split.num_counters = kvm_pmu_cap.num_counters_gp; eax.split.bit_width = kvm_pmu_cap.bit_width_gp; eax.split.mask_length = kvm_pmu_cap.events_mask_len; edx.split.num_counters_fixed = kvm_pmu_cap.num_counters_fixed; edx.split.bit_width_fixed = kvm_pmu_cap.bit_width_fixed; if (kvm_pmu_cap.version) edx.split.anythread_deprecated = 1; edx.split.reserved1 = 0; edx.split.reserved2 = 0; entry->eax = eax.full; entry->ebx = kvm_pmu_cap.events_mask; entry->ecx = 0; entry->edx = edx.full; break; } case 0x1f: case 0xb: /* * No topology; a valid topology is indicated by the presence * of subleaf 1. */ entry->eax = entry->ebx = entry->ecx = 0; break; case 0xd: { u64 permitted_xcr0 = kvm_get_filtered_xcr0(); u64 permitted_xss = kvm_caps.supported_xss; entry->eax &= permitted_xcr0; entry->ebx = xstate_required_size(permitted_xcr0, false); entry->ecx = entry->ebx; entry->edx &= permitted_xcr0 >> 32; if (!permitted_xcr0) break; entry = do_host_cpuid(array, function, 1); if (!entry) goto out; cpuid_entry_override(entry, CPUID_D_1_EAX); if (entry->eax & (feature_bit(XSAVES) | feature_bit(XSAVEC))) entry->ebx = xstate_required_size(permitted_xcr0 | permitted_xss, true); else { WARN_ON_ONCE(permitted_xss != 0); entry->ebx = 0; } entry->ecx &= permitted_xss; entry->edx &= permitted_xss >> 32; for (i = 2; i < 64; ++i) { bool s_state; if (permitted_xcr0 & BIT_ULL(i)) s_state = false; else if (permitted_xss & BIT_ULL(i)) s_state = true; else continue; entry = do_host_cpuid(array, function, i); if (!entry) goto out; /* * The supported check above should have filtered out * invalid sub-leafs. Only valid sub-leafs should * reach this point, and they should have a non-zero * save state size. Furthermore, check whether the * processor agrees with permitted_xcr0/permitted_xss * on whether this is an XCR0- or IA32_XSS-managed area. */ if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) { --array->nent; continue; } if (!kvm_cpu_cap_has(X86_FEATURE_XFD)) entry->ecx &= ~BIT_ULL(2); entry->edx = 0; } break; } case 0x12: /* Intel SGX */ if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) { entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; } /* * Index 0: Sub-features, MISCSELECT (a.k.a extended features) * and max enclave sizes. The SGX sub-features and MISCSELECT * are restricted by kernel and KVM capabilities (like most * feature flags), while enclave size is unrestricted. */ cpuid_entry_override(entry, CPUID_12_EAX); entry->ebx &= SGX_MISC_EXINFO; entry = do_host_cpuid(array, function, 1); if (!entry) goto out; /* * Index 1: SECS.ATTRIBUTES. ATTRIBUTES are restricted a la * feature flags. Advertise all supported flags, including * privileged attributes that require explicit opt-in from * userspace. ATTRIBUTES.XFRM is not adjusted as userspace is * expected to derive it from supported XCR0. 
*/ entry->eax &= SGX_ATTR_PRIV_MASK | SGX_ATTR_UNPRIV_MASK; entry->ebx &= 0; break; /* Intel PT */ case 0x14: if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) { entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; } for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) { if (!do_host_cpuid(array, function, i)) goto out; } break; /* Intel AMX TILE */ case 0x1d: if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) { entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; } for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) { if (!do_host_cpuid(array, function, i)) goto out; } break; case 0x1e: /* TMUL information */ if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) { entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; } break; case 0x24: { u8 avx10_version; if (!kvm_cpu_cap_has(X86_FEATURE_AVX10)) { entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; } /* * The AVX10 version is encoded in EBX[7:0]. Note, the version * is guaranteed to be >=1 if AVX10 is supported. Note #2, the * version needs to be captured before overriding EBX features! */ avx10_version = min_t(u8, entry->ebx & 0xff, 1); cpuid_entry_override(entry, CPUID_24_0_EBX); entry->ebx |= avx10_version; entry->eax = 0; entry->ecx = 0; entry->edx = 0; break; } case KVM_CPUID_SIGNATURE: { const u32 *sigptr = (const u32 *)KVM_SIGNATURE; entry->eax = KVM_CPUID_FEATURES; entry->ebx = sigptr[0]; entry->ecx = sigptr[1]; entry->edx = sigptr[2]; break; } case KVM_CPUID_FEATURES: entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) | (1 << KVM_FEATURE_NOP_IO_DELAY) | (1 << KVM_FEATURE_CLOCKSOURCE2) | (1 << KVM_FEATURE_ASYNC_PF) | (1 << KVM_FEATURE_PV_EOI) | (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) | (1 << KVM_FEATURE_PV_UNHALT) | (1 << KVM_FEATURE_PV_TLB_FLUSH) | (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) | (1 << KVM_FEATURE_PV_SEND_IPI) | (1 << KVM_FEATURE_POLL_CONTROL) | (1 << KVM_FEATURE_PV_SCHED_YIELD) | (1 << KVM_FEATURE_ASYNC_PF_INT); if (sched_info_on()) entry->eax |= (1 << KVM_FEATURE_STEAL_TIME); entry->ebx = 0; entry->ecx = 0; entry->edx = 0; break; case 0x80000000: entry->eax = min(entry->eax, 0x80000022); /* * Serializing LFENCE is reported in a multitude of ways, and * NullSegClearsBase is not reported in CPUID on Zen2; help * userspace by providing the CPUID leaf ourselves. * * However, only do it if the host has CPUID leaf 0x8000001d. * QEMU thinks that it can query the host blindly for that * CPUID leaf if KVM reports that it supports 0x8000001d or * above. The processor merrily returns values from the * highest Intel leaf which QEMU tries to use as the guest's * 0x8000001d. Even worse, this can result in an infinite * loop if said highest leaf has no subleaves indexed by ECX. */ if (entry->eax >= 0x8000001d && (static_cpu_has(X86_FEATURE_LFENCE_RDTSC) || !static_cpu_has_bug(X86_BUG_NULL_SEG))) entry->eax = max(entry->eax, 0x80000021); break; case 0x80000001: entry->ebx &= ~GENMASK(27, 16); cpuid_entry_override(entry, CPUID_8000_0001_EDX); cpuid_entry_override(entry, CPUID_8000_0001_ECX); break; case 0x80000005: /* Pass host L1 cache and TLB info. */ break; case 0x80000006: /* Drop reserved bits, pass host L2 cache and TLB info. */ entry->edx &= ~GENMASK(17, 16); break; case 0x80000007: /* Advanced power management */ cpuid_entry_override(entry, CPUID_8000_0007_EDX); /* mask against host */ entry->edx &= boot_cpu_data.x86_power; entry->eax = entry->ebx = entry->ecx = 0; break; case 0x80000008: { /* * GuestPhysAddrSize (EAX[23:16]) is intended for software * use. 
* * KVM's ABI is to report the effective MAXPHYADDR for the * guest in PhysAddrSize (phys_as), and the maximum * *addressable* GPA in GuestPhysAddrSize (g_phys_as). * * GuestPhysAddrSize is valid if and only if TDP is enabled, * in which case the max GPA that can be addressed by KVM may * be less than the max GPA that can be legally generated by * the guest, e.g. if MAXPHYADDR>48 but the CPU doesn't * support 5-level TDP. */ unsigned int virt_as = max((entry->eax >> 8) & 0xff, 48U); unsigned int phys_as, g_phys_as; /* * If TDP (NPT) is disabled use the adjusted host MAXPHYADDR as * the guest operates in the same PA space as the host, i.e. * reductions in MAXPHYADDR for memory encryption affect shadow * paging, too. * * If TDP is enabled, use the raw bare metal MAXPHYADDR as * reductions to the HPAs do not affect GPAs. The max * addressable GPA is the same as the max effective GPA, except * that it's capped at 48 bits if 5-level TDP isn't supported * (hardware processes bits 51:48 only when walking the fifth * level page table). */ if (!tdp_enabled) { phys_as = boot_cpu_data.x86_phys_bits; g_phys_as = 0; } else { phys_as = entry->eax & 0xff; g_phys_as = phys_as; if (kvm_mmu_get_max_tdp_level() < 5) g_phys_as = min(g_phys_as, 48U); } entry->eax = phys_as | (virt_as << 8) | (g_phys_as << 16); entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8)); entry->edx = 0; cpuid_entry_override(entry, CPUID_8000_0008_EBX); break; } case 0x8000000A: if (!kvm_cpu_cap_has(X86_FEATURE_SVM)) { entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; } entry->eax = 1; /* SVM revision 1 */ entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper ASID emulation to nested SVM */ entry->ecx = 0; /* Reserved */ cpuid_entry_override(entry, CPUID_8000_000A_EDX); break; case 0x80000019: entry->ecx = entry->edx = 0; break; case 0x8000001a: entry->eax &= GENMASK(2, 0); entry->ebx = entry->ecx = entry->edx = 0; break; case 0x8000001e: /* Do not return host topology information. */ entry->eax = entry->ebx = entry->ecx = 0; entry->edx = 0; /* reserved */ break; case 0x8000001F: if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) { entry->eax = entry->ebx = entry->ecx = entry->edx = 0; } else { cpuid_entry_override(entry, CPUID_8000_001F_EAX); /* Clear NumVMPL since KVM does not support VMPL. */ entry->ebx &= ~GENMASK(31, 12); /* * Enumerate '0' for "PA bits reduction", the adjusted * MAXPHYADDR is enumerated directly (see 0x80000008). 
*/ entry->ebx &= ~GENMASK(11, 6); } break; case 0x80000020: entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; case 0x80000021: entry->ebx = entry->ecx = entry->edx = 0; cpuid_entry_override(entry, CPUID_8000_0021_EAX); break; /* AMD Extended Performance Monitoring and Debug */ case 0x80000022: { union cpuid_0x80000022_ebx ebx; entry->ecx = entry->edx = 0; if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) { entry->eax = entry->ebx = 0; break; } cpuid_entry_override(entry, CPUID_8000_0022_EAX); ebx.split.num_core_pmc = kvm_pmu_cap.num_counters_gp; entry->ebx = ebx.full; break; } /*Add support for Centaur's CPUID instruction*/ case 0xC0000000: /*Just support up to 0xC0000004 now*/ entry->eax = min(entry->eax, 0xC0000004); break; case 0xC0000001: cpuid_entry_override(entry, CPUID_C000_0001_EDX); break; case 3: /* Processor serial number */ case 5: /* MONITOR/MWAIT */ case 0xC0000002: case 0xC0000003: case 0xC0000004: default: entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; } r = 0; out: put_cpu(); return r; } static int do_cpuid_func(struct kvm_cpuid_array *array, u32 func, unsigned int type) { if (type == KVM_GET_EMULATED_CPUID) return __do_cpuid_func_emulated(array, func); return __do_cpuid_func(array, func); } #define CENTAUR_CPUID_SIGNATURE 0xC0000000 static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func, unsigned int type) { u32 limit; int r; if (func == CENTAUR_CPUID_SIGNATURE && boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR) return 0; r = do_cpuid_func(array, func, type); if (r) return r; limit = array->entries[array->nent - 1].eax; for (func = func + 1; func <= limit; ++func) { r = do_cpuid_func(array, func, type); if (r) break; } return r; } static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries, __u32 num_entries, unsigned int ioctl_type) { int i; __u32 pad[3]; if (ioctl_type != KVM_GET_EMULATED_CPUID) return false; /* * We want to make sure that ->padding is being passed clean from * userspace in case we want to use it for something in the future. * * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we * have to give ourselves satisfied only with the emulated side. /me * sheds a tear. */ for (i = 0; i < num_entries; i++) { if (copy_from_user(pad, entries[i].padding, sizeof(pad))) return true; if (pad[0] || pad[1] || pad[2]) return true; } return false; } int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid, struct kvm_cpuid_entry2 __user *entries, unsigned int type) { static const u32 funcs[] = { 0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE, }; struct kvm_cpuid_array array = { .nent = 0, }; int r, i; if (cpuid->nent < 1) return -E2BIG; if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) cpuid->nent = KVM_MAX_CPUID_ENTRIES; if (sanity_check_entries(entries, cpuid->nent, type)) return -EINVAL; array.entries = kvcalloc(cpuid->nent, sizeof(struct kvm_cpuid_entry2), GFP_KERNEL); if (!array.entries) return -ENOMEM; array.maxnent = cpuid->nent; for (i = 0; i < ARRAY_SIZE(funcs); i++) { r = get_cpuid_func(&array, funcs[i], type); if (r) goto out_free; } cpuid->nent = array.nent; if (copy_to_user(entries, array.entries, array.nent * sizeof(struct kvm_cpuid_entry2))) r = -EFAULT; out_free: kvfree(array.entries); return r; } /* * Intel CPUID semantics treats any query for an out-of-range leaf as if the * highest basic leaf (i.e. CPUID.0H:EAX) were requested. AMD CPUID semantics * returns all zeroes for any undefined leaf, whether or not the leaf is in * range. Centaur/VIA follows Intel semantics. 
* * A leaf is considered out-of-range if its function is higher than the maximum * supported leaf of its associated class or if its associated class does not * exist. * * There are three primary classes to be considered, with their respective * ranges described as "<base> - <top>[,<base2> - <top2>] inclusive. A primary * class exists if a guest CPUID entry for its <base> leaf exists. For a given * class, CPUID.<base>.EAX contains the max supported leaf for the class. * * - Basic: 0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff * - Hypervisor: 0x40000000 - 0x4fffffff * - Extended: 0x80000000 - 0xbfffffff * - Centaur: 0xc0000000 - 0xcfffffff * * The Hypervisor class is further subdivided into sub-classes that each act as * their own independent class associated with a 0x100 byte range. E.g. if Qemu * is advertising support for both HyperV and KVM, the resulting Hypervisor * CPUID sub-classes are: * * - HyperV: 0x40000000 - 0x400000ff * - KVM: 0x40000100 - 0x400001ff */ static struct kvm_cpuid_entry2 * get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index) { struct kvm_cpuid_entry2 *basic, *class; u32 function = *fn_ptr; basic = kvm_find_cpuid_entry(vcpu, 0); if (!basic) return NULL; if (is_guest_vendor_amd(basic->ebx, basic->ecx, basic->edx) || is_guest_vendor_hygon(basic->ebx, basic->ecx, basic->edx)) return NULL; if (function >= 0x40000000 && function <= 0x4fffffff) class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00); else if (function >= 0xc0000000) class = kvm_find_cpuid_entry(vcpu, 0xc0000000); else class = kvm_find_cpuid_entry(vcpu, function & 0x80000000); if (class && function <= class->eax) return NULL; /* * Leaf specific adjustments are also applied when redirecting to the * max basic entry, e.g. if the max basic leaf is 0xb but there is no * entry for CPUID.0xb.index (see below), then the output value for EDX * needs to be pulled from CPUID.0xb.1. */ *fn_ptr = basic->eax; /* * The class does not exist or the requested function is out of range; * the effective CPUID entry is the max basic leaf. Note, the index of * the original requested leaf is observed! */ return kvm_find_cpuid_entry_index(vcpu, basic->eax, index); } bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, bool exact_only) { u32 orig_function = *eax, function = *eax, index = *ecx; struct kvm_cpuid_entry2 *entry; bool exact, used_max_basic = false; if (vcpu->arch.cpuid_dynamic_bits_dirty) kvm_update_cpuid_runtime(vcpu); entry = kvm_find_cpuid_entry_index(vcpu, function, index); exact = !!entry; if (!entry && !exact_only) { entry = get_out_of_range_cpuid_entry(vcpu, &function, index); used_max_basic = !!entry; } if (entry) { *eax = entry->eax; *ebx = entry->ebx; *ecx = entry->ecx; *edx = entry->edx; if (function == 7 && index == 0) { u64 data; if ((*ebx & (feature_bit(RTM) | feature_bit(HLE))) && !__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) && (data & TSX_CTRL_CPUID_CLEAR)) *ebx &= ~(feature_bit(RTM) | feature_bit(HLE)); } else if (function == 0x80000007) { if (kvm_hv_invtsc_suppressed(vcpu)) *edx &= ~feature_bit(CONSTANT_TSC); } else if (IS_ENABLED(CONFIG_KVM_XEN) && kvm_xen_is_tsc_leaf(vcpu, function)) { /* * Update guest TSC frequency information if necessary. * Ignore failures, there is no sane value that can be * provided if KVM can't get the TSC frequency. 
*/ if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) kvm_guest_time_update(vcpu); if (index == 1) { *ecx = vcpu->arch.pvclock_tsc_mul; *edx = vcpu->arch.pvclock_tsc_shift; } else if (index == 2) { *eax = vcpu->arch.hw_tsc_khz; } } } else { *eax = *ebx = *ecx = *edx = 0; /* * When leaf 0BH or 1FH is defined, CL is pass-through * and EDX is always the x2APIC ID, even for undefined * subleaves. Index 1 will exist iff the leaf is * implemented, so we pass through CL iff leaf 1 * exists. EDX can be copied from any existing index. */ if (function == 0xb || function == 0x1f) { entry = kvm_find_cpuid_entry_index(vcpu, function, 1); if (entry) { *ecx = index & 0xff; *edx = entry->edx; } } } trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact, used_max_basic); return exact; } EXPORT_SYMBOL_GPL(kvm_cpuid); int kvm_emulate_cpuid(struct kvm_vcpu *vcpu) { u32 eax, ebx, ecx, edx; if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0)) return 1; eax = kvm_rax_read(vcpu); ecx = kvm_rcx_read(vcpu); kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false); kvm_rax_write(vcpu, eax); kvm_rbx_write(vcpu, ebx); kvm_rcx_write(vcpu, ecx); kvm_rdx_write(vcpu, edx); return kvm_skip_emulated_instruction(vcpu); } EXPORT_SYMBOL_GPL(kvm_emulate_cpuid); |
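The CPUID.0x80000008 handling above packs three address-size fields into EAX: phys_as in bits 7:0, virt_as in bits 15:8, and g_phys_as in bits 23:16. Below is a minimal sketch of how a guest could read that layout back using GCC's <cpuid.h> helper; the program is purely illustrative and is not part of KVM.

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/*
	 * CPUID.80000008H as assembled by the KVM code above:
	 * EAX = phys_as | (virt_as << 8) | (g_phys_as << 16)
	 */
	if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
		return 1;

	printf("effective MAXPHYADDR (phys_as):  %u bits\n", eax & 0xff);
	printf("virtual address size (virt_as):  %u bits\n", (eax >> 8) & 0xff);
	printf("max addressable GPA (g_phys_as): %u bits\n", (eax >> 16) & 0xff);
	return 0;
}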
11 11 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 | // SPDX-License-Identifier: GPL-2.0-only /* * misc.c * * PURPOSE * Miscellaneous routines for the OSTA-UDF(tm) filesystem. * * COPYRIGHT * (C) 1998 Dave Boynton * (C) 1998-2004 Ben Fennema * (C) 1999-2000 Stelias Computing Inc * * HISTORY * * 04/19/99 blf partial support for reading/writing specific EA's */ #include "udfdecl.h" #include <linux/fs.h> #include <linux/string.h> #include <linux/crc-itu-t.h> #include "udf_i.h" #include "udf_sb.h" struct genericFormat *udf_add_extendedattr(struct inode *inode, uint32_t size, uint32_t type, uint8_t loc) { uint8_t *ea = NULL, *ad = NULL; int offset; uint16_t crclen; struct udf_inode_info *iinfo = UDF_I(inode); ea = iinfo->i_data; if (iinfo->i_lenEAttr) { ad = iinfo->i_data + iinfo->i_lenEAttr; } else { ad = ea; size += sizeof(struct extendedAttrHeaderDesc); } offset = inode->i_sb->s_blocksize - udf_file_entry_alloc_offset(inode) - iinfo->i_lenAlloc; /* TODO - Check for FreeEASpace */ if (loc & 0x01 && offset >= size) { struct extendedAttrHeaderDesc *eahd; eahd = (struct extendedAttrHeaderDesc *)ea; if (iinfo->i_lenAlloc) memmove(&ad[size], ad, iinfo->i_lenAlloc); if (iinfo->i_lenEAttr) { /* check checksum/crc */ if (eahd->descTag.tagIdent != cpu_to_le16(TAG_IDENT_EAHD) || le32_to_cpu(eahd->descTag.tagLocation) != iinfo->i_location.logicalBlockNum) return NULL; } else { struct udf_sb_info *sbi = UDF_SB(inode->i_sb); size -= sizeof(struct extendedAttrHeaderDesc); iinfo->i_lenEAttr += sizeof(struct extendedAttrHeaderDesc); eahd->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EAHD); if (sbi->s_udfrev >= 0x0200) eahd->descTag.descVersion = cpu_to_le16(3); else eahd->descTag.descVersion = cpu_to_le16(2); eahd->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number); eahd->descTag.tagLocation = cpu_to_le32( iinfo->i_location.logicalBlockNum); eahd->impAttrLocation = cpu_to_le32(0xFFFFFFFF); eahd->appAttrLocation = cpu_to_le32(0xFFFFFFFF); } offset = iinfo->i_lenEAttr; if (type < 2048) { if (le32_to_cpu(eahd->appAttrLocation) < iinfo->i_lenEAttr) { uint32_t aal = le32_to_cpu(eahd->appAttrLocation); memmove(&ea[offset - aal + size], &ea[aal], offset - aal); offset -= aal; eahd->appAttrLocation = cpu_to_le32(aal + size); } if (le32_to_cpu(eahd->impAttrLocation) < iinfo->i_lenEAttr) { uint32_t ial = le32_to_cpu(eahd->impAttrLocation); memmove(&ea[offset - ial + size], &ea[ial], offset - ial); offset -= ial; eahd->impAttrLocation = cpu_to_le32(ial + size); } } else if (type < 65536) { if (le32_to_cpu(eahd->appAttrLocation) < 
iinfo->i_lenEAttr) { uint32_t aal = le32_to_cpu(eahd->appAttrLocation); memmove(&ea[offset - aal + size], &ea[aal], offset - aal); offset -= aal; eahd->appAttrLocation = cpu_to_le32(aal + size); } } /* rewrite CRC + checksum of eahd */ crclen = sizeof(struct extendedAttrHeaderDesc) - sizeof(struct tag); eahd->descTag.descCRCLength = cpu_to_le16(crclen); eahd->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)eahd + sizeof(struct tag), crclen)); eahd->descTag.tagChecksum = udf_tag_checksum(&eahd->descTag); iinfo->i_lenEAttr += size; return (struct genericFormat *)&ea[offset]; } return NULL; } struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type, uint8_t subtype) { struct genericFormat *gaf; uint8_t *ea = NULL; uint32_t offset; struct udf_inode_info *iinfo = UDF_I(inode); ea = iinfo->i_data; if (iinfo->i_lenEAttr) { struct extendedAttrHeaderDesc *eahd; eahd = (struct extendedAttrHeaderDesc *)ea; /* check checksum/crc */ if (eahd->descTag.tagIdent != cpu_to_le16(TAG_IDENT_EAHD) || le32_to_cpu(eahd->descTag.tagLocation) != iinfo->i_location.logicalBlockNum) return NULL; if (type < 2048) offset = sizeof(struct extendedAttrHeaderDesc); else if (type < 65536) offset = le32_to_cpu(eahd->impAttrLocation); else offset = le32_to_cpu(eahd->appAttrLocation); while (offset + sizeof(*gaf) < iinfo->i_lenEAttr) { uint32_t attrLength; gaf = (struct genericFormat *)&ea[offset]; attrLength = le32_to_cpu(gaf->attrLength); /* Detect undersized elements and buffer overflows */ if ((attrLength < sizeof(*gaf)) || (attrLength > (iinfo->i_lenEAttr - offset))) break; if (le32_to_cpu(gaf->attrType) == type && gaf->attrSubtype == subtype) return gaf; else offset += attrLength; } } return NULL; } /* * udf_read_tagged * * PURPOSE * Read the first block of a tagged descriptor. * * HISTORY * July 1, 1997 - Andrew E. Mileski * Written, tested, and released. 
*/ struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block, uint32_t location, uint16_t *ident) { struct tag *tag_p; struct buffer_head *bh = NULL; u8 checksum; /* Read the block */ if (block == 0xFFFFFFFF) return NULL; bh = sb_bread(sb, block); if (!bh) { udf_err(sb, "read failed, block=%u, location=%u\n", block, location); return NULL; } tag_p = (struct tag *)(bh->b_data); *ident = le16_to_cpu(tag_p->tagIdent); if (location != le32_to_cpu(tag_p->tagLocation)) { udf_debug("location mismatch block %u, tag %u != %u\n", block, le32_to_cpu(tag_p->tagLocation), location); goto error_out; } /* Verify the tag checksum */ checksum = udf_tag_checksum(tag_p); if (checksum != tag_p->tagChecksum) { udf_err(sb, "tag checksum failed, block %u: 0x%02x != 0x%02x\n", block, checksum, tag_p->tagChecksum); goto error_out; } /* Verify the tag version */ if (tag_p->descVersion != cpu_to_le16(0x0002U) && tag_p->descVersion != cpu_to_le16(0x0003U)) { udf_err(sb, "tag version 0x%04x != 0x0002 || 0x0003, block %u\n", le16_to_cpu(tag_p->descVersion), block); goto error_out; } /* Verify the descriptor CRC */ if (le16_to_cpu(tag_p->descCRCLength) + sizeof(struct tag) > sb->s_blocksize || le16_to_cpu(tag_p->descCRC) == crc_itu_t(0, bh->b_data + sizeof(struct tag), le16_to_cpu(tag_p->descCRCLength))) return bh; udf_debug("Crc failure block %u: crc = %u, crclen = %u\n", block, le16_to_cpu(tag_p->descCRC), le16_to_cpu(tag_p->descCRCLength)); error_out: brelse(bh); return NULL; } struct buffer_head *udf_read_ptagged(struct super_block *sb, struct kernel_lb_addr *loc, uint32_t offset, uint16_t *ident) { return udf_read_tagged(sb, udf_get_lb_pblock(sb, loc, offset), loc->logicalBlockNum + offset, ident); } void udf_update_tag(char *data, int length) { struct tag *tptr = (struct tag *)data; length -= sizeof(struct tag); tptr->descCRCLength = cpu_to_le16(length); tptr->descCRC = cpu_to_le16(crc_itu_t(0, data + sizeof(struct tag), length)); tptr->tagChecksum = udf_tag_checksum(tptr); } void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum, uint32_t loc, int length) { struct tag *tptr = (struct tag *)data; tptr->tagIdent = cpu_to_le16(ident); tptr->descVersion = cpu_to_le16(version); tptr->tagSerialNum = cpu_to_le16(snum); tptr->tagLocation = cpu_to_le32(loc); udf_update_tag(data, length); } u8 udf_tag_checksum(const struct tag *t) { u8 *data = (u8 *)t; u8 checksum = 0; int i; for (i = 0; i < sizeof(struct tag); ++i) if (i != 4) /* position of checksum */ checksum += data[i]; return checksum; } |
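udf_read_tagged() above accepts a descriptor only after three checks: the one-byte tag checksum (a sum of all tag bytes except byte 4, which holds the checksum itself), a descriptor version of 2 or 3, and a CRC-ITU-T over the bytes following the tag. The following is a small standalone sketch of the checksum step, written against a hypothetical raw tag buffer rather than the kernel's struct tag, and assuming the standard 16-byte on-disk tag size.

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

#define UDF_TAG_SIZE		16	/* on-disk descriptor tag size (assumed) */
#define UDF_TAG_CSUM_OFFSET	4	/* byte 4 stores the checksum itself */

/*
 * Recompute the tag checksum the same way udf_tag_checksum() does above:
 * an 8-bit sum of every tag byte except the checksum byte.
 */
static bool udf_tag_checksum_ok(const uint8_t tag[UDF_TAG_SIZE])
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < UDF_TAG_SIZE; i++)
		if (i != UDF_TAG_CSUM_OFFSET)
			sum += tag[i];

	return sum == tag[UDF_TAG_CSUM_OFFSET];
}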
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Integrity Measurement Architecture
 *
 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
 *
 * Authors:
 * Reiner Sailer <sailer@watson.ibm.com>
 * Serge Hallyn <serue@us.ibm.com>
 * Kylene Hall <kylene@us.ibm.com>
 * Mimi Zohar <zohar@us.ibm.com>
 *
 * File: ima_main.c
 *	implements the IMA hooks: ima_bprm_check, ima_file_mmap,
 *	and ima_file_check.
*/ #include <linux/module.h> #include <linux/file.h> #include <linux/binfmts.h> #include <linux/kernel_read_file.h> #include <linux/mount.h> #include <linux/mman.h> #include <linux/slab.h> #include <linux/xattr.h> #include <linux/ima.h> #include <linux/fs.h> #include <linux/iversion.h> #include <linux/evm.h> #include "ima.h" #ifdef CONFIG_IMA_APPRAISE int ima_appraise = IMA_APPRAISE_ENFORCE; #else int ima_appraise; #endif int __ro_after_init ima_hash_algo = HASH_ALGO_SHA1; static int hash_setup_done; static struct notifier_block ima_lsm_policy_notifier = { .notifier_call = ima_lsm_policy_change, }; static int __init hash_setup(char *str) { struct ima_template_desc *template_desc = ima_template_desc_current(); int i; if (hash_setup_done) return 1; if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) { if (strncmp(str, "sha1", 4) == 0) { ima_hash_algo = HASH_ALGO_SHA1; } else if (strncmp(str, "md5", 3) == 0) { ima_hash_algo = HASH_ALGO_MD5; } else { pr_err("invalid hash algorithm \"%s\" for template \"%s\"", str, IMA_TEMPLATE_IMA_NAME); return 1; } goto out; } i = match_string(hash_algo_name, HASH_ALGO__LAST, str); if (i < 0) { pr_err("invalid hash algorithm \"%s\"", str); return 1; } ima_hash_algo = i; out: hash_setup_done = 1; return 1; } __setup("ima_hash=", hash_setup); enum hash_algo ima_get_current_hash_algo(void) { return ima_hash_algo; } /* Prevent mmap'ing a file execute that is already mmap'ed write */ static int mmap_violation_check(enum ima_hooks func, struct file *file, char **pathbuf, const char **pathname, char *filename) { struct inode *inode; int rc = 0; if ((func == MMAP_CHECK || func == MMAP_CHECK_REQPROT) && mapping_writably_mapped(file->f_mapping)) { rc = -ETXTBSY; inode = file_inode(file); if (!*pathbuf) /* ima_rdwr_violation possibly pre-fetched */ *pathname = ima_d_path(&file->f_path, pathbuf, filename); integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, *pathname, "mmap_file", "mmapped_writers", rc, 0); } return rc; } /* * ima_rdwr_violation_check * * Only invalidate the PCR for measured files: * - Opening a file for write when already open for read, * results in a time of measure, time of use (ToMToU) error. * - Opening a file for read when already open for write, * could result in a file measurement error. 
* */ static void ima_rdwr_violation_check(struct file *file, struct ima_iint_cache *iint, int must_measure, char **pathbuf, const char **pathname, char *filename) { struct inode *inode = file_inode(file); fmode_t mode = file->f_mode; bool send_tomtou = false, send_writers = false; if (mode & FMODE_WRITE) { if (atomic_read(&inode->i_readcount) && IS_IMA(inode)) { if (!iint) iint = ima_iint_find(inode); /* IMA_MEASURE is set from reader side */ if (iint && test_and_clear_bit(IMA_MAY_EMIT_TOMTOU, &iint->atomic_flags)) send_tomtou = true; } } else { if (must_measure) set_bit(IMA_MAY_EMIT_TOMTOU, &iint->atomic_flags); /* Limit number of open_writers violations */ if (inode_is_open_for_write(inode) && must_measure) { if (!test_and_set_bit(IMA_EMITTED_OPENWRITERS, &iint->atomic_flags)) send_writers = true; } } if (!send_tomtou && !send_writers) return; *pathname = ima_d_path(&file->f_path, pathbuf, filename); if (send_tomtou) ima_add_violation(file, *pathname, iint, "invalid_pcr", "ToMToU"); if (send_writers) ima_add_violation(file, *pathname, iint, "invalid_pcr", "open_writers"); } static void ima_check_last_writer(struct ima_iint_cache *iint, struct inode *inode, struct file *file) { fmode_t mode = file->f_mode; bool update; if (!(mode & FMODE_WRITE)) return; mutex_lock(&iint->mutex); if (atomic_read(&inode->i_writecount) == 1) { struct kstat stat; clear_bit(IMA_EMITTED_OPENWRITERS, &iint->atomic_flags); update = test_and_clear_bit(IMA_UPDATE_XATTR, &iint->atomic_flags); if ((iint->flags & IMA_NEW_FILE) || vfs_getattr_nosec(&file->f_path, &stat, STATX_CHANGE_COOKIE, AT_STATX_SYNC_AS_STAT) || !(stat.result_mask & STATX_CHANGE_COOKIE) || stat.change_cookie != iint->real_inode.version) { iint->flags &= ~(IMA_DONE_MASK | IMA_NEW_FILE); iint->measured_pcrs = 0; if (update) ima_update_xattr(iint, file); } } mutex_unlock(&iint->mutex); } /** * ima_file_free - called on __fput() * @file: pointer to file structure being freed * * Flag files that changed, based on i_version */ static void ima_file_free(struct file *file) { struct inode *inode = file_inode(file); struct ima_iint_cache *iint; if (!ima_policy_flag || !S_ISREG(inode->i_mode)) return; iint = ima_iint_find(inode); if (!iint) return; ima_check_last_writer(iint, inode, file); } static int process_measurement(struct file *file, const struct cred *cred, struct lsm_prop *prop, char *buf, loff_t size, int mask, enum ima_hooks func) { struct inode *real_inode, *inode = file_inode(file); struct ima_iint_cache *iint = NULL; struct ima_template_desc *template_desc = NULL; struct inode *metadata_inode; char *pathbuf = NULL; char filename[NAME_MAX]; const char *pathname = NULL; int rc = 0, action, must_appraise = 0; int pcr = CONFIG_IMA_MEASURE_PCR_IDX; struct evm_ima_xattr_data *xattr_value = NULL; struct modsig *modsig = NULL; int xattr_len = 0; bool violation_check; enum hash_algo hash_algo; unsigned int allowed_algos = 0; if (!ima_policy_flag || !S_ISREG(inode->i_mode)) return 0; /* Return an IMA_MEASURE, IMA_APPRAISE, IMA_AUDIT action * bitmask based on the appraise/audit/measurement policy. * Included is the appraise submask. */ action = ima_get_action(file_mnt_idmap(file), inode, cred, prop, mask, func, &pcr, &template_desc, NULL, &allowed_algos); violation_check = ((func == FILE_CHECK || func == MMAP_CHECK || func == MMAP_CHECK_REQPROT) && (ima_policy_flag & IMA_MEASURE)); if (!action && !violation_check) return 0; must_appraise = action & IMA_APPRAISE; /* Is the appraise rule hook specific? 
*/ if (action & IMA_FILE_APPRAISE) func = FILE_CHECK; inode_lock(inode); if (action) { iint = ima_inode_get(inode); if (!iint) rc = -ENOMEM; } if (!rc && violation_check) ima_rdwr_violation_check(file, iint, action & IMA_MEASURE, &pathbuf, &pathname, filename); inode_unlock(inode); if (rc) goto out; if (!action) goto out; mutex_lock(&iint->mutex); if (test_and_clear_bit(IMA_CHANGE_ATTR, &iint->atomic_flags)) /* * Reset appraisal flags (action and non-action rule-specific) * if ima_inode_post_setattr was called. */ iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED | IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK | IMA_NONACTION_RULE_FLAGS); /* * Re-evaulate the file if either the xattr has changed or the * kernel has no way of detecting file change on the filesystem. * (Limited to privileged mounted filesystems.) */ if (test_and_clear_bit(IMA_CHANGE_XATTR, &iint->atomic_flags) || ((inode->i_sb->s_iflags & SB_I_IMA_UNVERIFIABLE_SIGNATURE) && !(inode->i_sb->s_iflags & SB_I_UNTRUSTED_MOUNTER) && !(action & IMA_FAIL_UNVERIFIABLE_SIGS))) { iint->flags &= ~IMA_DONE_MASK; iint->measured_pcrs = 0; } /* * On stacked filesystems, detect and re-evaluate file data and * metadata changes. */ real_inode = d_real_inode(file_dentry(file)); if (real_inode != inode && (action & IMA_DO_MASK) && (iint->flags & IMA_DONE_MASK)) { if (!IS_I_VERSION(real_inode) || integrity_inode_attrs_changed(&iint->real_inode, real_inode)) { iint->flags &= ~IMA_DONE_MASK; iint->measured_pcrs = 0; } /* * Reset the EVM status when metadata changed. */ metadata_inode = d_inode(d_real(file_dentry(file), D_REAL_METADATA)); if (evm_metadata_changed(inode, metadata_inode)) iint->flags &= ~(IMA_APPRAISED | IMA_APPRAISED_SUBMASK); } /* Determine if already appraised/measured based on bitmask * (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED, * IMA_AUDIT, IMA_AUDITED) */ iint->flags |= action; action &= IMA_DO_MASK; action &= ~((iint->flags & (IMA_DONE_MASK ^ IMA_MEASURED)) >> 1); /* If target pcr is already measured, unset IMA_MEASURE action */ if ((action & IMA_MEASURE) && (iint->measured_pcrs & (0x1 << pcr))) action ^= IMA_MEASURE; /* HASH sets the digital signature and update flags, nothing else */ if ((action & IMA_HASH) && !(test_bit(IMA_DIGSIG, &iint->atomic_flags))) { xattr_len = ima_read_xattr(file_dentry(file), &xattr_value, xattr_len); if ((xattr_value && xattr_len > 2) && (xattr_value->type == EVM_IMA_XATTR_DIGSIG)) set_bit(IMA_DIGSIG, &iint->atomic_flags); iint->flags |= IMA_HASHED; action ^= IMA_HASH; set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags); } /* Nothing to do, just return existing appraised status */ if (!action) { if (must_appraise) { rc = mmap_violation_check(func, file, &pathbuf, &pathname, filename); if (!rc) rc = ima_get_cache_status(iint, func); } goto out_locked; } if ((action & IMA_APPRAISE_SUBMASK) || strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) != 0) { /* read 'security.ima' */ xattr_len = ima_read_xattr(file_dentry(file), &xattr_value, xattr_len); /* * Read the appended modsig if allowed by the policy, and allow * an additional measurement list entry, if needed, based on the * template format and whether the file was already measured. 
*/ if (iint->flags & IMA_MODSIG_ALLOWED) { rc = ima_read_modsig(func, buf, size, &modsig); if (!rc && ima_template_has_modsig(template_desc) && iint->flags & IMA_MEASURED) action |= IMA_MEASURE; } } hash_algo = ima_get_hash_algo(xattr_value, xattr_len); rc = ima_collect_measurement(iint, file, buf, size, hash_algo, modsig); if (rc != 0 && rc != -EBADF && rc != -EINVAL) goto out_locked; if (!pathbuf) /* ima_rdwr_violation possibly pre-fetched */ pathname = ima_d_path(&file->f_path, &pathbuf, filename); if (action & IMA_MEASURE) ima_store_measurement(iint, file, pathname, xattr_value, xattr_len, modsig, pcr, template_desc); if (rc == 0 && (action & IMA_APPRAISE_SUBMASK)) { rc = ima_check_blacklist(iint, modsig, pcr); if (rc != -EPERM) { inode_lock(inode); rc = ima_appraise_measurement(func, iint, file, pathname, xattr_value, xattr_len, modsig); inode_unlock(inode); } if (!rc) rc = mmap_violation_check(func, file, &pathbuf, &pathname, filename); } if (action & IMA_AUDIT) ima_audit_measurement(iint, pathname); if ((file->f_flags & O_DIRECT) && (iint->flags & IMA_PERMIT_DIRECTIO)) rc = 0; /* Ensure the digest was generated using an allowed algorithm */ if (rc == 0 && must_appraise && allowed_algos != 0 && (allowed_algos & (1U << hash_algo)) == 0) { rc = -EACCES; integrity_audit_msg(AUDIT_INTEGRITY_DATA, file_inode(file), pathname, "collect_data", "denied-hash-algorithm", rc, 0); } out_locked: if ((mask & MAY_WRITE) && test_bit(IMA_DIGSIG, &iint->atomic_flags) && !(iint->flags & IMA_NEW_FILE)) rc = -EACCES; mutex_unlock(&iint->mutex); kfree(xattr_value); ima_free_modsig(modsig); out: if (pathbuf) __putname(pathbuf); if (must_appraise) { if (rc && (ima_appraise & IMA_APPRAISE_ENFORCE)) return -EACCES; if (file->f_mode & FMODE_WRITE) set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags); } return 0; } /** * ima_file_mmap - based on policy, collect/store measurement. * @file: pointer to the file to be measured (May be NULL) * @reqprot: protection requested by the application * @prot: protection that will be applied by the kernel * @flags: operational flags * * Measure files being mmapped executable based on the ima_must_measure() * policy decision. * * On success return 0. On integrity appraisal error, assuming the file * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. */ static int ima_file_mmap(struct file *file, unsigned long reqprot, unsigned long prot, unsigned long flags) { struct lsm_prop prop; int ret; if (!file) return 0; security_current_getlsmprop_subj(&prop); if (reqprot & PROT_EXEC) { ret = process_measurement(file, current_cred(), &prop, NULL, 0, MAY_EXEC, MMAP_CHECK_REQPROT); if (ret) return ret; } if (prot & PROT_EXEC) return process_measurement(file, current_cred(), &prop, NULL, 0, MAY_EXEC, MMAP_CHECK); return 0; } /** * ima_file_mprotect - based on policy, limit mprotect change * @vma: vm_area_struct protection is set to * @reqprot: protection requested by the application * @prot: protection that will be applied by the kernel * * Files can be mmap'ed read/write and later changed to execute to circumvent * IMA's mmap appraisal policy rules. Due to locking issues (mmap semaphore * would be taken before i_mutex), files can not be measured or appraised at * this point. Eliminate this integrity gap by denying the mprotect * PROT_EXECUTE change, if an mmap appraise policy rule exists. * * On mprotect change success, return 0. On failure, return -EACESS. 
*/ static int ima_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot) { struct ima_template_desc *template = NULL; struct file *file; char filename[NAME_MAX]; char *pathbuf = NULL; const char *pathname = NULL; struct inode *inode; struct lsm_prop prop; int result = 0; int action; int pcr; /* Is mprotect making an mmap'ed file executable? */ if (!(ima_policy_flag & IMA_APPRAISE) || !vma->vm_file || !(prot & PROT_EXEC) || (vma->vm_flags & VM_EXEC)) return 0; security_current_getlsmprop_subj(&prop); inode = file_inode(vma->vm_file); action = ima_get_action(file_mnt_idmap(vma->vm_file), inode, current_cred(), &prop, MAY_EXEC, MMAP_CHECK, &pcr, &template, NULL, NULL); action |= ima_get_action(file_mnt_idmap(vma->vm_file), inode, current_cred(), &prop, MAY_EXEC, MMAP_CHECK_REQPROT, &pcr, &template, NULL, NULL); /* Is the mmap'ed file in policy? */ if (!(action & (IMA_MEASURE | IMA_APPRAISE_SUBMASK))) return 0; if (action & IMA_APPRAISE_SUBMASK) result = -EPERM; file = vma->vm_file; pathname = ima_d_path(&file->f_path, &pathbuf, filename); integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, pathname, "collect_data", "failed-mprotect", result, 0); if (pathbuf) __putname(pathbuf); return result; } /** * ima_bprm_check - based on policy, collect/store measurement. * @bprm: contains the linux_binprm structure * * The OS protects against an executable file, already open for write, * from being executed in deny_write_access() and an executable file, * already open for execute, from being modified in get_write_access(). * So we can be certain that what we verify and measure here is actually * what is being executed. * * On success return 0. On integrity appraisal error, assuming the file * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. */ static int ima_bprm_check(struct linux_binprm *bprm) { int ret; struct lsm_prop prop; security_current_getlsmprop_subj(&prop); ret = process_measurement(bprm->file, current_cred(), &prop, NULL, 0, MAY_EXEC, BPRM_CHECK); if (ret) return ret; security_cred_getlsmprop(bprm->cred, &prop); return process_measurement(bprm->file, bprm->cred, &prop, NULL, 0, MAY_EXEC, CREDS_CHECK); } /** * ima_bprm_creds_for_exec - collect/store/appraise measurement. * @bprm: contains the linux_binprm structure * * Based on the IMA policy and the execveat(2) AT_EXECVE_CHECK flag, measure * and appraise the integrity of a file to be executed by script interpreters. * Unlike any of the other LSM hooks where the kernel enforces file integrity, * enforcing file integrity is left up to the discretion of the script * interpreter (userspace). * * On success return 0. On integrity appraisal error, assuming the file * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. */ static int ima_bprm_creds_for_exec(struct linux_binprm *bprm) { /* * As security_bprm_check() is called multiple times, both * the script and the shebang interpreter are measured, appraised, * and audited. Limit usage of this LSM hook to just measuring, * appraising, and auditing the indirect script execution * (e.g. ./sh example.sh). */ if (!bprm->is_check) return 0; return ima_bprm_check(bprm); } /** * ima_file_check - based on policy, collect/store measurement. * @file: pointer to the file to be measured * @mask: contains MAY_READ, MAY_WRITE, MAY_EXEC or MAY_APPEND * * Measure files based on the ima_must_measure() policy decision. * * On success return 0. 
On integrity appraisal error, assuming the file * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. */ static int ima_file_check(struct file *file, int mask) { struct lsm_prop prop; security_current_getlsmprop_subj(&prop); return process_measurement(file, current_cred(), &prop, NULL, 0, mask & (MAY_READ | MAY_WRITE | MAY_EXEC | MAY_APPEND), FILE_CHECK); } static int __ima_inode_hash(struct inode *inode, struct file *file, char *buf, size_t buf_size) { struct ima_iint_cache *iint = NULL, tmp_iint; int rc, hash_algo; if (ima_policy_flag) { iint = ima_iint_find(inode); if (iint) mutex_lock(&iint->mutex); } if ((!iint || !(iint->flags & IMA_COLLECTED)) && file) { if (iint) mutex_unlock(&iint->mutex); memset(&tmp_iint, 0, sizeof(tmp_iint)); mutex_init(&tmp_iint.mutex); rc = ima_collect_measurement(&tmp_iint, file, NULL, 0, ima_hash_algo, NULL); if (rc < 0) { /* ima_hash could be allocated in case of failure. */ if (rc != -ENOMEM) kfree(tmp_iint.ima_hash); return -EOPNOTSUPP; } iint = &tmp_iint; mutex_lock(&iint->mutex); } if (!iint) return -EOPNOTSUPP; /* * ima_file_hash can be called when ima_collect_measurement has still * not been called, we might not always have a hash. */ if (!iint->ima_hash || !(iint->flags & IMA_COLLECTED)) { mutex_unlock(&iint->mutex); return -EOPNOTSUPP; } if (buf) { size_t copied_size; copied_size = min_t(size_t, iint->ima_hash->length, buf_size); memcpy(buf, iint->ima_hash->digest, copied_size); } hash_algo = iint->ima_hash->algo; mutex_unlock(&iint->mutex); if (iint == &tmp_iint) kfree(iint->ima_hash); return hash_algo; } /** * ima_file_hash - return a measurement of the file * @file: pointer to the file * @buf: buffer in which to store the hash * @buf_size: length of the buffer * * On success, return the hash algorithm (as defined in the enum hash_algo). * If buf is not NULL, this function also outputs the hash into buf. * If the hash is larger than buf_size, then only buf_size bytes will be copied. * It generally just makes sense to pass a buffer capable of holding the largest * possible hash: IMA_MAX_DIGEST_SIZE. * The file hash returned is based on the entire file, including the appended * signature. * * If the measurement cannot be performed, return -EOPNOTSUPP. * If the parameters are incorrect, return -EINVAL. */ int ima_file_hash(struct file *file, char *buf, size_t buf_size) { if (!file) return -EINVAL; return __ima_inode_hash(file_inode(file), file, buf, buf_size); } EXPORT_SYMBOL_GPL(ima_file_hash); /** * ima_inode_hash - return the stored measurement if the inode has been hashed * and is in the iint cache. * @inode: pointer to the inode * @buf: buffer in which to store the hash * @buf_size: length of the buffer * * On success, return the hash algorithm (as defined in the enum hash_algo). * If buf is not NULL, this function also outputs the hash into buf. * If the hash is larger than buf_size, then only buf_size bytes will be copied. * It generally just makes sense to pass a buffer capable of holding the largest * possible hash: IMA_MAX_DIGEST_SIZE. * The hash returned is based on the entire contents, including the appended * signature. * * If IMA is disabled or if no measurement is available, return -EOPNOTSUPP. * If the parameters are incorrect, return -EINVAL. 
*/ int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size) { if (!inode) return -EINVAL; return __ima_inode_hash(inode, NULL, buf, buf_size); } EXPORT_SYMBOL_GPL(ima_inode_hash); /** * ima_post_create_tmpfile - mark newly created tmpfile as new * @idmap: idmap of the mount the inode was found from * @inode: inode of the newly created tmpfile * * No measuring, appraising or auditing of newly created tmpfiles is needed. * Skip calling process_measurement(), but indicate which newly, created * tmpfiles are in policy. */ static void ima_post_create_tmpfile(struct mnt_idmap *idmap, struct inode *inode) { struct ima_iint_cache *iint; int must_appraise; if (!ima_policy_flag || !S_ISREG(inode->i_mode)) return; must_appraise = ima_must_appraise(idmap, inode, MAY_ACCESS, FILE_CHECK); if (!must_appraise) return; /* Nothing to do if we can't allocate memory */ iint = ima_inode_get(inode); if (!iint) return; /* needed for writing the security xattrs */ set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags); iint->ima_file_status = INTEGRITY_PASS; } /** * ima_post_path_mknod - mark as a new inode * @idmap: idmap of the mount the inode was found from * @dentry: newly created dentry * * Mark files created via the mknodat syscall as new, so that the * file data can be written later. */ static void ima_post_path_mknod(struct mnt_idmap *idmap, struct dentry *dentry) { struct ima_iint_cache *iint; struct inode *inode = dentry->d_inode; int must_appraise; if (!ima_policy_flag || !S_ISREG(inode->i_mode)) return; must_appraise = ima_must_appraise(idmap, inode, MAY_ACCESS, FILE_CHECK); if (!must_appraise) return; /* Nothing to do if we can't allocate memory */ iint = ima_inode_get(inode); if (!iint) return; /* needed for re-opening empty files */ iint->flags |= IMA_NEW_FILE; } /** * ima_read_file - pre-measure/appraise hook decision based on policy * @file: pointer to the file to be measured/appraised/audit * @read_id: caller identifier * @contents: whether a subsequent call will be made to ima_post_read_file() * * Permit reading a file based on policy. The policy rules are written * in terms of the policy identifier. Appraising the integrity of * a file requires a file descriptor. * * For permission return 0, otherwise return -EACCES. */ static int ima_read_file(struct file *file, enum kernel_read_file_id read_id, bool contents) { enum ima_hooks func; struct lsm_prop prop; /* * Do devices using pre-allocated memory run the risk of the * firmware being accessible to the device prior to the completion * of IMA's signature verification any more than when using two * buffers? It may be desirable to include the buffer address * in this API and walk all the dma_map_single() mappings to check. */ /* * There will be a call made to ima_post_read_file() with * a filled buffer, so we don't need to perform an extra * read early here. */ if (contents) return 0; /* Read entire file for all partial reads. 
*/ func = read_idmap[read_id] ?: FILE_CHECK; security_current_getlsmprop_subj(&prop); return process_measurement(file, current_cred(), &prop, NULL, 0, MAY_READ, func); } const int read_idmap[READING_MAX_ID] = { [READING_FIRMWARE] = FIRMWARE_CHECK, [READING_MODULE] = MODULE_CHECK, [READING_KEXEC_IMAGE] = KEXEC_KERNEL_CHECK, [READING_KEXEC_INITRAMFS] = KEXEC_INITRAMFS_CHECK, [READING_POLICY] = POLICY_CHECK }; /** * ima_post_read_file - in memory collect/appraise/audit measurement * @file: pointer to the file to be measured/appraised/audit * @buf: pointer to in memory file contents * @size: size of in memory file contents * @read_id: caller identifier * * Measure/appraise/audit in memory file based on policy. Policy rules * are written in terms of a policy identifier. * * On success return 0. On integrity appraisal error, assuming the file * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. */ static int ima_post_read_file(struct file *file, char *buf, loff_t size, enum kernel_read_file_id read_id) { enum ima_hooks func; struct lsm_prop prop; /* permit signed certs */ if (!file && read_id == READING_X509_CERTIFICATE) return 0; if (!file || !buf || size == 0) { /* should never happen */ if (ima_appraise & IMA_APPRAISE_ENFORCE) return -EACCES; return 0; } func = read_idmap[read_id] ?: FILE_CHECK; security_current_getlsmprop_subj(&prop); return process_measurement(file, current_cred(), &prop, buf, size, MAY_READ, func); } /** * ima_load_data - appraise decision based on policy * @id: kernel load data caller identifier * @contents: whether the full contents will be available in a later * call to ima_post_load_data(). * * Callers of this LSM hook can not measure, appraise, or audit the * data provided by userspace. Enforce policy rules requiring a file * signature (eg. kexec'ed kernel image). * * For permission return 0, otherwise return -EACCES. */ static int ima_load_data(enum kernel_load_data_id id, bool contents) { bool ima_enforce, sig_enforce; ima_enforce = (ima_appraise & IMA_APPRAISE_ENFORCE) == IMA_APPRAISE_ENFORCE; switch (id) { case LOADING_KEXEC_IMAGE: if (IS_ENABLED(CONFIG_KEXEC_SIG) && arch_ima_get_secureboot()) { pr_err("impossible to appraise a kernel image without a file descriptor; try using kexec_file_load syscall.\n"); return -EACCES; } if (ima_enforce && (ima_appraise & IMA_APPRAISE_KEXEC)) { pr_err("impossible to appraise a kernel image without a file descriptor; try using kexec_file_load syscall.\n"); return -EACCES; /* INTEGRITY_UNKNOWN */ } break; case LOADING_FIRMWARE: if (ima_enforce && (ima_appraise & IMA_APPRAISE_FIRMWARE) && !contents) { pr_err("Prevent firmware sysfs fallback loading.\n"); return -EACCES; /* INTEGRITY_UNKNOWN */ } break; case LOADING_MODULE: sig_enforce = is_module_sig_enforced(); if (ima_enforce && (!sig_enforce && (ima_appraise & IMA_APPRAISE_MODULES))) { pr_err("impossible to appraise a module without a file descriptor. sig_enforce kernel parameter might help\n"); return -EACCES; /* INTEGRITY_UNKNOWN */ } break; default: break; } return 0; } /** * ima_post_load_data - appraise decision based on policy * @buf: pointer to in memory file contents * @size: size of in memory file contents * @load_id: kernel load data caller identifier * @description: @load_id-specific description of contents * * Measure/appraise/audit in memory buffer based on policy. Policy rules * are written in terms of a policy identifier. * * On success return 0. 
On integrity appraisal error, assuming the file * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. */ static int ima_post_load_data(char *buf, loff_t size, enum kernel_load_data_id load_id, char *description) { if (load_id == LOADING_FIRMWARE) { if ((ima_appraise & IMA_APPRAISE_FIRMWARE) && (ima_appraise & IMA_APPRAISE_ENFORCE)) { pr_err("Prevent firmware loading_store.\n"); return -EACCES; /* INTEGRITY_UNKNOWN */ } return 0; } /* * Measure the init_module syscall buffer containing the ELF image. */ if (load_id == LOADING_MODULE) ima_measure_critical_data("modules", "init_module", buf, size, true, NULL, 0); return 0; } /** * process_buffer_measurement - Measure the buffer or the buffer data hash * @idmap: idmap of the mount the inode was found from * @inode: inode associated with the object being measured (NULL for KEY_CHECK) * @buf: pointer to the buffer that needs to be added to the log. * @size: size of buffer(in bytes). * @eventname: event name to be used for the buffer entry. * @func: IMA hook * @pcr: pcr to extend the measurement * @func_data: func specific data, may be NULL * @buf_hash: measure buffer data hash * @digest: buffer digest will be written to * @digest_len: buffer length * * Based on policy, either the buffer data or buffer data hash is measured * * Return: 0 if the buffer has been successfully measured, 1 if the digest * has been written to the passed location but not added to a measurement entry, * a negative value otherwise. */ int process_buffer_measurement(struct mnt_idmap *idmap, struct inode *inode, const void *buf, int size, const char *eventname, enum ima_hooks func, int pcr, const char *func_data, bool buf_hash, u8 *digest, size_t digest_len) { int ret = 0; const char *audit_cause = "ENOMEM"; struct ima_template_entry *entry = NULL; struct ima_iint_cache iint = {}; struct ima_event_data event_data = {.iint = &iint, .filename = eventname, .buf = buf, .buf_len = size}; struct ima_template_desc *template; struct ima_max_digest_data hash; struct ima_digest_data *hash_hdr = container_of(&hash.hdr, struct ima_digest_data, hdr); char digest_hash[IMA_MAX_DIGEST_SIZE]; int digest_hash_len = hash_digest_size[ima_hash_algo]; int violation = 0; int action = 0; struct lsm_prop prop; if (digest && digest_len < digest_hash_len) return -EINVAL; if (!ima_policy_flag && !digest) return -ENOENT; template = ima_template_desc_buf(); if (!template) { ret = -EINVAL; audit_cause = "ima_template_desc_buf"; goto out; } /* * Both LSM hooks and auxiliary based buffer measurements are * based on policy. To avoid code duplication, differentiate * between the LSM hooks and auxiliary buffer measurements, * retrieving the policy rule information only for the LSM hook * buffer measurements. 
*/ if (func) { security_current_getlsmprop_subj(&prop); action = ima_get_action(idmap, inode, current_cred(), &prop, 0, func, &pcr, &template, func_data, NULL); if (!(action & IMA_MEASURE) && !digest) return -ENOENT; } if (!pcr) pcr = CONFIG_IMA_MEASURE_PCR_IDX; iint.ima_hash = hash_hdr; iint.ima_hash->algo = ima_hash_algo; iint.ima_hash->length = hash_digest_size[ima_hash_algo]; ret = ima_calc_buffer_hash(buf, size, iint.ima_hash); if (ret < 0) { audit_cause = "hashing_error"; goto out; } if (buf_hash) { memcpy(digest_hash, hash_hdr->digest, digest_hash_len); ret = ima_calc_buffer_hash(digest_hash, digest_hash_len, iint.ima_hash); if (ret < 0) { audit_cause = "hashing_error"; goto out; } event_data.buf = digest_hash; event_data.buf_len = digest_hash_len; } if (digest) memcpy(digest, iint.ima_hash->digest, digest_hash_len); if (!ima_policy_flag || (func && !(action & IMA_MEASURE))) return 1; ret = ima_alloc_init_template(&event_data, &entry, template); if (ret < 0) { audit_cause = "alloc_entry"; goto out; } ret = ima_store_template(entry, violation, NULL, event_data.buf, pcr); if (ret < 0) { audit_cause = "store_entry"; ima_free_template_entry(entry); } out: if (ret < 0) integrity_audit_message(AUDIT_INTEGRITY_PCR, NULL, eventname, func_measure_str(func), audit_cause, ret, 0, ret); return ret; } /** * ima_kexec_cmdline - measure kexec cmdline boot args * @kernel_fd: file descriptor of the kexec kernel being loaded * @buf: pointer to buffer * @size: size of buffer * * Buffers can only be measured, not appraised. */ void ima_kexec_cmdline(int kernel_fd, const void *buf, int size) { if (!buf || !size) return; CLASS(fd, f)(kernel_fd); if (fd_empty(f)) return; process_buffer_measurement(file_mnt_idmap(fd_file(f)), file_inode(fd_file(f)), buf, size, "kexec-cmdline", KEXEC_CMDLINE, 0, NULL, false, NULL, 0); } /** * ima_measure_critical_data - measure kernel integrity critical data * @event_label: unique event label for grouping and limiting critical data * @event_name: event name for the record in the IMA measurement list * @buf: pointer to buffer data * @buf_len: length of buffer data (in bytes) * @hash: measure buffer data hash * @digest: buffer digest will be written to * @digest_len: buffer length * * Measure data critical to the integrity of the kernel into the IMA log * and extend the pcr. Examples of critical data could be various data * structures, policies, and states stored in kernel memory that can * impact the integrity of the system. * * Return: 0 if the buffer has been successfully measured, 1 if the digest * has been written to the passed location but not added to a measurement entry, * a negative value otherwise. */ int ima_measure_critical_data(const char *event_label, const char *event_name, const void *buf, size_t buf_len, bool hash, u8 *digest, size_t digest_len) { if (!event_name || !event_label || !buf || !buf_len) return -ENOPARAM; return process_buffer_measurement(&nop_mnt_idmap, NULL, buf, buf_len, event_name, CRITICAL_DATA, 0, event_label, hash, digest, digest_len); } EXPORT_SYMBOL_GPL(ima_measure_critical_data); #ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS /** * ima_kernel_module_request - Prevent crypto-pkcs1(rsa,*) requests * @kmod_name: kernel module name * * Avoid a verification loop where verifying the signature of the modprobe * binary requires executing modprobe itself. 
Since the modprobe iint->mutex * is already held when the signature verification is performed, a deadlock * occurs as soon as modprobe is executed within the critical region, since * the same lock cannot be taken again. * * This happens when public_key_verify_signature(), in case of RSA algorithm, * use alg_name to store internal information in order to construct an * algorithm on the fly, but crypto_larval_lookup() will try to use alg_name * in order to load a kernel module with same name. * * Since we don't have any real "crypto-pkcs1(rsa,*)" kernel modules, * we are safe to fail such module request from crypto_larval_lookup(), and * avoid the verification loop. * * Return: Zero if it is safe to load the kernel module, -EINVAL otherwise. */ static int ima_kernel_module_request(char *kmod_name) { if (strncmp(kmod_name, "crypto-pkcs1(rsa,", 17) == 0) return -EINVAL; return 0; } #endif /* CONFIG_INTEGRITY_ASYMMETRIC_KEYS */ static int __init init_ima(void) { int error; ima_appraise_parse_cmdline(); ima_init_template_list(); hash_setup(CONFIG_IMA_DEFAULT_HASH); error = ima_init(); if (error && strcmp(hash_algo_name[ima_hash_algo], CONFIG_IMA_DEFAULT_HASH) != 0) { pr_info("Allocating %s failed, going to use default hash algorithm %s\n", hash_algo_name[ima_hash_algo], CONFIG_IMA_DEFAULT_HASH); hash_setup_done = 0; hash_setup(CONFIG_IMA_DEFAULT_HASH); error = ima_init(); } if (error) return error; error = register_blocking_lsm_notifier(&ima_lsm_policy_notifier); if (error) pr_warn("Couldn't register LSM notifier, error %d\n", error); if (!error) ima_update_policy_flags(); return error; } static struct security_hook_list ima_hooks[] __ro_after_init = { LSM_HOOK_INIT(bprm_check_security, ima_bprm_check), LSM_HOOK_INIT(bprm_creds_for_exec, ima_bprm_creds_for_exec), LSM_HOOK_INIT(file_post_open, ima_file_check), LSM_HOOK_INIT(inode_post_create_tmpfile, ima_post_create_tmpfile), LSM_HOOK_INIT(file_release, ima_file_free), LSM_HOOK_INIT(mmap_file, ima_file_mmap), LSM_HOOK_INIT(file_mprotect, ima_file_mprotect), LSM_HOOK_INIT(kernel_load_data, ima_load_data), LSM_HOOK_INIT(kernel_post_load_data, ima_post_load_data), LSM_HOOK_INIT(kernel_read_file, ima_read_file), LSM_HOOK_INIT(kernel_post_read_file, ima_post_read_file), LSM_HOOK_INIT(path_post_mknod, ima_post_path_mknod), #ifdef CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS LSM_HOOK_INIT(key_post_create_or_update, ima_post_key_create_or_update), #endif #ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS LSM_HOOK_INIT(kernel_module_request, ima_kernel_module_request), #endif LSM_HOOK_INIT(inode_free_security_rcu, ima_inode_free_rcu), }; static const struct lsm_id ima_lsmid = { .name = "ima", .id = LSM_ID_IMA, }; static int __init init_ima_lsm(void) { ima_iintcache_init(); security_add_hooks(ima_hooks, ARRAY_SIZE(ima_hooks), &ima_lsmid); init_ima_appraise_lsm(&ima_lsmid); return 0; } struct lsm_blob_sizes ima_blob_sizes __ro_after_init = { .lbs_inode = sizeof(struct ima_iint_cache *), }; DEFINE_LSM(ima) = { .name = "ima", .init = init_ima_lsm, .order = LSM_ORDER_LAST, .blobs = &ima_blob_sizes, }; late_initcall(init_ima); /* Start IMA after the TPM is available */ |
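ima_file_hash() and ima_inode_hash() above return the hash algorithm (a hash_algo value) on success and copy at most buf_size bytes of the digest into the caller's buffer. Here is a hedged sketch of an in-kernel caller; the function name and the fixed 64-byte buffer are illustrative (the documentation above recommends sizing the buffer for the largest possible digest, IMA_MAX_DIGEST_SIZE).

#include <linux/fs.h>
#include <linux/ima.h>
#include <linux/printk.h>

/*
 * Illustrative caller: report which algorithm IMA used for a measured file.
 * A negative return means no measurement is available (-EOPNOTSUPP) or the
 * arguments were invalid (-EINVAL), per the comments above.
 */
static void example_log_ima_algo(struct file *file)
{
	char digest[64];	/* large enough for the common digest sizes */
	int algo;

	algo = ima_file_hash(file, digest, sizeof(digest));
	if (algo < 0) {
		pr_debug("no IMA measurement available: %d\n", algo);
		return;
	}

	pr_info("file measured with hash_algo %d\n", algo);
}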
// SPDX-License-Identifier: GPL-2.0
/* Builtin firmware support */

#include <linux/firmware.h>
#include "../firmware.h"

/* Only if FW_LOADER=y */
#ifdef CONFIG_FW_LOADER

struct builtin_fw {
	char *name;
	void *data;
	unsigned long size;
};

extern struct builtin_fw __start_builtin_fw[];
extern struct builtin_fw __end_builtin_fw[];

static bool fw_copy_to_prealloc_buf(struct firmware *fw, void *buf, size_t size)
{
	if (!buf)
		return true;
	if (size < fw->size)
		return false;
	memcpy(buf, fw->data, fw->size);
	return true;
}

/**
 * firmware_request_builtin() - load builtin firmware
 * @fw: pointer to firmware struct
 * @name: name of firmware file
 *
 * Some use cases in the kernel require that no memory allocator is involved,
 * as these calls take place early in the boot process. An example is the x86
 * CPU microcode loader. In these cases all the caller wants is to see whether
 * the firmware was built in and, if so, to use it right away; this helper can
 * be used for such cases.
 *
 * This looks for the firmware in the built-in kernel. It returns successfully
 * only if the firmware you are looking for was built into the kernel.
 *
 * Callers of this API do not need to use release_firmware() as the pointer to
 * the firmware is expected to be provided locally on the stack of the caller.
 **/
bool firmware_request_builtin(struct firmware *fw, const char *name)
{
	struct builtin_fw *b_fw;

	if (!fw)
		return false;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
		if (strcmp(name, b_fw->name) == 0) {
			fw->size = b_fw->size;
			fw->data = b_fw->data;
			return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_NS_GPL(firmware_request_builtin, "TEST_FIRMWARE");

/**
 * firmware_request_builtin_buf() - load builtin firmware into optional buffer
 * @fw: pointer to firmware struct
 * @name: name of firmware file
 * @buf: if set, a pre-allocated buffer into which the built-in firmware is
 *	copied. This field can be NULL. It is used by callers such as
 *	request_firmware_into_buf() and request_partial_firmware_into_buf()
 * @size: if @buf was provided, the maximum size of the pre-allocated buffer.
 *	If the built-in firmware does not fit into the pre-allocated @buf this
 *	call will fail.
 *
 * This looks for the firmware in the built-in kernel. The call can only
 * succeed if the firmware you are looking for was built into the kernel. If
 * you passed a @buf, the firmware is copied into it *iff* the built-in
 * firmware fits into the pre-allocated buffer size specified in @size.
 *
 * This helper is to be used internally by the firmware loader only.
 **/
bool firmware_request_builtin_buf(struct firmware *fw, const char *name,
				  void *buf, size_t size)
{
	if (!firmware_request_builtin(fw, name))
		return false;

	return fw_copy_to_prealloc_buf(fw, buf, size);
}

bool firmware_is_builtin(const struct firmware *fw)
{
	struct builtin_fw *b_fw;

	for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++)
		if (fw->data == b_fw->data)
			return true;

	return false;
}

#endif
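Below is a hedged sketch of an early-boot caller of firmware_request_builtin(), following the comment above: the struct firmware may live on the caller's stack, and release_firmware() is not needed because fw.data points straight at the built-in blob. The firmware name and the function itself are made up for illustration. Note that the symbol is exported in the "TEST_FIRMWARE" namespace, so a modular user would additionally need MODULE_IMPORT_NS().

#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/init.h>
#include <linux/printk.h>

static int __init example_probe_builtin_fw(void)
{
	struct firmware fw;

	/* "example/table.bin" is a hypothetical built-in firmware name */
	if (!firmware_request_builtin(&fw, "example/table.bin"))
		return -ENOENT;

	pr_info("built-in firmware found: %zu bytes\n", fw.size);

	/* consume fw.data / fw.size directly; no release_firmware() needed */
	return 0;
}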
18 18 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 | // SPDX-License-Identifier: GPL-2.0 /* * f2fs shrinker support * the basic infra was copied from fs/ubifs/shrinker.c * * Copyright (c) 2015 Motorola Mobility * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org> */ #include <linux/fs.h> #include <linux/f2fs_fs.h> #include "f2fs.h" #include "node.h" static LIST_HEAD(f2fs_list); static DEFINE_SPINLOCK(f2fs_list_lock); static unsigned int shrinker_run_no; static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi) { return NM_I(sbi)->nat_cnt[RECLAIMABLE_NAT]; } static unsigned long __count_free_nids(struct f2fs_sb_info *sbi) { long count = NM_I(sbi)->nid_cnt[FREE_NID] - MAX_FREE_NIDS; return count > 0 ? count : 0; } static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi, enum extent_type type) { struct extent_tree_info *eti = &sbi->extent_tree[type]; return atomic_read(&eti->total_zombie_tree) + atomic_read(&eti->total_ext_node); } unsigned long f2fs_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { struct f2fs_sb_info *sbi; struct list_head *p; unsigned long count = 0; spin_lock(&f2fs_list_lock); p = f2fs_list.next; while (p != &f2fs_list) { sbi = list_entry(p, struct f2fs_sb_info, s_list); /* stop f2fs_put_super */ if (!mutex_trylock(&sbi->umount_mutex)) { p = p->next; continue; } spin_unlock(&f2fs_list_lock); /* count read extent cache entries */ count += __count_extent_cache(sbi, EX_READ); /* count block age extent cache entries */ count += __count_extent_cache(sbi, EX_BLOCK_AGE); /* count clean nat cache entries */ count += __count_nat_entries(sbi); /* count free nids cache entries */ count += __count_free_nids(sbi); spin_lock(&f2fs_list_lock); p = p->next; mutex_unlock(&sbi->umount_mutex); } spin_unlock(&f2fs_list_lock); return count ?: SHRINK_EMPTY; } unsigned long f2fs_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) { unsigned long nr = sc->nr_to_scan; struct f2fs_sb_info *sbi; struct list_head *p; unsigned int run_no; unsigned long freed = 0; spin_lock(&f2fs_list_lock); do { run_no = ++shrinker_run_no; } while (run_no == 0); p = f2fs_list.next; while (p != &f2fs_list) { sbi = list_entry(p, struct f2fs_sb_info, s_list); if (sbi->shrinker_run_no == run_no) break; /* stop f2fs_put_super */ if (!mutex_trylock(&sbi->umount_mutex)) { p = p->next; continue; } spin_unlock(&f2fs_list_lock); sbi->shrinker_run_no = run_no; /* shrink extent cache entries */ freed += f2fs_shrink_age_extent_tree(sbi, nr >> 2); /* shrink read extent cache entries */ freed += f2fs_shrink_read_extent_tree(sbi, nr >> 2); /* shrink clean nat cache entries */ if (freed < nr) freed += f2fs_try_to_free_nats(sbi, nr - freed); /* shrink free nids cache entries */ if (freed < nr) 
freed += f2fs_try_to_free_nids(sbi, nr - freed); spin_lock(&f2fs_list_lock); p = p->next; list_move_tail(&sbi->s_list, &f2fs_list); mutex_unlock(&sbi->umount_mutex); if (freed >= nr) break; } spin_unlock(&f2fs_list_lock); return freed; } unsigned int f2fs_donate_files(void) { struct f2fs_sb_info *sbi; struct list_head *p; unsigned int donate_files = 0; spin_lock(&f2fs_list_lock); p = f2fs_list.next; while (p != &f2fs_list) { sbi = list_entry(p, struct f2fs_sb_info, s_list); /* stop f2fs_put_super */ if (!mutex_trylock(&sbi->umount_mutex)) { p = p->next; continue; } spin_unlock(&f2fs_list_lock); donate_files += sbi->donate_files; spin_lock(&f2fs_list_lock); p = p->next; mutex_unlock(&sbi->umount_mutex); } spin_unlock(&f2fs_list_lock); return donate_files; } static unsigned int do_reclaim_caches(struct f2fs_sb_info *sbi, unsigned int reclaim_caches_kb) { struct inode *inode; struct f2fs_inode_info *fi; unsigned int nfiles = sbi->donate_files; pgoff_t npages = reclaim_caches_kb >> (PAGE_SHIFT - 10); while (npages && nfiles--) { pgoff_t len; spin_lock(&sbi->inode_lock[DONATE_INODE]); if (list_empty(&sbi->inode_list[DONATE_INODE])) { spin_unlock(&sbi->inode_lock[DONATE_INODE]); break; } fi = list_first_entry(&sbi->inode_list[DONATE_INODE], struct f2fs_inode_info, gdonate_list); list_move_tail(&fi->gdonate_list, &sbi->inode_list[DONATE_INODE]); inode = igrab(&fi->vfs_inode); spin_unlock(&sbi->inode_lock[DONATE_INODE]); if (!inode) continue; len = fi->donate_end - fi->donate_start + 1; npages = npages < len ? 0 : npages - len; invalidate_inode_pages2_range(inode->i_mapping, fi->donate_start, fi->donate_end); iput(inode); cond_resched(); } return npages << (PAGE_SHIFT - 10); } void f2fs_reclaim_caches(unsigned int reclaim_caches_kb) { struct f2fs_sb_info *sbi; struct list_head *p; spin_lock(&f2fs_list_lock); p = f2fs_list.next; while (p != &f2fs_list && reclaim_caches_kb) { sbi = list_entry(p, struct f2fs_sb_info, s_list); /* stop f2fs_put_super */ if (!mutex_trylock(&sbi->umount_mutex)) { p = p->next; continue; } spin_unlock(&f2fs_list_lock); reclaim_caches_kb = do_reclaim_caches(sbi, reclaim_caches_kb); spin_lock(&f2fs_list_lock); p = p->next; mutex_unlock(&sbi->umount_mutex); } spin_unlock(&f2fs_list_lock); } void f2fs_join_shrinker(struct f2fs_sb_info *sbi) { spin_lock(&f2fs_list_lock); list_add_tail(&sbi->s_list, &f2fs_list); spin_unlock(&f2fs_list_lock); } void f2fs_leave_shrinker(struct f2fs_sb_info *sbi) { f2fs_shrink_read_extent_tree(sbi, __count_extent_cache(sbi, EX_READ)); f2fs_shrink_age_extent_tree(sbi, __count_extent_cache(sbi, EX_BLOCK_AGE)); spin_lock(&f2fs_list_lock); list_del_init(&sbi->s_list); spin_unlock(&f2fs_list_lock); } |
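/*
 * Sketch of the traversal discipline shared by f2fs_shrink_count(),
 * f2fs_shrink_scan(), f2fs_donate_files() and f2fs_reclaim_caches() above.
 * The helper name and the callback are hypothetical; the point is the locking
 * pattern: hold f2fs_list_lock only while stepping through the list, take
 * each sbi->umount_mutex with a trylock so f2fs_put_super() is never blocked,
 * and drop the spinlock around the per-superblock work so that work may sleep.
 */
static unsigned long example_walk_f2fs_list(unsigned long (*work)(struct f2fs_sb_info *sbi))
{
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned long total = 0;

	spin_lock(&f2fs_list_lock);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* superblock is unmounting; skip it */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		total += work(sbi);		/* may sleep: spinlock dropped */

		spin_lock(&f2fs_list_lock);
		p = p->next;
		mutex_unlock(&sbi->umount_mutex);
	}
	spin_unlock(&f2fs_list_lock);

	return total;
}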
// SPDX-License-Identifier: GPL-2.0 /* * power_supply_hwmon.c - power supply hwmon support.
*/ #include <linux/err.h> #include <linux/hwmon.h> #include <linux/power_supply.h> #include <linux/slab.h> #include "power_supply.h" struct power_supply_hwmon { struct power_supply *psy; unsigned long *props; }; static const char *const ps_temp_label[] = { "temp", "ambient temp", }; static int power_supply_hwmon_in_to_property(u32 attr) { switch (attr) { case hwmon_in_average: return POWER_SUPPLY_PROP_VOLTAGE_AVG; case hwmon_in_min: return POWER_SUPPLY_PROP_VOLTAGE_MIN; case hwmon_in_max: return POWER_SUPPLY_PROP_VOLTAGE_MAX; case hwmon_in_input: return POWER_SUPPLY_PROP_VOLTAGE_NOW; default: return -EINVAL; } } static int power_supply_hwmon_curr_to_property(u32 attr) { switch (attr) { case hwmon_curr_average: return POWER_SUPPLY_PROP_CURRENT_AVG; case hwmon_curr_max: return POWER_SUPPLY_PROP_CURRENT_MAX; case hwmon_curr_input: return POWER_SUPPLY_PROP_CURRENT_NOW; default: return -EINVAL; } } static int power_supply_hwmon_power_to_property(u32 attr) { switch (attr) { case hwmon_power_input: return POWER_SUPPLY_PROP_POWER_NOW; case hwmon_power_average: return POWER_SUPPLY_PROP_POWER_AVG; default: return -EINVAL; } } static int power_supply_hwmon_temp_to_property(u32 attr, int channel) { if (channel) { switch (attr) { case hwmon_temp_input: return POWER_SUPPLY_PROP_TEMP_AMBIENT; case hwmon_temp_min_alarm: return POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN; case hwmon_temp_max_alarm: return POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX; default: break; } } else { switch (attr) { case hwmon_temp_input: return POWER_SUPPLY_PROP_TEMP; case hwmon_temp_max: return POWER_SUPPLY_PROP_TEMP_MAX; case hwmon_temp_min: return POWER_SUPPLY_PROP_TEMP_MIN; case hwmon_temp_min_alarm: return POWER_SUPPLY_PROP_TEMP_ALERT_MIN; case hwmon_temp_max_alarm: return POWER_SUPPLY_PROP_TEMP_ALERT_MAX; default: break; } } return -EINVAL; } static int power_supply_hwmon_to_property(enum hwmon_sensor_types type, u32 attr, int channel) { switch (type) { case hwmon_in: return power_supply_hwmon_in_to_property(attr); case hwmon_curr: return power_supply_hwmon_curr_to_property(attr); case hwmon_power: return power_supply_hwmon_power_to_property(attr); case hwmon_temp: return power_supply_hwmon_temp_to_property(attr, channel); default: return -EINVAL; } } static bool power_supply_hwmon_is_a_label(enum hwmon_sensor_types type, u32 attr) { return type == hwmon_temp && attr == hwmon_temp_label; } struct hwmon_type_attr_list { const u32 *attrs; size_t n_attrs; }; static const u32 ps_temp_attrs[] = { hwmon_temp_input, hwmon_temp_min, hwmon_temp_max, hwmon_temp_min_alarm, hwmon_temp_max_alarm, }; static const struct hwmon_type_attr_list ps_type_attrs[hwmon_max] = { [hwmon_temp] = { ps_temp_attrs, ARRAY_SIZE(ps_temp_attrs) }, }; static bool power_supply_hwmon_has_input( const struct power_supply_hwmon *psyhw, enum hwmon_sensor_types type, int channel) { const struct hwmon_type_attr_list *attr_list = &ps_type_attrs[type]; size_t i; for (i = 0; i < attr_list->n_attrs; ++i) { int prop = power_supply_hwmon_to_property(type, attr_list->attrs[i], channel); if (prop >= 0 && test_bit(prop, psyhw->props)) return true; } return false; } static bool power_supply_hwmon_is_writable(enum hwmon_sensor_types type, u32 attr) { switch (type) { case hwmon_in: return attr == hwmon_in_min || attr == hwmon_in_max; case hwmon_curr: return attr == hwmon_curr_max; case hwmon_temp: return attr == hwmon_temp_max || attr == hwmon_temp_min || attr == hwmon_temp_min_alarm || attr == hwmon_temp_max_alarm; default: return false; } } static umode_t 
power_supply_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr, int channel) { const struct power_supply_hwmon *psyhw = data; int prop; if (power_supply_hwmon_is_a_label(type, attr)) { if (power_supply_hwmon_has_input(psyhw, type, channel)) return 0444; else return 0; } prop = power_supply_hwmon_to_property(type, attr, channel); if (prop < 0 || !test_bit(prop, psyhw->props)) return 0; if (power_supply_property_is_writeable(psyhw->psy, prop) > 0 && power_supply_hwmon_is_writable(type, attr)) return 0644; return 0444; } static int power_supply_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, const char **str) { switch (type) { case hwmon_temp: *str = ps_temp_label[channel]; break; default: /* unreachable, but see: * gcc bug #51513 [1] and clang bug #978 [2] * * [1] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51513 * [2] https://github.com/ClangBuiltLinux/linux/issues/978 */ break; } return 0; } static int power_supply_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { struct power_supply_hwmon *psyhw = dev_get_drvdata(dev); struct power_supply *psy = psyhw->psy; union power_supply_propval pspval; int ret, prop; prop = power_supply_hwmon_to_property(type, attr, channel); if (prop < 0) return prop; ret = power_supply_get_property(psy, prop, &pspval); if (ret) return ret; switch (type) { /* * Both voltage and current is reported in units of * microvolts/microamps, so we need to adjust it to * milliamps(volts) */ case hwmon_curr: case hwmon_in: pspval.intval = DIV_ROUND_CLOSEST(pspval.intval, 1000); break; case hwmon_power: /* * Power properties are already in microwatts. */ break; /* * Temp needs to be converted from 1/10 C to milli-C */ case hwmon_temp: if (check_mul_overflow(pspval.intval, 100, &pspval.intval)) return -EOVERFLOW; break; default: return -EINVAL; } *val = pspval.intval; return 0; } static int power_supply_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long val) { struct power_supply_hwmon *psyhw = dev_get_drvdata(dev); struct power_supply *psy = psyhw->psy; union power_supply_propval pspval; int prop; prop = power_supply_hwmon_to_property(type, attr, channel); if (prop < 0) return prop; pspval.intval = val; switch (type) { /* * Both voltage and current is reported in units of * microvolts/microamps, so we need to adjust it to * milliamps(volts) */ case hwmon_curr: case hwmon_in: if (check_mul_overflow(pspval.intval, 1000, &pspval.intval)) return -EOVERFLOW; break; /* * Temp needs to be converted from 1/10 C to milli-C */ case hwmon_temp: pspval.intval = DIV_ROUND_CLOSEST(pspval.intval, 100); break; default: return -EINVAL; } return power_supply_set_property(psy, prop, &pspval); } static const struct hwmon_ops power_supply_hwmon_ops = { .is_visible = power_supply_hwmon_is_visible, .read = power_supply_hwmon_read, .write = power_supply_hwmon_write, .read_string = power_supply_hwmon_read_string, }; static const struct hwmon_channel_info * const power_supply_hwmon_info[] = { HWMON_CHANNEL_INFO(temp, HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM, HWMON_T_LABEL | HWMON_T_INPUT | HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM), HWMON_CHANNEL_INFO(curr, HWMON_C_AVERAGE | HWMON_C_MAX | HWMON_C_INPUT), HWMON_CHANNEL_INFO(power, HWMON_P_INPUT | HWMON_P_AVERAGE), HWMON_CHANNEL_INFO(in, HWMON_I_AVERAGE | HWMON_I_MIN | HWMON_I_MAX | HWMON_I_INPUT), NULL }; static const struct hwmon_chip_info 
power_supply_hwmon_chip_info = { .ops = &power_supply_hwmon_ops, .info = power_supply_hwmon_info, }; static const enum power_supply_property power_supply_hwmon_props[] = { POWER_SUPPLY_PROP_CURRENT_AVG, POWER_SUPPLY_PROP_CURRENT_MAX, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_POWER_AVG, POWER_SUPPLY_PROP_POWER_NOW, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TEMP_MAX, POWER_SUPPLY_PROP_TEMP_MIN, POWER_SUPPLY_PROP_TEMP_ALERT_MIN, POWER_SUPPLY_PROP_TEMP_ALERT_MAX, POWER_SUPPLY_PROP_TEMP_AMBIENT, POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN, POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX, POWER_SUPPLY_PROP_VOLTAGE_AVG, POWER_SUPPLY_PROP_VOLTAGE_MIN, POWER_SUPPLY_PROP_VOLTAGE_MAX, POWER_SUPPLY_PROP_VOLTAGE_NOW, }; int power_supply_add_hwmon_sysfs(struct power_supply *psy) { struct power_supply_hwmon *psyhw; struct device *dev = &psy->dev; struct device *hwmon; int ret, i; const char *name; if (!devres_open_group(dev, power_supply_add_hwmon_sysfs, GFP_KERNEL)) return -ENOMEM; psyhw = devm_kzalloc(dev, sizeof(*psyhw), GFP_KERNEL); if (!psyhw) { ret = -ENOMEM; goto error; } psyhw->psy = psy; psyhw->props = devm_bitmap_zalloc(dev, POWER_SUPPLY_PROP_TIME_TO_FULL_AVG + 1, GFP_KERNEL); if (!psyhw->props) { ret = -ENOMEM; goto error; } for (i = 0; i < ARRAY_SIZE(power_supply_hwmon_props); i++) { const enum power_supply_property prop = power_supply_hwmon_props[i]; if (power_supply_has_property(psy, prop)) set_bit(prop, psyhw->props); } name = psy->desc->name; if (strchr(name, '-')) { char *new_name; new_name = devm_kstrdup(dev, name, GFP_KERNEL); if (!new_name) { ret = -ENOMEM; goto error; } strreplace(new_name, '-', '_'); name = new_name; } hwmon = devm_hwmon_device_register_with_info(dev, name, psyhw, &power_supply_hwmon_chip_info, NULL); ret = PTR_ERR_OR_ZERO(hwmon); if (ret) goto error; devres_close_group(dev, power_supply_add_hwmon_sysfs); return 0; error: devres_release_group(dev, NULL); return ret; } void power_supply_remove_hwmon_sysfs(struct power_supply *psy) { devres_release_group(&psy->dev, power_supply_add_hwmon_sysfs); } |
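/*
 * Standalone illustration (userspace C, not kernel code) of the unit scaling
 * done in power_supply_hwmon_read()/_write() above: the power-supply core
 * reports voltage/current in microvolts/microamps and temperature in tenths
 * of a degree Celsius, while hwmon expects millivolts/milliamps and
 * millidegrees.  div_round_closest() below is a simplified stand-in for the
 * kernel's DIV_ROUND_CLOSEST(), written just for this demo.
 */
#include <stdio.h>

static long div_round_closest(long x, long divisor)
{
	return (x + (x < 0 ? -divisor : divisor) / 2) / divisor;
}

int main(void)
{
	long now_uv = 3712345;	/* e.g. POWER_SUPPLY_PROP_VOLTAGE_NOW, in uV */
	long temp_deci_c = 273;	/* e.g. POWER_SUPPLY_PROP_TEMP, in 0.1 C units */

	/* hwmon_in/hwmon_curr direction: microunits -> milliunits */
	printf("in1_input:   %ld mV\n", div_round_closest(now_uv, 1000));

	/* hwmon_temp direction: tenths of a degree -> millidegrees C */
	printf("temp1_input: %ld\n", temp_deci_c * 100);

	return 0;
}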
2 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 | /* SPDX-License-Identifier: GPL-2.0-only */ /* * fwnode.h - Firmware device node object handle type definition. * * Copyright (C) 2015, Intel Corporation * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com> */ #ifndef _LINUX_FWNODE_H_ #define _LINUX_FWNODE_H_ #include <linux/bits.h> #include <linux/err.h> #include <linux/list.h> #include <linux/types.h> enum dev_dma_attr { DEV_DMA_NOT_SUPPORTED, DEV_DMA_NON_COHERENT, DEV_DMA_COHERENT, }; struct fwnode_operations; struct device; /* * fwnode flags * * LINKS_ADDED: The fwnode has already be parsed to add fwnode links. * NOT_DEVICE: The fwnode will never be populated as a struct device. * INITIALIZED: The hardware corresponding to fwnode has been initialized. * NEEDS_CHILD_BOUND_ON_ADD: For this fwnode/device to probe successfully, its * driver needs its child devices to be bound with * their respective drivers as soon as they are * added. * BEST_EFFORT: The fwnode/device needs to probe early and might be missing some * suppliers. Only enforce ordering with suppliers that have * drivers. */ #define FWNODE_FLAG_LINKS_ADDED BIT(0) #define FWNODE_FLAG_NOT_DEVICE BIT(1) #define FWNODE_FLAG_INITIALIZED BIT(2) #define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD BIT(3) #define FWNODE_FLAG_BEST_EFFORT BIT(4) #define FWNODE_FLAG_VISITED BIT(5) struct fwnode_handle { struct fwnode_handle *secondary; const struct fwnode_operations *ops; /* The below is used solely by device links, don't use otherwise */ struct device *dev; struct list_head suppliers; struct list_head consumers; u8 flags; }; /* * fwnode link flags * * CYCLE: The fwnode link is part of a cycle. Don't defer probe. * IGNORE: Completely ignore this link, even during cycle detection. */ #define FWLINK_FLAG_CYCLE BIT(0) #define FWLINK_FLAG_IGNORE BIT(1) struct fwnode_link { struct fwnode_handle *supplier; struct list_head s_hook; struct fwnode_handle *consumer; struct list_head c_hook; u8 flags; }; /** * struct fwnode_endpoint - Fwnode graph endpoint * @port: Port number * @id: Endpoint id * @local_fwnode: reference to the related fwnode */ struct fwnode_endpoint { unsigned int port; unsigned int id; const struct fwnode_handle *local_fwnode; }; /* * ports and endpoints defined as software_nodes should all follow a common * naming scheme; use these macros to ensure commonality. 
*/ #define SWNODE_GRAPH_PORT_NAME_FMT "port@%u" #define SWNODE_GRAPH_ENDPOINT_NAME_FMT "endpoint@%u" #define NR_FWNODE_REFERENCE_ARGS 16 /** * struct fwnode_reference_args - Fwnode reference with additional arguments * @fwnode:- A reference to the base fwnode * @nargs: Number of elements in @args array * @args: Integer arguments on the fwnode */ struct fwnode_reference_args { struct fwnode_handle *fwnode; unsigned int nargs; u64 args[NR_FWNODE_REFERENCE_ARGS]; }; /** * struct fwnode_operations - Operations for fwnode interface * @get: Get a reference to an fwnode. * @put: Put a reference to an fwnode. * @device_is_available: Return true if the device is available. * @device_get_match_data: Return the device driver match data. * @property_present: Return true if a property is present. * @property_read_bool: Return a boolean property value. * @property_read_int_array: Read an array of integer properties. Return zero on * success, a negative error code otherwise. * @property_read_string_array: Read an array of string properties. Return zero * on success, a negative error code otherwise. * @get_name: Return the name of an fwnode. * @get_name_prefix: Get a prefix for a node (for printing purposes). * @get_parent: Return the parent of an fwnode. * @get_next_child_node: Return the next child node in an iteration. * @get_named_child_node: Return a child node with a given name. * @get_reference_args: Return a reference pointed to by a property, with args * @graph_get_next_endpoint: Return an endpoint node in an iteration. * @graph_get_remote_endpoint: Return the remote endpoint node of a local * endpoint node. * @graph_get_port_parent: Return the parent node of a port node. * @graph_parse_endpoint: Parse endpoint for port and endpoint id. * @add_links: Create fwnode links to all the suppliers of the fwnode. Return * zero on success, a negative error code otherwise. 
*/ struct fwnode_operations { struct fwnode_handle *(*get)(struct fwnode_handle *fwnode); void (*put)(struct fwnode_handle *fwnode); bool (*device_is_available)(const struct fwnode_handle *fwnode); const void *(*device_get_match_data)(const struct fwnode_handle *fwnode, const struct device *dev); bool (*device_dma_supported)(const struct fwnode_handle *fwnode); enum dev_dma_attr (*device_get_dma_attr)(const struct fwnode_handle *fwnode); bool (*property_present)(const struct fwnode_handle *fwnode, const char *propname); bool (*property_read_bool)(const struct fwnode_handle *fwnode, const char *propname); int (*property_read_int_array)(const struct fwnode_handle *fwnode, const char *propname, unsigned int elem_size, void *val, size_t nval); int (*property_read_string_array)(const struct fwnode_handle *fwnode_handle, const char *propname, const char **val, size_t nval); const char *(*get_name)(const struct fwnode_handle *fwnode); const char *(*get_name_prefix)(const struct fwnode_handle *fwnode); struct fwnode_handle *(*get_parent)(const struct fwnode_handle *fwnode); struct fwnode_handle * (*get_next_child_node)(const struct fwnode_handle *fwnode, struct fwnode_handle *child); struct fwnode_handle * (*get_named_child_node)(const struct fwnode_handle *fwnode, const char *name); int (*get_reference_args)(const struct fwnode_handle *fwnode, const char *prop, const char *nargs_prop, unsigned int nargs, unsigned int index, struct fwnode_reference_args *args); struct fwnode_handle * (*graph_get_next_endpoint)(const struct fwnode_handle *fwnode, struct fwnode_handle *prev); struct fwnode_handle * (*graph_get_remote_endpoint)(const struct fwnode_handle *fwnode); struct fwnode_handle * (*graph_get_port_parent)(struct fwnode_handle *fwnode); int (*graph_parse_endpoint)(const struct fwnode_handle *fwnode, struct fwnode_endpoint *endpoint); void __iomem *(*iomap)(struct fwnode_handle *fwnode, int index); int (*irq_get)(const struct fwnode_handle *fwnode, unsigned int index); int (*add_links)(struct fwnode_handle *fwnode); }; #define fwnode_has_op(fwnode, op) \ (!IS_ERR_OR_NULL(fwnode) && (fwnode)->ops && (fwnode)->ops->op) #define fwnode_call_int_op(fwnode, op, ...) \ (fwnode_has_op(fwnode, op) ? \ (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : (IS_ERR_OR_NULL(fwnode) ? -EINVAL : -ENXIO)) #define fwnode_call_bool_op(fwnode, op, ...) \ (fwnode_has_op(fwnode, op) ? \ (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : false) #define fwnode_call_ptr_op(fwnode, op, ...) \ (fwnode_has_op(fwnode, op) ? \ (fwnode)->ops->op(fwnode, ## __VA_ARGS__) : NULL) #define fwnode_call_void_op(fwnode, op, ...) \ do { \ if (fwnode_has_op(fwnode, op)) \ (fwnode)->ops->op(fwnode, ## __VA_ARGS__); \ } while (false) static inline void fwnode_init(struct fwnode_handle *fwnode, const struct fwnode_operations *ops) { fwnode->ops = ops; INIT_LIST_HEAD(&fwnode->consumers); INIT_LIST_HEAD(&fwnode->suppliers); } static inline void fwnode_dev_initialized(struct fwnode_handle *fwnode, bool initialized) { if (IS_ERR_OR_NULL(fwnode)) return; if (initialized) fwnode->flags |= FWNODE_FLAG_INITIALIZED; else fwnode->flags &= ~FWNODE_FLAG_INITIALIZED; } int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup, u8 flags); void fwnode_links_purge(struct fwnode_handle *fwnode); void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode); bool fw_devlink_is_strict(void); #endif |
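/*
 * Minimal sketch of a (hypothetical, out-of-tree) firmware-node backend built
 * on the interfaces above: it fills in only the fwnode_operations callbacks
 * it supports, attaches them with fwnode_init(), and lets consumers dispatch
 * through the fwnode_call_*_op() wrappers, which degrade gracefully
 * (false/NULL/-ENXIO) whenever an op is absent.
 */
#include <linux/container_of.h>

struct example_fw_node {
	struct fwnode_handle fwnode;
	const char *name;
};

static const char *example_get_name(const struct fwnode_handle *fwnode)
{
	return container_of(fwnode, struct example_fw_node, fwnode)->name;
}

static bool example_property_present(const struct fwnode_handle *fwnode,
				     const char *propname)
{
	/* a real backend would look the property up; pretend it exists */
	return true;
}

static const struct fwnode_operations example_fwnode_ops = {
	.get_name		= example_get_name,
	.property_present	= example_property_present,
};

static void example_fw_node_setup(struct example_fw_node *n, const char *name)
{
	n->name = name;
	fwnode_init(&n->fwnode, &example_fwnode_ops);
}

static bool example_fw_node_has_compatible(struct example_fw_node *n)
{
	/* resolves to example_property_present(); false if the op were unset */
	return fwnode_call_bool_op(&n->fwnode, property_present, "compatible");
}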
49 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 | /* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM dccp #if !defined(_TRACE_DCCP_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_DCCP_H #include <net/sock.h> #include "dccp.h" #include "ccids/ccid3.h" #include <linux/tracepoint.h> #include <trace/events/net_probe_common.h> TRACE_EVENT(dccp_probe, TP_PROTO(struct sock *sk, size_t size), TP_ARGS(sk, size), TP_STRUCT__entry( /* sockaddr_in6 is always bigger than sockaddr_in */ __array(__u8, saddr, sizeof(struct sockaddr_in6)) __array(__u8, daddr, sizeof(struct sockaddr_in6)) __field(__u16, sport) __field(__u16, dport) __field(__u16, size) __field(__u16, tx_s) __field(__u32, tx_rtt) __field(__u32, tx_p) __field(__u32, tx_x_calc) __field(__u64, tx_x_recv) __field(__u64, tx_x) __field(__u32, tx_t_ipi) ), TP_fast_assign( const struct inet_sock *inet = inet_sk(sk); struct ccid3_hc_tx_sock *hc = NULL; if (ccid_get_current_tx_ccid(dccp_sk(sk)) == DCCPC_CCID3) hc = ccid3_hc_tx_sk(sk); memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); TP_STORE_ADDR_PORTS(__entry, inet, sk); /* For filtering use */ __entry->sport = ntohs(inet->inet_sport); __entry->dport = ntohs(inet->inet_dport); __entry->size = size; if (hc) { __entry->tx_s = hc->tx_s; __entry->tx_rtt = hc->tx_rtt; __entry->tx_p = hc->tx_p; __entry->tx_x_calc = hc->tx_x_calc; __entry->tx_x_recv = hc->tx_x_recv >> 6; __entry->tx_x = hc->tx_x >> 6; __entry->tx_t_ipi = hc->tx_t_ipi; } else { __entry->tx_s = 0; memset_startat(__entry, 0, tx_rtt); } ), TP_printk("src=%pISpc dest=%pISpc size=%d tx_s=%d tx_rtt=%d " "tx_p=%d tx_x_calc=%u tx_x_recv=%llu tx_x=%llu tx_t_ipi=%d", __entry->saddr, __entry->daddr, __entry->size, __entry->tx_s, __entry->tx_rtt, __entry->tx_p, __entry->tx_x_calc, __entry->tx_x_recv, __entry->tx_x, __entry->tx_t_ipi) ); #endif /* _TRACE_TCP_H */ /* This part must be outside protection */ #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE trace #include <trace/define_trace.h> |
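/*
 * The TRACE_EVENT(dccp_probe, ...) definition above is expanded by
 * <trace/define_trace.h> into a trace_dccp_probe() call point.  Sketch of a
 * transmit-path call site (the function below is hypothetical; in the tree
 * the event is fired from DCCP's sendmsg path):
 */
static void example_dccp_tx_hook(struct sock *sk, size_t payload_len)
{
	/*
	 * One event per transmitted message; TP_fast_assign() above samples
	 * the CCID3 TX state only when CCID3 is the active congestion control.
	 */
	trace_dccp_probe(sk, payload_len);
}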
1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 | /* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (c) 2013 Trond Myklebust <Trond.Myklebust@netapp.com> */ #undef TRACE_SYSTEM #define TRACE_SYSTEM nfs #if !defined(_TRACE_NFS_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_NFS_H #include <linux/tracepoint.h> #include <linux/iversion.h> #include <trace/misc/fs.h> #include <trace/misc/nfs.h> #include <trace/misc/sunrpc.h> #define nfs_show_cache_validity(v) \ __print_flags(v, "|", \ { NFS_INO_INVALID_DATA, "INVALID_DATA" }, \ { NFS_INO_INVALID_ATIME, "INVALID_ATIME" }, \ { NFS_INO_INVALID_ACCESS, "INVALID_ACCESS" }, \ { NFS_INO_INVALID_ACL, "INVALID_ACL" }, \ { NFS_INO_REVAL_FORCED, "REVAL_FORCED" }, \ { NFS_INO_INVALID_LABEL, "INVALID_LABEL" }, \ { NFS_INO_INVALID_CHANGE, "INVALID_CHANGE" }, \ { NFS_INO_INVALID_CTIME, "INVALID_CTIME" }, \ { NFS_INO_INVALID_MTIME, "INVALID_MTIME" }, \ { NFS_INO_INVALID_SIZE, "INVALID_SIZE" }, \ { NFS_INO_INVALID_OTHER, "INVALID_OTHER" }, \ { NFS_INO_DATA_INVAL_DEFER, "DATA_INVAL_DEFER" }, \ { NFS_INO_INVALID_BLOCKS, "INVALID_BLOCKS" }, \ { NFS_INO_INVALID_XATTR, "INVALID_XATTR" }, \ { NFS_INO_INVALID_NLINK, "INVALID_NLINK" }, \ { NFS_INO_INVALID_MODE, "INVALID_MODE" }) #define nfs_show_nfsi_flags(v) \ __print_flags(v, "|", \ { BIT(NFS_INO_STALE), "STALE" }, \ { BIT(NFS_INO_ACL_LRU_SET), "ACL_LRU_SET" }, \ { BIT(NFS_INO_INVALIDATING), "INVALIDATING" }, \ { BIT(NFS_INO_LAYOUTCOMMIT), "NEED_LAYOUTCOMMIT" }, \ { BIT(NFS_INO_LAYOUTCOMMITTING), "LAYOUTCOMMIT" }, \ { BIT(NFS_INO_LAYOUTSTATS), "LAYOUTSTATS" }, \ { BIT(NFS_INO_ODIRECT), "ODIRECT" }) DECLARE_EVENT_CLASS(nfs_inode_event, TP_PROTO( const struct inode *inode ), TP_ARGS(inode), TP_STRUCT__entry( __field(dev_t, dev) __field(u32, fhandle) __field(u64, fileid) __field(u64, version) ), TP_fast_assign( const struct nfs_inode *nfsi = NFS_I(inode); __entry->dev = inode->i_sb->s_dev; __entry->fileid = nfsi->fileid; __entry->fhandle = nfs_fhandle_hash(&nfsi->fh); __entry->version = inode_peek_iversion_raw(inode); ), TP_printk( "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu ", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, (unsigned long long)__entry->version ) ); DECLARE_EVENT_CLASS(nfs_inode_event_done, TP_PROTO( const struct inode *inode, int error ), TP_ARGS(inode, error), TP_STRUCT__entry( __field(unsigned long, error) __field(dev_t, dev) __field(u32, fhandle) __field(unsigned char, type) __field(u64, fileid) __field(u64, version) __field(loff_t, size) __field(unsigned long, nfsi_flags) __field(unsigned long, cache_validity) ), TP_fast_assign( const struct nfs_inode *nfsi = NFS_I(inode); __entry->error = error < 0 ? 
-error : 0; __entry->dev = inode->i_sb->s_dev; __entry->fileid = nfsi->fileid; __entry->fhandle = nfs_fhandle_hash(&nfsi->fh); __entry->type = nfs_umode_to_dtype(inode->i_mode); __entry->version = inode_peek_iversion_raw(inode); __entry->size = i_size_read(inode); __entry->nfsi_flags = nfsi->flags; __entry->cache_validity = nfsi->cache_validity; ), TP_printk( "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x " "type=%u (%s) version=%llu size=%lld " "cache_validity=0x%lx (%s) nfs_flags=0x%lx (%s)", -__entry->error, show_nfs_status(__entry->error), MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, __entry->type, show_fs_dirent_type(__entry->type), (unsigned long long)__entry->version, (long long)__entry->size, __entry->cache_validity, nfs_show_cache_validity(__entry->cache_validity), __entry->nfsi_flags, nfs_show_nfsi_flags(__entry->nfsi_flags) ) ); #define DEFINE_NFS_INODE_EVENT(name) \ DEFINE_EVENT(nfs_inode_event, name, \ TP_PROTO( \ const struct inode *inode \ ), \ TP_ARGS(inode)) #define DEFINE_NFS_INODE_EVENT_DONE(name) \ DEFINE_EVENT(nfs_inode_event_done, name, \ TP_PROTO( \ const struct inode *inode, \ int error \ ), \ TP_ARGS(inode, error)) DEFINE_NFS_INODE_EVENT(nfs_set_inode_stale); DEFINE_NFS_INODE_EVENT(nfs_refresh_inode_enter); DEFINE_NFS_INODE_EVENT_DONE(nfs_refresh_inode_exit); DEFINE_NFS_INODE_EVENT(nfs_revalidate_inode_enter); DEFINE_NFS_INODE_EVENT_DONE(nfs_revalidate_inode_exit); DEFINE_NFS_INODE_EVENT(nfs_invalidate_mapping_enter); DEFINE_NFS_INODE_EVENT_DONE(nfs_invalidate_mapping_exit); DEFINE_NFS_INODE_EVENT(nfs_getattr_enter); DEFINE_NFS_INODE_EVENT_DONE(nfs_getattr_exit); DEFINE_NFS_INODE_EVENT(nfs_setattr_enter); DEFINE_NFS_INODE_EVENT_DONE(nfs_setattr_exit); DEFINE_NFS_INODE_EVENT(nfs_writeback_inode_enter); DEFINE_NFS_INODE_EVENT_DONE(nfs_writeback_inode_exit); DEFINE_NFS_INODE_EVENT(nfs_fsync_enter); DEFINE_NFS_INODE_EVENT_DONE(nfs_fsync_exit); DEFINE_NFS_INODE_EVENT(nfs_access_enter); DEFINE_NFS_INODE_EVENT_DONE(nfs_set_cache_invalid); DEFINE_NFS_INODE_EVENT(nfs_readdir_force_readdirplus); DEFINE_NFS_INODE_EVENT_DONE(nfs_readdir_cache_fill_done); DEFINE_NFS_INODE_EVENT_DONE(nfs_readdir_uncached_done); TRACE_EVENT(nfs_access_exit, TP_PROTO( const struct inode *inode, unsigned int mask, unsigned int permitted, int error ), TP_ARGS(inode, mask, permitted, error), TP_STRUCT__entry( __field(unsigned long, error) __field(dev_t, dev) __field(u32, fhandle) __field(unsigned char, type) __field(u64, fileid) __field(u64, version) __field(loff_t, size) __field(unsigned long, nfsi_flags) __field(unsigned long, cache_validity) __field(unsigned int, mask) __field(unsigned int, permitted) ), TP_fast_assign( const struct nfs_inode *nfsi = NFS_I(inode); __entry->error = error < 0 ? 
-error : 0; __entry->dev = inode->i_sb->s_dev; __entry->fileid = nfsi->fileid; __entry->fhandle = nfs_fhandle_hash(&nfsi->fh); __entry->type = nfs_umode_to_dtype(inode->i_mode); __entry->version = inode_peek_iversion_raw(inode); __entry->size = i_size_read(inode); __entry->nfsi_flags = nfsi->flags; __entry->cache_validity = nfsi->cache_validity; __entry->mask = mask; __entry->permitted = permitted; ), TP_printk( "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x " "type=%u (%s) version=%llu size=%lld " "cache_validity=0x%lx (%s) nfs_flags=0x%lx (%s) " "mask=0x%x permitted=0x%x", -__entry->error, show_nfs_status(__entry->error), MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, __entry->type, show_fs_dirent_type(__entry->type), (unsigned long long)__entry->version, (long long)__entry->size, __entry->cache_validity, nfs_show_cache_validity(__entry->cache_validity), __entry->nfsi_flags, nfs_show_nfsi_flags(__entry->nfsi_flags), __entry->mask, __entry->permitted ) ); DECLARE_EVENT_CLASS(nfs_update_size_class, TP_PROTO( const struct inode *inode, loff_t new_size ), TP_ARGS(inode, new_size), TP_STRUCT__entry( __field(dev_t, dev) __field(u32, fhandle) __field(u64, fileid) __field(u64, version) __field(loff_t, cur_size) __field(loff_t, new_size) ), TP_fast_assign( const struct nfs_inode *nfsi = NFS_I(inode); __entry->dev = inode->i_sb->s_dev; __entry->fhandle = nfs_fhandle_hash(&nfsi->fh); __entry->fileid = nfsi->fileid; __entry->version = inode_peek_iversion_raw(inode); __entry->cur_size = i_size_read(inode); __entry->new_size = new_size; ), TP_printk( "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu cursize=%lld newsize=%lld", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, __entry->version, __entry->cur_size, __entry->new_size ) ); #define DEFINE_NFS_UPDATE_SIZE_EVENT(name) \ DEFINE_EVENT(nfs_update_size_class, nfs_size_##name, \ TP_PROTO( \ const struct inode *inode, \ loff_t new_size \ ), \ TP_ARGS(inode, new_size)) DEFINE_NFS_UPDATE_SIZE_EVENT(truncate); DEFINE_NFS_UPDATE_SIZE_EVENT(wcc); DEFINE_NFS_UPDATE_SIZE_EVENT(update); DEFINE_NFS_UPDATE_SIZE_EVENT(grow); DECLARE_EVENT_CLASS(nfs_inode_range_event, TP_PROTO( const struct inode *inode, loff_t range_start, loff_t range_end ), TP_ARGS(inode, range_start, range_end), TP_STRUCT__entry( __field(dev_t, dev) __field(u32, fhandle) __field(u64, fileid) __field(u64, version) __field(loff_t, range_start) __field(loff_t, range_end) ), TP_fast_assign( const struct nfs_inode *nfsi = NFS_I(inode); __entry->dev = inode->i_sb->s_dev; __entry->fhandle = nfs_fhandle_hash(&nfsi->fh); __entry->fileid = nfsi->fileid; __entry->version = inode_peek_iversion_raw(inode); __entry->range_start = range_start; __entry->range_end = range_end; ), TP_printk( "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu " "range=[%lld, %lld]", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, __entry->version, __entry->range_start, __entry->range_end ) ); #define DEFINE_NFS_INODE_RANGE_EVENT(name) \ DEFINE_EVENT(nfs_inode_range_event, name, \ TP_PROTO( \ const struct inode *inode, \ loff_t range_start, \ loff_t range_end \ ), \ TP_ARGS(inode, range_start, range_end)) DEFINE_NFS_INODE_RANGE_EVENT(nfs_readdir_invalidate_cache_range); DECLARE_EVENT_CLASS(nfs_readdir_event, TP_PROTO( const struct file *file, const __be32 *verifier, u64 cookie, pgoff_t page_index, unsigned int dtsize ), TP_ARGS(file, verifier, cookie, page_index, dtsize), 
TP_STRUCT__entry( __field(dev_t, dev) __field(u32, fhandle) __field(u64, fileid) __field(u64, version) __array(char, verifier, NFS4_VERIFIER_SIZE) __field(u64, cookie) __field(pgoff_t, index) __field(unsigned int, dtsize) ), TP_fast_assign( const struct inode *dir = file_inode(file); const struct nfs_inode *nfsi = NFS_I(dir); __entry->dev = dir->i_sb->s_dev; __entry->fileid = nfsi->fileid; __entry->fhandle = nfs_fhandle_hash(&nfsi->fh); __entry->version = inode_peek_iversion_raw(dir); if (cookie != 0) memcpy(__entry->verifier, verifier, NFS4_VERIFIER_SIZE); else memset(__entry->verifier, 0, NFS4_VERIFIER_SIZE); __entry->cookie = cookie; __entry->index = page_index; __entry->dtsize = dtsize; ), TP_printk( "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu " "cookie=%s:0x%llx cache_index=%lu dtsize=%u", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, __entry->version, show_nfs4_verifier(__entry->verifier), (unsigned long long)__entry->cookie, __entry->index, __entry->dtsize ) ); #define DEFINE_NFS_READDIR_EVENT(name) \ DEFINE_EVENT(nfs_readdir_event, name, \ TP_PROTO( \ const struct file *file, \ const __be32 *verifier, \ u64 cookie, \ pgoff_t page_index, \ unsigned int dtsize \ ), \ TP_ARGS(file, verifier, cookie, page_index, dtsize)) DEFINE_NFS_READDIR_EVENT(nfs_readdir_cache_fill); DEFINE_NFS_READDIR_EVENT(nfs_readdir_uncached); DECLARE_EVENT_CLASS(nfs_lookup_event, TP_PROTO( const struct inode *dir, const struct dentry *dentry, unsigned int flags ), TP_ARGS(dir, dentry, flags), TP_STRUCT__entry( __field(unsigned long, flags) __field(dev_t, dev) __field(u64, dir) __field(u64, fileid) __string(name, dentry->d_name.name) ), TP_fast_assign( __entry->dev = dir->i_sb->s_dev; __entry->dir = NFS_FILEID(dir); __entry->flags = flags; __entry->fileid = d_is_negative(dentry) ? 0 : NFS_FILEID(d_inode(dentry)); __assign_str(name); ), TP_printk( "flags=0x%lx (%s) name=%02x:%02x:%llu/%s fileid=%llu", __entry->flags, show_fs_lookup_flags(__entry->flags), MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->dir, __get_str(name), __entry->fileid ) ); #define DEFINE_NFS_LOOKUP_EVENT(name) \ DEFINE_EVENT(nfs_lookup_event, name, \ TP_PROTO( \ const struct inode *dir, \ const struct dentry *dentry, \ unsigned int flags \ ), \ TP_ARGS(dir, dentry, flags)) DECLARE_EVENT_CLASS(nfs_lookup_event_done, TP_PROTO( const struct inode *dir, const struct dentry *dentry, unsigned int flags, int error ), TP_ARGS(dir, dentry, flags, error), TP_STRUCT__entry( __field(unsigned long, error) __field(unsigned long, flags) __field(dev_t, dev) __field(u64, dir) __field(u64, fileid) __string(name, dentry->d_name.name) ), TP_fast_assign( __entry->dev = dir->i_sb->s_dev; __entry->dir = NFS_FILEID(dir); __entry->error = error < 0 ? -error : 0; __entry->flags = flags; __entry->fileid = d_is_negative(dentry) ? 
0 : NFS_FILEID(d_inode(dentry)); __assign_str(name); ), TP_printk( "error=%ld (%s) flags=0x%lx (%s) name=%02x:%02x:%llu/%s fileid=%llu", -__entry->error, show_nfs_status(__entry->error), __entry->flags, show_fs_lookup_flags(__entry->flags), MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->dir, __get_str(name), __entry->fileid ) ); #define DEFINE_NFS_LOOKUP_EVENT_DONE(name) \ DEFINE_EVENT(nfs_lookup_event_done, name, \ TP_PROTO( \ const struct inode *dir, \ const struct dentry *dentry, \ unsigned int flags, \ int error \ ), \ TP_ARGS(dir, dentry, flags, error)) DEFINE_NFS_LOOKUP_EVENT(nfs_lookup_enter); DEFINE_NFS_LOOKUP_EVENT_DONE(nfs_lookup_exit); DEFINE_NFS_LOOKUP_EVENT(nfs_lookup_revalidate_enter); DEFINE_NFS_LOOKUP_EVENT_DONE(nfs_lookup_revalidate_exit); DEFINE_NFS_LOOKUP_EVENT(nfs_readdir_lookup); DEFINE_NFS_LOOKUP_EVENT(nfs_readdir_lookup_revalidate_failed); DEFINE_NFS_LOOKUP_EVENT_DONE(nfs_readdir_lookup_revalidate); TRACE_EVENT(nfs_atomic_open_enter, TP_PROTO( const struct inode *dir, const struct nfs_open_context *ctx, unsigned int flags ), TP_ARGS(dir, ctx, flags), TP_STRUCT__entry( __field(unsigned long, flags) __field(unsigned long, fmode) __field(dev_t, dev) __field(u64, dir) __string(name, ctx->dentry->d_name.name) ), TP_fast_assign( __entry->dev = dir->i_sb->s_dev; __entry->dir = NFS_FILEID(dir); __entry->flags = flags; __entry->fmode = (__force unsigned long)ctx->mode; __assign_str(name); ), TP_printk( "flags=0x%lx (%s) fmode=%s name=%02x:%02x:%llu/%s", __entry->flags, show_fs_fcntl_open_flags(__entry->flags), show_fs_fmode_flags(__entry->fmode), MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->dir, __get_str(name) ) ); TRACE_EVENT(nfs_atomic_open_exit, TP_PROTO( const struct inode *dir, const struct nfs_open_context *ctx, unsigned int flags, int error ), TP_ARGS(dir, ctx, flags, error), TP_STRUCT__entry( __field(unsigned long, error) __field(unsigned long, flags) __field(unsigned long, fmode) __field(dev_t, dev) __field(u64, dir) __string(name, ctx->dentry->d_name.name) ), TP_fast_assign( __entry->error = -error; __entry->dev = dir->i_sb->s_dev; __entry->dir = NFS_FILEID(dir); __entry->flags = flags; __entry->fmode = (__force unsigned long)ctx->mode; __assign_str(name); ), TP_printk( "error=%ld (%s) flags=0x%lx (%s) fmode=%s " "name=%02x:%02x:%llu/%s", -__entry->error, show_nfs_status(__entry->error), __entry->flags, show_fs_fcntl_open_flags(__entry->flags), show_fs_fmode_flags(__entry->fmode), MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->dir, __get_str(name) ) ); TRACE_EVENT(nfs_create_enter, TP_PROTO( const struct inode *dir, const struct dentry *dentry, unsigned int flags ), TP_ARGS(dir, dentry, flags), TP_STRUCT__entry( __field(unsigned long, flags) __field(dev_t, dev) __field(u64, dir) __string(name, dentry->d_name.name) ), TP_fast_assign( __entry->dev = dir->i_sb->s_dev; __entry->dir = NFS_FILEID(dir); __entry->flags = flags; __assign_str(name); ), TP_printk( "flags=0x%lx (%s) name=%02x:%02x:%llu/%s", __entry->flags, show_fs_fcntl_open_flags(__entry->flags), MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->dir, __get_str(name) ) ); TRACE_EVENT(nfs_create_exit, TP_PROTO( const struct inode *dir, const struct dentry *dentry, unsigned int flags, int error ), TP_ARGS(dir, dentry, flags, error), TP_STRUCT__entry( __field(unsigned long, error) __field(unsigned long, flags) __field(dev_t, dev) __field(u64, dir) __string(name, dentry->d_name.name) ), TP_fast_assign( __entry->error = 
-error; __entry->dev = dir->i_sb->s_dev; __entry->dir = NFS_FILEID(dir); __entry->flags = flags; __assign_str(name); ), TP_printk( "error=%ld (%s) flags=0x%lx (%s) name=%02x:%02x:%llu/%s", -__entry->error, show_nfs_status(__entry->error), __entry->flags, show_fs_fcntl_open_flags(__entry->flags), MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->dir, __get_str(name) ) ); DECLARE_EVENT_CLASS(nfs_directory_event, TP_PROTO( const struct inode *dir, const struct dentry *dentry ), TP_ARGS(dir, dentry), TP_STRUCT__entry( __field(dev_t, dev) __field(u64, dir) __string(name, dentry->d_name.name) ), TP_fast_assign( __entry->dev = dir->i_sb->s_dev; __entry->dir = NFS_FILEID(dir); __assign_str(name); ), TP_printk( "name=%02x:%02x:%llu/%s", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->dir, __get_str(name) ) ); #define DEFINE_NFS_DIRECTORY_EVENT(name) \ DEFINE_EVENT(nfs_directory_event, name, \ TP_PROTO( \ const struct inode *dir, \ const struct dentry *dentry \ ), \ TP_ARGS(dir, dentry)) DECLARE_EVENT_CLASS(nfs_directory_event_done, TP_PROTO( const struct inode *dir, const struct dentry *dentry, int error ), TP_ARGS(dir, dentry, error), TP_STRUCT__entry( __field(unsigned long, error) __field(dev_t, dev) __field(u64, dir) __string(name, dentry->d_name.name) ), TP_fast_assign( __entry->dev = dir->i_sb->s_dev; __entry->dir = NFS_FILEID(dir); __entry->error = error < 0 ? -error : 0; __assign_str(name); ), TP_printk( "error=%ld (%s) name=%02x:%02x:%llu/%s", -__entry->error, show_nfs_status(__entry->error), MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->dir, __get_str(name) ) ); #define DEFINE_NFS_DIRECTORY_EVENT_DONE(name) \ DEFINE_EVENT(nfs_directory_event_done, name, \ TP_PROTO( \ const struct inode *dir, \ const struct dentry *dentry, \ int error \ ), \ TP_ARGS(dir, dentry, error)) DEFINE_NFS_DIRECTORY_EVENT(nfs_mknod_enter); DEFINE_NFS_DIRECTORY_EVENT_DONE(nfs_mknod_exit); DEFINE_NFS_DIRECTORY_EVENT(nfs_mkdir_enter); DEFINE_NFS_DIRECTORY_EVENT_DONE(nfs_mkdir_exit); DEFINE_NFS_DIRECTORY_EVENT(nfs_rmdir_enter); DEFINE_NFS_DIRECTORY_EVENT_DONE(nfs_rmdir_exit); DEFINE_NFS_DIRECTORY_EVENT(nfs_remove_enter); DEFINE_NFS_DIRECTORY_EVENT_DONE(nfs_remove_exit); DEFINE_NFS_DIRECTORY_EVENT(nfs_unlink_enter); DEFINE_NFS_DIRECTORY_EVENT_DONE(nfs_unlink_exit); DEFINE_NFS_DIRECTORY_EVENT(nfs_symlink_enter); DEFINE_NFS_DIRECTORY_EVENT_DONE(nfs_symlink_exit); TRACE_EVENT(nfs_link_enter, TP_PROTO( const struct inode *inode, const struct inode *dir, const struct dentry *dentry ), TP_ARGS(inode, dir, dentry), TP_STRUCT__entry( __field(dev_t, dev) __field(u64, fileid) __field(u64, dir) __string(name, dentry->d_name.name) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->fileid = NFS_FILEID(inode); __entry->dir = NFS_FILEID(dir); __assign_str(name); ), TP_printk( "fileid=%02x:%02x:%llu name=%02x:%02x:%llu/%s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->fileid, MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->dir, __get_str(name) ) ); TRACE_EVENT(nfs_link_exit, TP_PROTO( const struct inode *inode, const struct inode *dir, const struct dentry *dentry, int error ), TP_ARGS(inode, dir, dentry, error), TP_STRUCT__entry( __field(unsigned long, error) __field(dev_t, dev) __field(u64, fileid) __field(u64, dir) __string(name, dentry->d_name.name) ), TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->fileid = NFS_FILEID(inode); __entry->dir = NFS_FILEID(dir); __entry->error = error < 0 ? 
-error : 0; __assign_str(name); ), TP_printk( "error=%ld (%s) fileid=%02x:%02x:%llu name=%02x:%02x:%llu/%s", -__entry->error, show_nfs_status(__entry->error), MAJOR(__entry->dev), MINOR(__entry->dev), __entry->fileid, MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->dir, __get_str(name) ) ); DECLARE_EVENT_CLASS(nfs_rename_event, TP_PROTO( const struct inode *old_dir, const struct dentry *old_dentry, const struct inode *new_dir, const struct dentry *new_dentry ), TP_ARGS(old_dir, old_dentry, new_dir, new_dentry), TP_STRUCT__entry( __field(dev_t, dev) __field(u64, old_dir) __field(u64, new_dir) __string(old_name, old_dentry->d_name.name) __string(new_name, new_dentry->d_name.name) ), TP_fast_assign( __entry->dev = old_dir->i_sb->s_dev; __entry->old_dir = NFS_FILEID(old_dir); __entry->new_dir = NFS_FILEID(new_dir); __assign_str(old_name); __assign_str(new_name); ), TP_printk( "old_name=%02x:%02x:%llu/%s new_name=%02x:%02x:%llu/%s", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->old_dir, __get_str(old_name), MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->new_dir, __get_str(new_name) ) ); #define DEFINE_NFS_RENAME_EVENT(name) \ DEFINE_EVENT(nfs_rename_event, name, \ TP_PROTO( \ const struct inode *old_dir, \ const struct dentry *old_dentry, \ const struct inode *new_dir, \ const struct dentry *new_dentry \ ), \ TP_ARGS(old_dir, old_dentry, new_dir, new_dentry)) DECLARE_EVENT_CLASS(nfs_rename_event_done, TP_PROTO( const struct inode *old_dir, const struct dentry *old_dentry, const struct inode *new_dir, const struct dentry *new_dentry, int error ), TP_ARGS(old_dir, old_dentry, new_dir, new_dentry, error), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned long, error) __field(u64, old_dir) __string(old_name, old_dentry->d_name.name) __field(u64, new_dir) __string(new_name, new_dentry->d_name.name) ), TP_fast_assign( __entry->dev = old_dir->i_sb->s_dev; __entry->error = -error; __entry->old_dir = NFS_FILEID(old_dir); __entry->new_dir = NFS_FILEID(new_dir); __assign_str(old_name); __assign_str(new_name); ), TP_printk( "error=%ld (%s) old_name=%02x:%02x:%llu/%s " "new_name=%02x:%02x:%llu/%s", -__entry->error, show_nfs_status(__entry->error), MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->old_dir, __get_str(old_name), MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->new_dir, __get_str(new_name) ) ); #define DEFINE_NFS_RENAME_EVENT_DONE(name) \ DEFINE_EVENT(nfs_rename_event_done, name, \ TP_PROTO( \ const struct inode *old_dir, \ const struct dentry *old_dentry, \ const struct inode *new_dir, \ const struct dentry *new_dentry, \ int error \ ), \ TP_ARGS(old_dir, old_dentry, new_dir, \ new_dentry, error)) DEFINE_NFS_RENAME_EVENT(nfs_rename_enter); DEFINE_NFS_RENAME_EVENT_DONE(nfs_rename_exit); DEFINE_NFS_RENAME_EVENT_DONE(nfs_async_rename_done); TRACE_EVENT(nfs_sillyrename_unlink, TP_PROTO( const struct nfs_unlinkdata *data, int error ), TP_ARGS(data, error), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned long, error) __field(u64, dir) __dynamic_array(char, name, data->args.name.len + 1) ), TP_fast_assign( struct inode *dir = d_inode(data->dentry->d_parent); size_t len = data->args.name.len; __entry->dev = dir->i_sb->s_dev; __entry->dir = NFS_FILEID(dir); __entry->error = -error; memcpy(__get_str(name), data->args.name.name, len); __get_str(name)[len] = 0; ), TP_printk( "error=%ld (%s) name=%02x:%02x:%llu/%s", -__entry->error, show_nfs_status(__entry->error), 
MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->dir, __get_str(name) ) ); DECLARE_EVENT_CLASS(nfs_folio_event, TP_PROTO( const struct inode *inode, loff_t offset, size_t count ), TP_ARGS(inode, offset, count), TP_STRUCT__entry( __field(dev_t, dev) __field(u32, fhandle) __field(u64, fileid) __field(u64, version) __field(loff_t, offset) __field(size_t, count) ), TP_fast_assign( const struct nfs_inode *nfsi = NFS_I(inode); __entry->dev = inode->i_sb->s_dev; __entry->fileid = nfsi->fileid; __entry->fhandle = nfs_fhandle_hash(&nfsi->fh); __entry->version = inode_peek_iversion_raw(inode); __entry->offset = offset, __entry->count = count; ), TP_printk( "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu " "offset=%lld count=%zu", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, __entry->version, __entry->offset, __entry->count ) ); #define DEFINE_NFS_FOLIO_EVENT(name) \ DEFINE_EVENT(nfs_folio_event, name, \ TP_PROTO( \ const struct inode *inode, \ loff_t offset, \ size_t count \ ), \ TP_ARGS(inode, offset, count)) DECLARE_EVENT_CLASS(nfs_folio_event_done, TP_PROTO( const struct inode *inode, loff_t offset, size_t count, int ret ), TP_ARGS(inode, offset, count, ret), TP_STRUCT__entry( __field(dev_t, dev) __field(u32, fhandle) __field(int, ret) __field(u64, fileid) __field(u64, version) __field(loff_t, offset) __field(size_t, count) ), TP_fast_assign( const struct nfs_inode *nfsi = NFS_I(inode); __entry->dev = inode->i_sb->s_dev; __entry->fileid = nfsi->fileid; __entry->fhandle = nfs_fhandle_hash(&nfsi->fh); __entry->version = inode_peek_iversion_raw(inode); __entry->offset = offset, __entry->count = count, __entry->ret = ret; ), TP_printk( "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu " "offset=%lld count=%zu ret=%d", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, __entry->version, __entry->offset, __entry->count, __entry->ret ) ); #define DEFINE_NFS_FOLIO_EVENT_DONE(name) \ DEFINE_EVENT(nfs_folio_event_done, name, \ TP_PROTO( \ const struct inode *inode, \ loff_t offset, \ size_t count, \ int ret \ ), \ TP_ARGS(inode, offset, count, ret)) DEFINE_NFS_FOLIO_EVENT(nfs_aop_readpage); DEFINE_NFS_FOLIO_EVENT_DONE(nfs_aop_readpage_done); DEFINE_NFS_FOLIO_EVENT(nfs_writeback_folio); DEFINE_NFS_FOLIO_EVENT_DONE(nfs_writeback_folio_done); DEFINE_NFS_FOLIO_EVENT(nfs_invalidate_folio); DEFINE_NFS_FOLIO_EVENT_DONE(nfs_launder_folio_done); TRACE_EVENT(nfs_aop_readahead, TP_PROTO( const struct inode *inode, loff_t pos, unsigned int nr_pages ), TP_ARGS(inode, pos, nr_pages), TP_STRUCT__entry( __field(dev_t, dev) __field(u32, fhandle) __field(u64, fileid) __field(u64, version) __field(loff_t, offset) __field(unsigned int, nr_pages) ), TP_fast_assign( const struct nfs_inode *nfsi = NFS_I(inode); __entry->dev = inode->i_sb->s_dev; __entry->fileid = nfsi->fileid; __entry->fhandle = nfs_fhandle_hash(&nfsi->fh); __entry->version = inode_peek_iversion_raw(inode); __entry->offset = pos; __entry->nr_pages = nr_pages; ), TP_printk( "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu offset=%lld nr_pages=%u", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, __entry->version, __entry->offset, __entry->nr_pages ) ); TRACE_EVENT(nfs_aop_readahead_done, TP_PROTO( const struct inode *inode, unsigned int nr_pages, int ret ), TP_ARGS(inode, nr_pages, ret), TP_STRUCT__entry( __field(dev_t, dev) __field(u32, fhandle) __field(int, ret) __field(u64, fileid) 
__field(u64, version) __field(loff_t, offset) __field(unsigned int, nr_pages) ), TP_fast_assign( const struct nfs_inode *nfsi = NFS_I(inode); __entry->dev = inode->i_sb->s_dev; __entry->fileid = nfsi->fileid; __entry->fhandle = nfs_fhandle_hash(&nfsi->fh); __entry->version = inode_peek_iversion_raw(inode); __entry->nr_pages = nr_pages; __entry->ret = ret; ), TP_printk( "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu nr_pages=%u ret=%d", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, __entry->version, __entry->nr_pages, __entry->ret ) ); TRACE_EVENT(nfs_initiate_read, TP_PROTO( const struct nfs_pgio_header *hdr ), TP_ARGS(hdr), TP_STRUCT__entry( __field(dev_t, dev) __field(u32, fhandle) __field(u64, fileid) __field(loff_t, offset) __field(u32, count) ), TP_fast_assign( const struct inode *inode = hdr->inode; const struct nfs_inode *nfsi = NFS_I(inode); const struct nfs_fh *fh = hdr->args.fh ? hdr->args.fh : &nfsi->fh; __entry->offset = hdr->args.offset; __entry->count = hdr->args.count; __entry->dev = inode->i_sb->s_dev; __entry->fileid = nfsi->fileid; __entry->fhandle = nfs_fhandle_hash(fh); ), TP_printk( "fileid=%02x:%02x:%llu fhandle=0x%08x " "offset=%lld count=%u", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, (long long)__entry->offset, __entry->count ) ); TRACE_EVENT(nfs_readpage_done, TP_PROTO( const struct rpc_task *task, const struct nfs_pgio_header *hdr ), TP_ARGS(task, hdr), TP_STRUCT__entry( __field(dev_t, dev) __field(u32, fhandle) __field(u64, fileid) __field(loff_t, offset) __field(u32, arg_count) __field(u32, res_count) __field(bool, eof) __field(int, error) ), TP_fast_assign( const struct inode *inode = hdr->inode; const struct nfs_inode *nfsi = NFS_I(inode); const struct nfs_fh *fh = hdr->args.fh ? hdr->args.fh : &nfsi->fh; __entry->error = task->tk_status; __entry->offset = hdr->args.offset; __entry->arg_count = hdr->args.count; __entry->res_count = hdr->res.count; __entry->eof = hdr->res.eof; __entry->dev = inode->i_sb->s_dev; __entry->fileid = nfsi->fileid; __entry->fhandle = nfs_fhandle_hash(fh); ), TP_printk( "error=%d fileid=%02x:%02x:%llu fhandle=0x%08x " "offset=%lld count=%u res=%u%s", __entry->error, MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, (long long)__entry->offset, __entry->arg_count, __entry->res_count, __entry->eof ? " eof" : "" ) ); TRACE_EVENT(nfs_readpage_short, TP_PROTO( const struct rpc_task *task, const struct nfs_pgio_header *hdr ), TP_ARGS(task, hdr), TP_STRUCT__entry( __field(dev_t, dev) __field(u32, fhandle) __field(u64, fileid) __field(loff_t, offset) __field(u32, arg_count) __field(u32, res_count) __field(bool, eof) __field(int, error) ), TP_fast_assign( const struct inode *inode = hdr->inode; const struct nfs_inode *nfsi = NFS_I(inode); const struct nfs_fh *fh = hdr->args.fh ? hdr->args.fh : &nfsi->fh; __entry->error = task->tk_status; __entry->offset = hdr->args.offset; __entry->arg_count = hdr->args.count; __entry->res_count = hdr->res.count; __entry->eof = hdr->res.eof; __entry->dev = inode->i_sb->s_dev; __entry->fileid = nfsi->fileid; __entry->fhandle = nfs_fhandle_hash(fh); ), TP_printk( "error=%d fileid=%02x:%02x:%llu fhandle=0x%08x " "offset=%lld count=%u res=%u%s", __entry->error, MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, (long long)__entry->offset, __entry->arg_count, __entry->res_count, __entry->eof ? 
" eof" : "" ) ); TRACE_EVENT(nfs_pgio_error, TP_PROTO( const struct nfs_pgio_header *hdr, int error, loff_t pos ), TP_ARGS(hdr, error, pos), TP_STRUCT__entry( __field(dev_t, dev) __field(u32, fhandle) __field(u64, fileid) __field(loff_t, offset) __field(u32, arg_count) __field(u32, res_count) __field(loff_t, pos) __field(int, error) ), TP_fast_assign( const struct inode *inode = hdr->inode; const struct nfs_inode *nfsi = NFS_I(inode); const struct nfs_fh *fh = hdr->args.fh ? hdr->args.fh : &nfsi->fh; __entry->error = error; __entry->offset = hdr->args.offset; __entry->arg_count = hdr->args.count; __entry->res_count = hdr->res.count; __entry->dev = inode->i_sb->s_dev; __entry->fileid = nfsi->fileid; __entry->fhandle = nfs_fhandle_hash(fh); ), TP_printk("error=%d fileid=%02x:%02x:%llu fhandle=0x%08x " "offset=%lld count=%u res=%u pos=%llu", __entry->error, MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, (long long)__entry->offset, __entry->arg_count, __entry->res_count, __entry->pos ) ); TRACE_EVENT(nfs_initiate_write, TP_PROTO( const struct nfs_pgio_header *hdr ), TP_ARGS(hdr), TP_STRUCT__entry( __field(dev_t, dev) __field(u32, fhandle) __field(u64, fileid) __field(loff_t, offset) __field(u32, count) __field(unsigned long, stable) ), TP_fast_assign( const struct inode *inode = hdr->inode; const struct nfs_inode *nfsi = NFS_I(inode); const struct nfs_fh *fh = hdr->args.fh ? hdr->args.fh : &nfsi->fh; __entry->offset = hdr->args.offset; __entry->count = hdr->args.count; __entry->stable = hdr->args.stable; __entry->dev = inode->i_sb->s_dev; __entry->fileid = nfsi->fileid; __entry->fhandle = nfs_fhandle_hash(fh); ), TP_printk( "fileid=%02x:%02x:%llu fhandle=0x%08x " "offset=%lld count=%u stable=%s", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, (long long)__entry->offset, __entry->count, show_nfs_stable_how(__entry->stable) ) ); TRACE_EVENT(nfs_writeback_done, TP_PROTO( const struct rpc_task *task, const struct nfs_pgio_header *hdr ), TP_ARGS(task, hdr), TP_STRUCT__entry( __field(dev_t, dev) __field(u32, fhandle) __field(u64, fileid) __field(loff_t, offset) __field(u32, arg_count) __field(u32, res_count) __field(int, error) __field(unsigned long, stable) __array(char, verifier, NFS4_VERIFIER_SIZE) ), TP_fast_assign( const struct inode *inode = hdr->inode; const struct nfs_inode *nfsi = NFS_I(inode); const struct nfs_fh *fh = hdr->args.fh ? 
hdr->args.fh : &nfsi->fh; const struct nfs_writeverf *verf = hdr->res.verf; __entry->error = task->tk_status; __entry->offset = hdr->args.offset; __entry->arg_count = hdr->args.count; __entry->res_count = hdr->res.count; __entry->stable = verf->committed; memcpy(__entry->verifier, &verf->verifier, NFS4_VERIFIER_SIZE); __entry->dev = inode->i_sb->s_dev; __entry->fileid = nfsi->fileid; __entry->fhandle = nfs_fhandle_hash(fh); ), TP_printk( "error=%d fileid=%02x:%02x:%llu fhandle=0x%08x " "offset=%lld count=%u res=%u stable=%s " "verifier=%s", __entry->error, MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, (long long)__entry->offset, __entry->arg_count, __entry->res_count, show_nfs_stable_how(__entry->stable), show_nfs4_verifier(__entry->verifier) ) ); DECLARE_EVENT_CLASS(nfs_page_error_class, TP_PROTO( const struct inode *inode, const struct nfs_page *req, int error ), TP_ARGS(inode, req, error), TP_STRUCT__entry( __field(dev_t, dev) __field(u32, fhandle) __field(u64, fileid) __field(loff_t, offset) __field(unsigned int, count) __field(int, error) ), TP_fast_assign( const struct nfs_inode *nfsi = NFS_I(inode); __entry->dev = inode->i_sb->s_dev; __entry->fileid = nfsi->fileid; __entry->fhandle = nfs_fhandle_hash(&nfsi->fh); __entry->offset = req_offset(req); __entry->count = req->wb_bytes; __entry->error = error; ), TP_printk( "error=%d fileid=%02x:%02x:%llu fhandle=0x%08x " "offset=%lld count=%u", __entry->error, MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, __entry->offset, __entry->count ) ); #define DEFINE_NFS_PAGEERR_EVENT(name) \ DEFINE_EVENT(nfs_page_error_class, name, \ TP_PROTO( \ const struct inode *inode, \ const struct nfs_page *req, \ int error \ ), \ TP_ARGS(inode, req, error)) DEFINE_NFS_PAGEERR_EVENT(nfs_write_error); DEFINE_NFS_PAGEERR_EVENT(nfs_comp_error); DEFINE_NFS_PAGEERR_EVENT(nfs_commit_error); TRACE_EVENT(nfs_initiate_commit, TP_PROTO( const struct nfs_commit_data *data ), TP_ARGS(data), TP_STRUCT__entry( __field(dev_t, dev) __field(u32, fhandle) __field(u64, fileid) __field(loff_t, offset) __field(u32, count) ), TP_fast_assign( const struct inode *inode = data->inode; const struct nfs_inode *nfsi = NFS_I(inode); const struct nfs_fh *fh = data->args.fh ? data->args.fh : &nfsi->fh; __entry->offset = data->args.offset; __entry->count = data->args.count; __entry->dev = inode->i_sb->s_dev; __entry->fileid = nfsi->fileid; __entry->fhandle = nfs_fhandle_hash(fh); ), TP_printk( "fileid=%02x:%02x:%llu fhandle=0x%08x " "offset=%lld count=%u", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, (long long)__entry->offset, __entry->count ) ); TRACE_EVENT(nfs_commit_done, TP_PROTO( const struct rpc_task *task, const struct nfs_commit_data *data ), TP_ARGS(task, data), TP_STRUCT__entry( __field(dev_t, dev) __field(u32, fhandle) __field(u64, fileid) __field(loff_t, offset) __field(int, error) __field(unsigned long, stable) __array(char, verifier, NFS4_VERIFIER_SIZE) ), TP_fast_assign( const struct inode *inode = data->inode; const struct nfs_inode *nfsi = NFS_I(inode); const struct nfs_fh *fh = data->args.fh ? 
data->args.fh : &nfsi->fh; const struct nfs_writeverf *verf = data->res.verf; __entry->error = task->tk_status; __entry->offset = data->args.offset; __entry->stable = verf->committed; memcpy(__entry->verifier, &verf->verifier, NFS4_VERIFIER_SIZE); __entry->dev = inode->i_sb->s_dev; __entry->fileid = nfsi->fileid; __entry->fhandle = nfs_fhandle_hash(fh); ), TP_printk( "error=%d fileid=%02x:%02x:%llu fhandle=0x%08x " "offset=%lld stable=%s verifier=%s", __entry->error, MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, (long long)__entry->offset, show_nfs_stable_how(__entry->stable), show_nfs4_verifier(__entry->verifier) ) ); #define nfs_show_direct_req_flags(v) \ __print_flags(v, "|", \ { NFS_ODIRECT_DO_COMMIT, "DO_COMMIT" }, \ { NFS_ODIRECT_RESCHED_WRITES, "RESCHED_WRITES" }, \ { NFS_ODIRECT_SHOULD_DIRTY, "SHOULD DIRTY" }, \ { NFS_ODIRECT_DONE, "DONE" } ) DECLARE_EVENT_CLASS(nfs_direct_req_class, TP_PROTO( const struct nfs_direct_req *dreq ), TP_ARGS(dreq), TP_STRUCT__entry( __field(dev_t, dev) __field(u64, fileid) __field(u32, fhandle) __field(loff_t, offset) __field(ssize_t, count) __field(ssize_t, error) __field(int, flags) ), TP_fast_assign( const struct inode *inode = dreq->inode; const struct nfs_inode *nfsi = NFS_I(inode); const struct nfs_fh *fh = &nfsi->fh; __entry->dev = inode->i_sb->s_dev; __entry->fileid = nfsi->fileid; __entry->fhandle = nfs_fhandle_hash(fh); __entry->offset = dreq->io_start; __entry->count = dreq->count; __entry->error = dreq->error; __entry->flags = dreq->flags; ), TP_printk( "error=%zd fileid=%02x:%02x:%llu fhandle=0x%08x " "offset=%lld count=%zd flags=%s", __entry->error, MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, __entry->offset, __entry->count, nfs_show_direct_req_flags(__entry->flags) ) ); #define DEFINE_NFS_DIRECT_REQ_EVENT(name) \ DEFINE_EVENT(nfs_direct_req_class, name, \ TP_PROTO( \ const struct nfs_direct_req *dreq \ ), \ TP_ARGS(dreq)) DEFINE_NFS_DIRECT_REQ_EVENT(nfs_direct_commit_complete); DEFINE_NFS_DIRECT_REQ_EVENT(nfs_direct_resched_write); DEFINE_NFS_DIRECT_REQ_EVENT(nfs_direct_write_complete); DEFINE_NFS_DIRECT_REQ_EVENT(nfs_direct_write_completion); DEFINE_NFS_DIRECT_REQ_EVENT(nfs_direct_write_schedule_iovec); DEFINE_NFS_DIRECT_REQ_EVENT(nfs_direct_write_reschedule_io); TRACE_EVENT(nfs_fh_to_dentry, TP_PROTO( const struct super_block *sb, const struct nfs_fh *fh, u64 fileid, int error ), TP_ARGS(sb, fh, fileid, error), TP_STRUCT__entry( __field(int, error) __field(dev_t, dev) __field(u32, fhandle) __field(u64, fileid) ), TP_fast_assign( __entry->error = error; __entry->dev = sb->s_dev; __entry->fileid = fileid; __entry->fhandle = nfs_fhandle_hash(fh); ), TP_printk( "error=%d fileid=%02x:%02x:%llu fhandle=0x%08x ", __entry->error, MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle ) ); TRACE_EVENT(nfs_mount_assign, TP_PROTO( const char *option, const char *value ), TP_ARGS(option, value), TP_STRUCT__entry( __string(option, option) __string(value, value) ), TP_fast_assign( __assign_str(option); __assign_str(value); ), TP_printk("option %s=%s", __get_str(option), __get_str(value) ) ); TRACE_EVENT(nfs_mount_option, TP_PROTO( const struct fs_parameter *param ), TP_ARGS(param), TP_STRUCT__entry( __string(option, param->key) ), TP_fast_assign( __assign_str(option); ), TP_printk("option %s", __get_str(option)) ); TRACE_EVENT(nfs_mount_path, TP_PROTO( const char *path ), TP_ARGS(path), TP_STRUCT__entry( 
__string(path, path) ), TP_fast_assign( __assign_str(path); ), TP_printk("path='%s'", __get_str(path)) ); TRACE_EVENT(nfs_local_open_fh, TP_PROTO( const struct nfs_fh *fh, fmode_t fmode, int error ), TP_ARGS(fh, fmode, error), TP_STRUCT__entry( __field(int, error) __field(u32, fhandle) __field(unsigned int, fmode) ), TP_fast_assign( __entry->error = error; __entry->fhandle = nfs_fhandle_hash(fh); __entry->fmode = (__force unsigned int)fmode; ), TP_printk( "error=%d fhandle=0x%08x mode=%s", __entry->error, __entry->fhandle, show_fs_fmode_flags(__entry->fmode) ) ); DECLARE_EVENT_CLASS(nfs_xdr_event, TP_PROTO( const struct xdr_stream *xdr, int error ), TP_ARGS(xdr, error), TP_STRUCT__entry( __field(unsigned int, task_id) __field(unsigned int, client_id) __field(u32, xid) __field(int, version) __field(unsigned long, error) __string(program, xdr->rqst->rq_task->tk_client->cl_program->name) __string(procedure, xdr->rqst->rq_task->tk_msg.rpc_proc->p_name) ), TP_fast_assign( const struct rpc_rqst *rqstp = xdr->rqst; const struct rpc_task *task = rqstp->rq_task; __entry->task_id = task->tk_pid; __entry->client_id = task->tk_client->cl_clid; __entry->xid = be32_to_cpu(rqstp->rq_xid); __entry->version = task->tk_client->cl_vers; __entry->error = error; __assign_str(program); __assign_str(procedure); ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER " xid=0x%08x %sv%d %s error=%ld (%s)", __entry->task_id, __entry->client_id, __entry->xid, __get_str(program), __entry->version, __get_str(procedure), -__entry->error, show_nfs_status(__entry->error) ) ); #define DEFINE_NFS_XDR_EVENT(name) \ DEFINE_EVENT(nfs_xdr_event, name, \ TP_PROTO( \ const struct xdr_stream *xdr, \ int error \ ), \ TP_ARGS(xdr, error)) DEFINE_NFS_XDR_EVENT(nfs_xdr_status); DEFINE_NFS_XDR_EVENT(nfs_xdr_bad_filehandle); #endif /* _TRACE_NFS_H */ #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . #define TRACE_INCLUDE_FILE nfstrace /* This part must be outside protection */ #include <trace/define_trace.h> |
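/*
 * Illustrative sketch (not part of nfstrace.h): each DEFINE_NFS_*_EVENT()
 * above expands to a trace_<name>() static inline that callers invoke with
 * the class prototype. A hypothetical caller pairing the folio "enter" and
 * "done" events around an I/O helper might look like the code below;
 * nfs_do_read_example() is a made-up placeholder, not an NFS API.
 */
#if 0	/* example only -- never compiled */
static int nfs_read_folio_example(struct inode *inode, loff_t offset,
				  size_t count)
{
	int ret;

	trace_nfs_aop_readpage(inode, offset, count);		/* enter event */
	ret = nfs_do_read_example(inode, offset, count);	/* placeholder I/O */
	trace_nfs_aop_readpage_done(inode, offset, count, ret);	/* done event */
	return ret;
}
#endif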
// SPDX-License-Identifier: GPL-2.0-only #include <net/ip.h> #include <net/tcp.h> #include <net/netfilter/nf_tables.h> #include <linux/netfilter/nfnetlink_osf.h> struct nft_osf { u8 dreg; u8 ttl; u32 flags; }; static const struct nla_policy nft_osf_policy[NFTA_OSF_MAX + 1] = { [NFTA_OSF_DREG] = { .type = NLA_U32 }, [NFTA_OSF_TTL] = { .type = NLA_U8 }, [NFTA_OSF_FLAGS] = { .type = NLA_U32 }, }; static void nft_osf_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { struct nft_osf *priv = nft_expr_priv(expr); u32 *dest = &regs->data[priv->dreg]; struct sk_buff *skb = pkt->skb; char os_match[NFT_OSF_MAXGENRELEN]; const struct tcphdr *tcp; struct nf_osf_data data; struct tcphdr _tcph; if (pkt->tprot != IPPROTO_TCP) { regs->verdict.code = NFT_BREAK; return; } tcp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(struct tcphdr), &_tcph); if (!tcp) { regs->verdict.code = NFT_BREAK; return; } if (!tcp->syn) { regs->verdict.code = NFT_BREAK; return; } if (!nf_osf_find(skb, nf_osf_fingers, priv->ttl, &data)) { strscpy_pad((char *)dest, "unknown", NFT_OSF_MAXGENRELEN); } else { if (priv->flags & NFT_OSF_F_VERSION) snprintf(os_match, NFT_OSF_MAXGENRELEN, "%s:%s", data.genre, data.version); else strscpy(os_match, data.genre, NFT_OSF_MAXGENRELEN); strscpy_pad((char *)dest, os_match, NFT_OSF_MAXGENRELEN); } } static int nft_osf_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_osf *priv = nft_expr_priv(expr); u32 flags; u8 ttl; if (!tb[NFTA_OSF_DREG]) return -EINVAL; if (tb[NFTA_OSF_TTL]) { ttl = nla_get_u8(tb[NFTA_OSF_TTL]); if (ttl > 2) return -EINVAL; priv->ttl = ttl; } if (tb[NFTA_OSF_FLAGS]) { flags = ntohl(nla_get_be32(tb[NFTA_OSF_FLAGS])); if (flags != NFT_OSF_F_VERSION) return -EINVAL; priv->flags = flags; } return nft_parse_register_store(ctx, tb[NFTA_OSF_DREG], &priv->dreg, NULL, NFT_DATA_VALUE, NFT_OSF_MAXGENRELEN); } static int nft_osf_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset) { const struct nft_osf *priv = nft_expr_priv(expr); if (nla_put_u8(skb, NFTA_OSF_TTL, priv->ttl)) goto nla_put_failure; if (nla_put_u32(skb, NFTA_OSF_FLAGS, ntohl((__force __be32)priv->flags))) goto nla_put_failure; if (nft_dump_register(skb, NFTA_OSF_DREG, priv->dreg)) goto nla_put_failure; return 0; nla_put_failure: return -1; } static int nft_osf_validate(const struct nft_ctx *ctx, const struct nft_expr *expr) { unsigned int hooks; switch (ctx->family) { case NFPROTO_IPV4: case NFPROTO_IPV6: case NFPROTO_INET: hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_FORWARD); break; default: return -EOPNOTSUPP; } return nft_chain_validate_hooks(ctx->chain, hooks); } static bool nft_osf_reduce(struct nft_regs_track *track, const struct nft_expr *expr) { struct nft_osf *priv = nft_expr_priv(expr); struct
nft_osf *osf; if (!nft_reg_track_cmp(track, expr, priv->dreg)) { nft_reg_track_update(track, expr, priv->dreg, NFT_OSF_MAXGENRELEN); return false; } osf = nft_expr_priv(track->regs[priv->dreg].selector); if (priv->flags != osf->flags || priv->ttl != osf->ttl) { nft_reg_track_update(track, expr, priv->dreg, NFT_OSF_MAXGENRELEN); return false; } if (!track->regs[priv->dreg].bitwise) return true; return false; } static struct nft_expr_type nft_osf_type; static const struct nft_expr_ops nft_osf_op = { .eval = nft_osf_eval, .size = NFT_EXPR_SIZE(sizeof(struct nft_osf)), .init = nft_osf_init, .dump = nft_osf_dump, .type = &nft_osf_type, .validate = nft_osf_validate, .reduce = nft_osf_reduce, }; static struct nft_expr_type nft_osf_type __read_mostly = { .ops = &nft_osf_op, .name = "osf", .owner = THIS_MODULE, .policy = nft_osf_policy, .maxattr = NFTA_OSF_MAX, }; static int __init nft_osf_module_init(void) { return nft_register_expr(&nft_osf_type); } static void __exit nft_osf_module_exit(void) { return nft_unregister_expr(&nft_osf_type); } module_init(nft_osf_module_init); module_exit(nft_osf_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Fernando Fernandez <ffmancera@riseup.net>"); MODULE_ALIAS_NFT_EXPR("osf"); MODULE_DESCRIPTION("nftables passive OS fingerprint support"); |
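/*
 * Illustrative sketch (not part of nft_osf.c): nft_osf_eval() stores a
 * NUL-padded genre string of up to NFT_OSF_MAXGENRELEN bytes starting at
 * regs->data[priv->dreg]; a later expression in the same rule (typically
 * nft_cmp) matches against those bytes. The hypothetical helper below only
 * shows how a consumer would interpret the register contents laid out by
 * this expression.
 */
#if 0	/* example only -- never compiled */
static bool example_osf_genre_is(const struct nft_regs *regs, u8 dreg,
				 const char *genre)
{
	const char *stored = (const char *)&regs->data[dreg];

	/* "unknown" is stored when no fingerprint matched */
	return strncmp(stored, genre, NFT_OSF_MAXGENRELEN) == 0;
}
#endif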
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM huge_memory #if !defined(__HUGE_MEMORY_H) || defined(TRACE_HEADER_MULTI_READ) #define __HUGE_MEMORY_H #include <linux/tracepoint.h> #define SCAN_STATUS \ EM( SCAN_FAIL, "failed") \ EM( SCAN_SUCCEED, "succeeded") \ EM( SCAN_PMD_NULL, "pmd_null") \ EM( SCAN_PMD_NONE, "pmd_none") \ EM( SCAN_PMD_MAPPED, "page_pmd_mapped") \ EM( SCAN_EXCEED_NONE_PTE, "exceed_none_pte") \ EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \ EM( SCAN_EXCEED_SHARED_PTE, "exceed_shared_pte") \ EM( SCAN_PTE_NON_PRESENT, "pte_non_present") \ EM( SCAN_PTE_UFFD_WP, "pte_uffd_wp") \ EM( SCAN_PTE_MAPPED_HUGEPAGE, "pte_mapped_hugepage") \ EM( SCAN_PAGE_RO, "no_writable_page") \ EM( SCAN_LACK_REFERENCED_PAGE, "lack_referenced_page") \ EM( SCAN_PAGE_NULL, "page_null") \ EM( SCAN_SCAN_ABORT, "scan_aborted") \ EM( SCAN_PAGE_COUNT, "not_suitable_page_count") \ EM( SCAN_PAGE_LRU, "page_not_in_lru") \ EM( SCAN_PAGE_LOCK, "page_locked") \ EM( SCAN_PAGE_ANON, "page_not_anon") \ EM( SCAN_PAGE_COMPOUND, "page_compound") \ EM( SCAN_ANY_PROCESS, "no_process_for_page") \ EM( SCAN_VMA_NULL, "vma_null") \ EM( SCAN_VMA_CHECK, "vma_check_failed") \ EM( SCAN_ADDRESS_RANGE, "not_suitable_address_range") \ EM( SCAN_DEL_PAGE_LRU, "could_not_delete_page_from_lru")\ EM( SCAN_ALLOC_HUGE_PAGE_FAIL, "alloc_huge_page_failed") \ EM( SCAN_CGROUP_CHARGE_FAIL, "cgroup_charge_failed") \ EM( SCAN_TRUNCATED, "truncated") \ EM( SCAN_PAGE_HAS_PRIVATE, "page_has_private") \ EM( SCAN_STORE_FAILED, "store_failed") \ EM( SCAN_COPY_MC, "copy_poisoned_page") \ EMe(SCAN_PAGE_FILLED, "page_filled") #undef EM #undef EMe #define EM(a, b) TRACE_DEFINE_ENUM(a); #define EMe(a, b) TRACE_DEFINE_ENUM(a); SCAN_STATUS #undef EM #undef EMe #define EM(a, b) {a, b}, #define EMe(a, b) {a, b} TRACE_EVENT(mm_khugepaged_scan_pmd, TP_PROTO(struct mm_struct *mm, struct page *page, bool writable, int referenced, int none_or_zero, int status, int unmapped), TP_ARGS(mm, page, writable, referenced, none_or_zero, status, unmapped), TP_STRUCT__entry( __field(struct mm_struct *, mm) __field(unsigned long, pfn) __field(bool, writable) __field(int, referenced) __field(int, none_or_zero) __field(int, status) __field(int, unmapped) ), TP_fast_assign( __entry->mm = mm; __entry->pfn = page ?
page_to_pfn(page) : -1; __entry->writable = writable; __entry->referenced = referenced; __entry->none_or_zero = none_or_zero; __entry->status = status; __entry->unmapped = unmapped; ), TP_printk("mm=%p, scan_pfn=0x%lx, writable=%d, referenced=%d, none_or_zero=%d, status=%s, unmapped=%d", __entry->mm, __entry->pfn, __entry->writable, __entry->referenced, __entry->none_or_zero, __print_symbolic(__entry->status, SCAN_STATUS), __entry->unmapped) ); TRACE_EVENT(mm_collapse_huge_page, TP_PROTO(struct mm_struct *mm, int isolated, int status), TP_ARGS(mm, isolated, status), TP_STRUCT__entry( __field(struct mm_struct *, mm) __field(int, isolated) __field(int, status) ), TP_fast_assign( __entry->mm = mm; __entry->isolated = isolated; __entry->status = status; ), TP_printk("mm=%p, isolated=%d, status=%s", __entry->mm, __entry->isolated, __print_symbolic(__entry->status, SCAN_STATUS)) ); TRACE_EVENT(mm_collapse_huge_page_isolate, TP_PROTO(struct page *page, int none_or_zero, int referenced, bool writable, int status), TP_ARGS(page, none_or_zero, referenced, writable, status), TP_STRUCT__entry( __field(unsigned long, pfn) __field(int, none_or_zero) __field(int, referenced) __field(bool, writable) __field(int, status) ), TP_fast_assign( __entry->pfn = page ? page_to_pfn(page) : -1; __entry->none_or_zero = none_or_zero; __entry->referenced = referenced; __entry->writable = writable; __entry->status = status; ), TP_printk("scan_pfn=0x%lx, none_or_zero=%d, referenced=%d, writable=%d, status=%s", __entry->pfn, __entry->none_or_zero, __entry->referenced, __entry->writable, __print_symbolic(__entry->status, SCAN_STATUS)) ); TRACE_EVENT(mm_collapse_huge_page_swapin, TP_PROTO(struct mm_struct *mm, int swapped_in, int referenced, int ret), TP_ARGS(mm, swapped_in, referenced, ret), TP_STRUCT__entry( __field(struct mm_struct *, mm) __field(int, swapped_in) __field(int, referenced) __field(int, ret) ), TP_fast_assign( __entry->mm = mm; __entry->swapped_in = swapped_in; __entry->referenced = referenced; __entry->ret = ret; ), TP_printk("mm=%p, swapped_in=%d, referenced=%d, ret=%d", __entry->mm, __entry->swapped_in, __entry->referenced, __entry->ret) ); TRACE_EVENT(mm_khugepaged_scan_file, TP_PROTO(struct mm_struct *mm, struct folio *folio, struct file *file, int present, int swap, int result), TP_ARGS(mm, folio, file, present, swap, result), TP_STRUCT__entry( __field(struct mm_struct *, mm) __field(unsigned long, pfn) __string(filename, file->f_path.dentry->d_iname) __field(int, present) __field(int, swap) __field(int, result) ), TP_fast_assign( __entry->mm = mm; __entry->pfn = folio ? folio_pfn(folio) : -1; __assign_str(filename); __entry->present = present; __entry->swap = swap; __entry->result = result; ), TP_printk("mm=%p, scan_pfn=0x%lx, filename=%s, present=%d, swap=%d, result=%s", __entry->mm, __entry->pfn, __get_str(filename), __entry->present, __entry->swap, __print_symbolic(__entry->result, SCAN_STATUS)) ); TRACE_EVENT(mm_khugepaged_collapse_file, TP_PROTO(struct mm_struct *mm, struct folio *new_folio, pgoff_t index, unsigned long addr, bool is_shmem, struct file *file, int nr, int result), TP_ARGS(mm, new_folio, index, addr, is_shmem, file, nr, result), TP_STRUCT__entry( __field(struct mm_struct *, mm) __field(unsigned long, hpfn) __field(pgoff_t, index) __field(unsigned long, addr) __field(bool, is_shmem) __string(filename, file->f_path.dentry->d_iname) __field(int, nr) __field(int, result) ), TP_fast_assign( __entry->mm = mm; __entry->hpfn = new_folio ? 
folio_pfn(new_folio) : -1; __entry->index = index; __entry->addr = addr; __entry->is_shmem = is_shmem; __assign_str(filename); __entry->nr = nr; __entry->result = result; ), TP_printk("mm=%p, hpage_pfn=0x%lx, index=%ld, addr=%lx, is_shmem=%d, filename=%s, nr=%d, result=%s", __entry->mm, __entry->hpfn, __entry->index, __entry->addr, __entry->is_shmem, __get_str(filename), __entry->nr, __print_symbolic(__entry->result, SCAN_STATUS)) ); #endif /* __HUGE_MEMORY_H */ #include <trace/define_trace.h>
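/*
 * Illustrative sketch (not part of this header): khugepaged-style code
 * reports results through the generated trace_*() inlines, passing one of
 * the SCAN_* codes so __print_symbolic() can render it via SCAN_STATUS.
 * collapse_one_pmd_example() is a made-up placeholder for wherever the
 * status code actually comes from.
 */
#if 0	/* example only -- never compiled */
static void example_report_collapse(struct mm_struct *mm)
{
	int result = collapse_one_pmd_example(mm);	/* hypothetical helper */

	/* 'isolated' is passed here as a simple success flag */
	trace_mm_collapse_huge_page(mm, result == SCAN_SUCCEED, result);
}
#endif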
// SPDX-License-Identifier: GPL-2.0 /* * platform.c - platform 'pseudo' bus for legacy devices * * Copyright (c) 2002-3 Patrick Mochel * Copyright (c) 2002-3 Open Source Development Labs * * Please see Documentation/driver-api/driver-model/platform.rst for more * information.
*/ #include <linux/string.h> #include <linux/platform_device.h> #include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/dma-mapping.h> #include <linux/memblock.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/pm_runtime.h> #include <linux/pm_domain.h> #include <linux/idr.h> #include <linux/acpi.h> #include <linux/clk/clk-conf.h> #include <linux/limits.h> #include <linux/property.h> #include <linux/kmemleak.h> #include <linux/types.h> #include <linux/iommu.h> #include <linux/dma-map-ops.h> #include "base.h" #include "power/power.h" /* For automatically allocated device IDs */ static DEFINE_IDA(platform_devid_ida); struct device platform_bus = { .init_name = "platform", }; EXPORT_SYMBOL_GPL(platform_bus); /** * platform_get_resource - get a resource for a device * @dev: platform device * @type: resource type * @num: resource index * * Return: a pointer to the resource or NULL on failure. */ struct resource *platform_get_resource(struct platform_device *dev, unsigned int type, unsigned int num) { u32 i; for (i = 0; i < dev->num_resources; i++) { struct resource *r = &dev->resource[i]; if (type == resource_type(r) && num-- == 0) return r; } return NULL; } EXPORT_SYMBOL_GPL(platform_get_resource); struct resource *platform_get_mem_or_io(struct platform_device *dev, unsigned int num) { u32 i; for (i = 0; i < dev->num_resources; i++) { struct resource *r = &dev->resource[i]; if ((resource_type(r) & (IORESOURCE_MEM|IORESOURCE_IO)) && num-- == 0) return r; } return NULL; } EXPORT_SYMBOL_GPL(platform_get_mem_or_io); #ifdef CONFIG_HAS_IOMEM /** * devm_platform_get_and_ioremap_resource - call devm_ioremap_resource() for a * platform device and get resource * * @pdev: platform device to use both for memory resource lookup as well as * resource management * @index: resource index * @res: optional output parameter to store a pointer to the obtained resource. * * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code * on failure. */ void __iomem * devm_platform_get_and_ioremap_resource(struct platform_device *pdev, unsigned int index, struct resource **res) { struct resource *r; r = platform_get_resource(pdev, IORESOURCE_MEM, index); if (res) *res = r; return devm_ioremap_resource(&pdev->dev, r); } EXPORT_SYMBOL_GPL(devm_platform_get_and_ioremap_resource); /** * devm_platform_ioremap_resource - call devm_ioremap_resource() for a platform * device * * @pdev: platform device to use both for memory resource lookup as well as * resource management * @index: resource index * * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code * on failure. */ void __iomem *devm_platform_ioremap_resource(struct platform_device *pdev, unsigned int index) { return devm_platform_get_and_ioremap_resource(pdev, index, NULL); } EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource); /** * devm_platform_ioremap_resource_byname - call devm_ioremap_resource for * a platform device, retrieve the * resource by name * * @pdev: platform device to use both for memory resource lookup as well as * resource management * @name: name of the resource * * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code * on failure. 
*/ void __iomem * devm_platform_ioremap_resource_byname(struct platform_device *pdev, const char *name) { struct resource *res; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); return devm_ioremap_resource(&pdev->dev, res); } EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname); #endif /* CONFIG_HAS_IOMEM */ /** * platform_get_irq_optional - get an optional IRQ for a device * @dev: platform device * @num: IRQ number index * * Gets an IRQ for a platform device. Device drivers should check the return * value for errors so as to not pass a negative integer value to the * request_irq() APIs. This is the same as platform_get_irq(), except that it * does not print an error message if an IRQ can not be obtained. * * For example:: * * int irq = platform_get_irq_optional(pdev, 0); * if (irq < 0) * return irq; * * Return: non-zero IRQ number on success, negative error number on failure. */ int platform_get_irq_optional(struct platform_device *dev, unsigned int num) { int ret; #ifdef CONFIG_SPARC /* sparc does not have irqs represented as IORESOURCE_IRQ resources */ if (!dev || num >= dev->archdata.num_irqs) goto out_not_found; ret = dev->archdata.irqs[num]; goto out; #else struct fwnode_handle *fwnode = dev_fwnode(&dev->dev); struct resource *r; if (is_of_node(fwnode)) { ret = of_irq_get(to_of_node(fwnode), num); if (ret > 0 || ret == -EPROBE_DEFER) goto out; } r = platform_get_resource(dev, IORESOURCE_IRQ, num); if (is_acpi_device_node(fwnode)) { if (r && r->flags & IORESOURCE_DISABLED) { ret = acpi_irq_get(ACPI_HANDLE_FWNODE(fwnode), num, r); if (ret) goto out; } } /* * The resources may pass trigger flags to the irqs that need * to be set up. It so happens that the trigger flags for * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER* * settings. */ if (r && r->flags & IORESOURCE_BITS) { struct irq_data *irqd; irqd = irq_get_irq_data(r->start); if (!irqd) goto out_not_found; irqd_set_trigger_type(irqd, r->flags & IORESOURCE_BITS); } if (r) { ret = r->start; goto out; } /* * For the index 0 interrupt, allow falling back to GpioInt * resources. While a device could have both Interrupt and GpioInt * resources, making this fallback ambiguous, in many common cases * the device will only expose one IRQ, and this fallback * allows a common code path across either kind of resource. */ if (num == 0 && is_acpi_device_node(fwnode)) { ret = acpi_dev_gpio_irq_get(to_acpi_device_node(fwnode), num); /* Our callers expect -ENXIO for missing IRQs. */ if (ret >= 0 || ret == -EPROBE_DEFER) goto out; } #endif out_not_found: ret = -ENXIO; out: if (WARN(!ret, "0 is an invalid IRQ number\n")) return -EINVAL; return ret; } EXPORT_SYMBOL_GPL(platform_get_irq_optional); /** * platform_get_irq - get an IRQ for a device * @dev: platform device * @num: IRQ number index * * Gets an IRQ for a platform device and prints an error message if finding the * IRQ fails. Device drivers should check the return value for errors so as to * not pass a negative integer value to the request_irq() APIs. * * For example:: * * int irq = platform_get_irq(pdev, 0); * if (irq < 0) * return irq; * * Return: non-zero IRQ number on success, negative error number on failure. 
*/ int platform_get_irq(struct platform_device *dev, unsigned int num) { int ret; ret = platform_get_irq_optional(dev, num); if (ret < 0) return dev_err_probe(&dev->dev, ret, "IRQ index %u not found\n", num); return ret; } EXPORT_SYMBOL_GPL(platform_get_irq); /** * platform_irq_count - Count the number of IRQs a platform device uses * @dev: platform device * * Return: Number of IRQs a platform device uses or EPROBE_DEFER */ int platform_irq_count(struct platform_device *dev) { int ret, nr = 0; while ((ret = platform_get_irq_optional(dev, nr)) >= 0) nr++; if (ret == -EPROBE_DEFER) return ret; return nr; } EXPORT_SYMBOL_GPL(platform_irq_count); struct irq_affinity_devres { unsigned int count; unsigned int irq[] __counted_by(count); }; static void platform_disable_acpi_irq(struct platform_device *pdev, int index) { struct resource *r; r = platform_get_resource(pdev, IORESOURCE_IRQ, index); if (r) irqresource_disabled(r, 0); } static void devm_platform_get_irqs_affinity_release(struct device *dev, void *res) { struct irq_affinity_devres *ptr = res; int i; for (i = 0; i < ptr->count; i++) { irq_dispose_mapping(ptr->irq[i]); if (is_acpi_device_node(dev_fwnode(dev))) platform_disable_acpi_irq(to_platform_device(dev), i); } } /** * devm_platform_get_irqs_affinity - devm method to get a set of IRQs for a * device using an interrupt affinity descriptor * @dev: platform device pointer * @affd: affinity descriptor * @minvec: minimum count of interrupt vectors * @maxvec: maximum count of interrupt vectors * @irqs: pointer holder for IRQ numbers * * Gets a set of IRQs for a platform device, and updates IRQ afffinty according * to the passed affinity descriptor * * Return: Number of vectors on success, negative error number on failure. */ int devm_platform_get_irqs_affinity(struct platform_device *dev, struct irq_affinity *affd, unsigned int minvec, unsigned int maxvec, int **irqs) { struct irq_affinity_devres *ptr; struct irq_affinity_desc *desc; size_t size; int i, ret, nvec; if (!affd) return -EPERM; if (maxvec < minvec) return -ERANGE; nvec = platform_irq_count(dev); if (nvec < 0) return nvec; if (nvec < minvec) return -ENOSPC; nvec = irq_calc_affinity_vectors(minvec, nvec, affd); if (nvec < minvec) return -ENOSPC; if (nvec > maxvec) nvec = maxvec; size = sizeof(*ptr) + sizeof(unsigned int) * nvec; ptr = devres_alloc(devm_platform_get_irqs_affinity_release, size, GFP_KERNEL); if (!ptr) return -ENOMEM; ptr->count = nvec; for (i = 0; i < nvec; i++) { int irq = platform_get_irq(dev, i); if (irq < 0) { ret = irq; goto err_free_devres; } ptr->irq[i] = irq; } desc = irq_create_affinity_masks(nvec, affd); if (!desc) { ret = -ENOMEM; goto err_free_devres; } for (i = 0; i < nvec; i++) { ret = irq_update_affinity_desc(ptr->irq[i], &desc[i]); if (ret) { dev_err(&dev->dev, "failed to update irq%d affinity descriptor (%d)\n", ptr->irq[i], ret); goto err_free_desc; } } devres_add(&dev->dev, ptr); kfree(desc); *irqs = ptr->irq; return nvec; err_free_desc: kfree(desc); err_free_devres: devres_free(ptr); return ret; } EXPORT_SYMBOL_GPL(devm_platform_get_irqs_affinity); /** * platform_get_resource_byname - get a resource for a device by name * @dev: platform device * @type: resource type * @name: resource name */ struct resource *platform_get_resource_byname(struct platform_device *dev, unsigned int type, const char *name) { u32 i; for (i = 0; i < dev->num_resources; i++) { struct resource *r = &dev->resource[i]; if (unlikely(!r->name)) continue; if (type == resource_type(r) && !strcmp(r->name, name)) return r; } 
return NULL; } EXPORT_SYMBOL_GPL(platform_get_resource_byname); static int __platform_get_irq_byname(struct platform_device *dev, const char *name) { struct resource *r; int ret; ret = fwnode_irq_get_byname(dev_fwnode(&dev->dev), name); if (ret > 0 || ret == -EPROBE_DEFER) return ret; r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name); if (r) { if (WARN(!r->start, "0 is an invalid IRQ number\n")) return -EINVAL; return r->start; } return -ENXIO; } /** * platform_get_irq_byname - get an IRQ for a device by name * @dev: platform device * @name: IRQ name * * Get an IRQ like platform_get_irq(), but then by name rather then by index. * * Return: non-zero IRQ number on success, negative error number on failure. */ int platform_get_irq_byname(struct platform_device *dev, const char *name) { int ret; ret = __platform_get_irq_byname(dev, name); if (ret < 0) return dev_err_probe(&dev->dev, ret, "IRQ %s not found\n", name); return ret; } EXPORT_SYMBOL_GPL(platform_get_irq_byname); /** * platform_get_irq_byname_optional - get an optional IRQ for a device by name * @dev: platform device * @name: IRQ name * * Get an optional IRQ by name like platform_get_irq_byname(). Except that it * does not print an error message if an IRQ can not be obtained. * * Return: non-zero IRQ number on success, negative error number on failure. */ int platform_get_irq_byname_optional(struct platform_device *dev, const char *name) { return __platform_get_irq_byname(dev, name); } EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional); /** * platform_add_devices - add a numbers of platform devices * @devs: array of platform devices to add * @num: number of platform devices in array * * Return: 0 on success, negative error number on failure. */ int platform_add_devices(struct platform_device **devs, int num) { int i, ret = 0; for (i = 0; i < num; i++) { ret = platform_device_register(devs[i]); if (ret) { while (--i >= 0) platform_device_unregister(devs[i]); break; } } return ret; } EXPORT_SYMBOL_GPL(platform_add_devices); struct platform_object { struct platform_device pdev; char name[]; }; /* * Set up default DMA mask for platform devices if the they weren't * previously set by the architecture / DT. */ static void setup_pdev_dma_masks(struct platform_device *pdev) { pdev->dev.dma_parms = &pdev->dma_parms; if (!pdev->dev.coherent_dma_mask) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); if (!pdev->dev.dma_mask) { pdev->platform_dma_mask = DMA_BIT_MASK(32); pdev->dev.dma_mask = &pdev->platform_dma_mask; } }; /** * platform_device_put - destroy a platform device * @pdev: platform device to free * * Free all memory associated with a platform device. This function must * _only_ be externally called in error cases. All other usage is a bug. */ void platform_device_put(struct platform_device *pdev) { if (!IS_ERR_OR_NULL(pdev)) put_device(&pdev->dev); } EXPORT_SYMBOL_GPL(platform_device_put); static void platform_device_release(struct device *dev) { struct platform_object *pa = container_of(dev, struct platform_object, pdev.dev); of_node_put(pa->pdev.dev.of_node); kfree(pa->pdev.dev.platform_data); kfree(pa->pdev.mfd_cell); kfree(pa->pdev.resource); kfree(pa->pdev.driver_override); kfree(pa); } /** * platform_device_alloc - create a platform device * @name: base name of the device we're adding * @id: instance id * * Create a platform device object which can have other objects attached * to it, and which will have attached objects freed when it is released. 
*/ struct platform_device *platform_device_alloc(const char *name, int id) { struct platform_object *pa; pa = kzalloc(sizeof(*pa) + strlen(name) + 1, GFP_KERNEL); if (pa) { strcpy(pa->name, name); pa->pdev.name = pa->name; pa->pdev.id = id; device_initialize(&pa->pdev.dev); pa->pdev.dev.release = platform_device_release; setup_pdev_dma_masks(&pa->pdev); } return pa ? &pa->pdev : NULL; } EXPORT_SYMBOL_GPL(platform_device_alloc); /** * platform_device_add_resources - add resources to a platform device * @pdev: platform device allocated by platform_device_alloc to add resources to * @res: set of resources that needs to be allocated for the device * @num: number of resources * * Add a copy of the resources to the platform device. The memory * associated with the resources will be freed when the platform device is * released. */ int platform_device_add_resources(struct platform_device *pdev, const struct resource *res, unsigned int num) { struct resource *r = NULL; if (res) { r = kmemdup_array(res, num, sizeof(*r), GFP_KERNEL); if (!r) return -ENOMEM; } kfree(pdev->resource); pdev->resource = r; pdev->num_resources = num; return 0; } EXPORT_SYMBOL_GPL(platform_device_add_resources); /** * platform_device_add_data - add platform-specific data to a platform device * @pdev: platform device allocated by platform_device_alloc to add resources to * @data: platform specific data for this platform device * @size: size of platform specific data * * Add a copy of platform specific data to the platform device's * platform_data pointer. The memory associated with the platform data * will be freed when the platform device is released. */ int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size) { void *d = NULL; if (data) { d = kmemdup(data, size, GFP_KERNEL); if (!d) return -ENOMEM; } kfree(pdev->dev.platform_data); pdev->dev.platform_data = d; return 0; } EXPORT_SYMBOL_GPL(platform_device_add_data); /** * platform_device_add - add a platform device to device hierarchy * @pdev: platform device we're adding * * This is part 2 of platform_device_register(), though may be called * separately _iff_ pdev was allocated by platform_device_alloc(). */ int platform_device_add(struct platform_device *pdev) { struct device *dev = &pdev->dev; u32 i; int ret; if (!dev->parent) dev->parent = &platform_bus; dev->bus = &platform_bus_type; switch (pdev->id) { default: dev_set_name(dev, "%s.%d", pdev->name, pdev->id); break; case PLATFORM_DEVID_NONE: dev_set_name(dev, "%s", pdev->name); break; case PLATFORM_DEVID_AUTO: /* * Automatically allocated device ID. We mark it as such so * that we remember it must be freed, and we append a suffix * to avoid namespace collision with explicit IDs. */ ret = ida_alloc(&platform_devid_ida, GFP_KERNEL); if (ret < 0) return ret; pdev->id = ret; pdev->id_auto = true; dev_set_name(dev, "%s.%d.auto", pdev->name, pdev->id); break; } for (i = 0; i < pdev->num_resources; i++) { struct resource *p, *r = &pdev->resource[i]; if (r->name == NULL) r->name = dev_name(dev); p = r->parent; if (!p) { if (resource_type(r) == IORESOURCE_MEM) p = &iomem_resource; else if (resource_type(r) == IORESOURCE_IO) p = &ioport_resource; } if (p) { ret = insert_resource(p, r); if (ret) { dev_err(dev, "failed to claim resource %d: %pR\n", i, r); goto failed; } } } pr_debug("Registering platform device '%s'. 
Parent at %s\n", dev_name(dev), dev_name(dev->parent)); ret = device_add(dev); if (ret) goto failed; return 0; failed: if (pdev->id_auto) { ida_free(&platform_devid_ida, pdev->id); pdev->id = PLATFORM_DEVID_AUTO; } while (i--) { struct resource *r = &pdev->resource[i]; if (r->parent) release_resource(r); } return ret; } EXPORT_SYMBOL_GPL(platform_device_add); /** * platform_device_del - remove a platform-level device * @pdev: platform device we're removing * * Note that this function will also release all memory- and port-based * resources owned by the device (@dev->resource). This function must * _only_ be externally called in error cases. All other usage is a bug. */ void platform_device_del(struct platform_device *pdev) { u32 i; if (!IS_ERR_OR_NULL(pdev)) { device_del(&pdev->dev); if (pdev->id_auto) { ida_free(&platform_devid_ida, pdev->id); pdev->id = PLATFORM_DEVID_AUTO; } for (i = 0; i < pdev->num_resources; i++) { struct resource *r = &pdev->resource[i]; if (r->parent) release_resource(r); } } } EXPORT_SYMBOL_GPL(platform_device_del); /** * platform_device_register - add a platform-level device * @pdev: platform device we're adding * * NOTE: _Never_ directly free @pdev after calling this function, even if it * returned an error! Always use platform_device_put() to give up the * reference initialised in this function instead. */ int platform_device_register(struct platform_device *pdev) { device_initialize(&pdev->dev); setup_pdev_dma_masks(pdev); return platform_device_add(pdev); } EXPORT_SYMBOL_GPL(platform_device_register); /** * platform_device_unregister - unregister a platform-level device * @pdev: platform device we're unregistering * * Unregistration is done in 2 steps. First we release all resources * and remove it from the subsystem, then we drop reference count by * calling platform_device_put(). */ void platform_device_unregister(struct platform_device *pdev) { platform_device_del(pdev); platform_device_put(pdev); } EXPORT_SYMBOL_GPL(platform_device_unregister); /** * platform_device_register_full - add a platform-level device with * resources and platform-specific data * * @pdevinfo: data used to create device * * Returns &struct platform_device pointer on success, or ERR_PTR() on error. 
*/ struct platform_device *platform_device_register_full( const struct platform_device_info *pdevinfo) { int ret; struct platform_device *pdev; pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id); if (!pdev) return ERR_PTR(-ENOMEM); pdev->dev.parent = pdevinfo->parent; pdev->dev.fwnode = pdevinfo->fwnode; pdev->dev.of_node = of_node_get(to_of_node(pdev->dev.fwnode)); pdev->dev.of_node_reused = pdevinfo->of_node_reused; if (pdevinfo->dma_mask) { pdev->platform_dma_mask = pdevinfo->dma_mask; pdev->dev.dma_mask = &pdev->platform_dma_mask; pdev->dev.coherent_dma_mask = pdevinfo->dma_mask; } ret = platform_device_add_resources(pdev, pdevinfo->res, pdevinfo->num_res); if (ret) goto err; ret = platform_device_add_data(pdev, pdevinfo->data, pdevinfo->size_data); if (ret) goto err; if (pdevinfo->properties) { ret = device_create_managed_software_node(&pdev->dev, pdevinfo->properties, NULL); if (ret) goto err; } ret = platform_device_add(pdev); if (ret) { err: ACPI_COMPANION_SET(&pdev->dev, NULL); platform_device_put(pdev); return ERR_PTR(ret); } return pdev; } EXPORT_SYMBOL_GPL(platform_device_register_full); /** * __platform_driver_register - register a driver for platform-level devices * @drv: platform driver structure * @owner: owning module/driver */ int __platform_driver_register(struct platform_driver *drv, struct module *owner) { drv->driver.owner = owner; drv->driver.bus = &platform_bus_type; return driver_register(&drv->driver); } EXPORT_SYMBOL_GPL(__platform_driver_register); /** * platform_driver_unregister - unregister a driver for platform-level devices * @drv: platform driver structure */ void platform_driver_unregister(struct platform_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(platform_driver_unregister); static int platform_probe_fail(struct platform_device *pdev) { return -ENXIO; } static int is_bound_to_driver(struct device *dev, void *driver) { if (dev->driver == driver) return 1; return 0; } /** * __platform_driver_probe - register driver for non-hotpluggable device * @drv: platform driver structure * @probe: the driver probe routine, probably from an __init section * @module: module which will be the owner of the driver * * Use this instead of platform_driver_register() when you know the device * is not hotpluggable and has already been registered, and you want to * remove its run-once probe() infrastructure from memory after the driver * has bound to the device. * * One typical use for this would be with drivers for controllers integrated * into system-on-chip processors, where the controller devices have been * configured as part of board setup. * * Note that this is incompatible with deferred probing. * * Returns zero if the driver registered and bound to a device, else returns * a negative error code and with the driver not registered. */ int __init_or_module __platform_driver_probe(struct platform_driver *drv, int (*probe)(struct platform_device *), struct module *module) { int retval; if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) { pr_err("%s: drivers registered with %s can not be probed asynchronously\n", drv->driver.name, __func__); return -EINVAL; } /* * We have to run our probes synchronously because we check if * we find any devices to bind to and exit with error if there * are any. */ drv->driver.probe_type = PROBE_FORCE_SYNCHRONOUS; /* * Prevent driver from requesting probe deferral to avoid further * futile probe attempts. 
*/ drv->prevent_deferred_probe = true; /* make sure driver won't have bind/unbind attributes */ drv->driver.suppress_bind_attrs = true; /* temporary section violation during probe() */ drv->probe = probe; retval = __platform_driver_register(drv, module); if (retval) return retval; /* Force all new probes of this driver to fail */ drv->probe = platform_probe_fail; /* Walk all platform devices and see if any actually bound to this driver. * If not, return an error as the device should have done so by now. */ if (!bus_for_each_dev(&platform_bus_type, NULL, &drv->driver, is_bound_to_driver)) { retval = -ENODEV; platform_driver_unregister(drv); } return retval; } EXPORT_SYMBOL_GPL(__platform_driver_probe); /** * __platform_create_bundle - register driver and create corresponding device * @driver: platform driver structure * @probe: the driver probe routine, probably from an __init section * @res: set of resources that needs to be allocated for the device * @n_res: number of resources * @data: platform specific data for this platform device * @size: size of platform specific data * @module: module which will be the owner of the driver * * Use this in legacy-style modules that probe hardware directly and * register a single platform device and corresponding platform driver. * * Returns &struct platform_device pointer on success, or ERR_PTR() on error. */ struct platform_device * __init_or_module __platform_create_bundle( struct platform_driver *driver, int (*probe)(struct platform_device *), struct resource *res, unsigned int n_res, const void *data, size_t size, struct module *module) { struct platform_device *pdev; int error; pdev = platform_device_alloc(driver->driver.name, -1); if (!pdev) { error = -ENOMEM; goto err_out; } error = platform_device_add_resources(pdev, res, n_res); if (error) goto err_pdev_put; error = platform_device_add_data(pdev, data, size); if (error) goto err_pdev_put; error = platform_device_add(pdev); if (error) goto err_pdev_put; error = __platform_driver_probe(driver, probe, module); if (error) goto err_pdev_del; return pdev; err_pdev_del: platform_device_del(pdev); err_pdev_put: platform_device_put(pdev); err_out: return ERR_PTR(error); } EXPORT_SYMBOL_GPL(__platform_create_bundle); /** * __platform_register_drivers - register an array of platform drivers * @drivers: an array of drivers to register * @count: the number of drivers to register * @owner: module owning the drivers * * Registers platform drivers specified by an array. On failure to register a * driver, all previously registered drivers will be unregistered. Callers of * this API should use platform_unregister_drivers() to unregister drivers in * the reverse order. * * Returns: 0 on success or a negative error code on failure. 
*/ int __platform_register_drivers(struct platform_driver * const *drivers, unsigned int count, struct module *owner) { unsigned int i; int err; for (i = 0; i < count; i++) { pr_debug("registering platform driver %ps\n", drivers[i]); err = __platform_driver_register(drivers[i], owner); if (err < 0) { pr_err("failed to register platform driver %ps: %d\n", drivers[i], err); goto error; } } return 0; error: while (i--) { pr_debug("unregistering platform driver %ps\n", drivers[i]); platform_driver_unregister(drivers[i]); } return err; } EXPORT_SYMBOL_GPL(__platform_register_drivers); /** * platform_unregister_drivers - unregister an array of platform drivers * @drivers: an array of drivers to unregister * @count: the number of drivers to unregister * * Unregisters platform drivers specified by an array. This is typically used * to complement an earlier call to platform_register_drivers(). Drivers are * unregistered in the reverse order in which they were registered. */ void platform_unregister_drivers(struct platform_driver * const *drivers, unsigned int count) { while (count--) { pr_debug("unregistering platform driver %ps\n", drivers[count]); platform_driver_unregister(drivers[count]); } } EXPORT_SYMBOL_GPL(platform_unregister_drivers); static const struct platform_device_id *platform_match_id( const struct platform_device_id *id, struct platform_device *pdev) { while (id->name[0]) { if (strcmp(pdev->name, id->name) == 0) { pdev->id_entry = id; return id; } id++; } return NULL; } #ifdef CONFIG_PM_SLEEP static int platform_legacy_suspend(struct device *dev, pm_message_t mesg) { struct platform_driver *pdrv = to_platform_driver(dev->driver); struct platform_device *pdev = to_platform_device(dev); int ret = 0; if (dev->driver && pdrv->suspend) ret = pdrv->suspend(pdev, mesg); return ret; } static int platform_legacy_resume(struct device *dev) { struct platform_driver *pdrv = to_platform_driver(dev->driver); struct platform_device *pdev = to_platform_device(dev); int ret = 0; if (dev->driver && pdrv->resume) ret = pdrv->resume(pdev); return ret; } #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_SUSPEND int platform_pm_suspend(struct device *dev) { const struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->suspend) ret = drv->pm->suspend(dev); } else { ret = platform_legacy_suspend(dev, PMSG_SUSPEND); } return ret; } int platform_pm_resume(struct device *dev) { const struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->resume) ret = drv->pm->resume(dev); } else { ret = platform_legacy_resume(dev); } return ret; } #endif /* CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATE_CALLBACKS int platform_pm_freeze(struct device *dev) { const struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->freeze) ret = drv->pm->freeze(dev); } else { ret = platform_legacy_suspend(dev, PMSG_FREEZE); } return ret; } int platform_pm_thaw(struct device *dev) { const struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->thaw) ret = drv->pm->thaw(dev); } else { ret = platform_legacy_resume(dev); } return ret; } int platform_pm_poweroff(struct device *dev) { const struct device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->poweroff) ret = drv->pm->poweroff(dev); } else { ret = platform_legacy_suspend(dev, PMSG_HIBERNATE); } return ret; } int platform_pm_restore(struct device *dev) { const struct 
device_driver *drv = dev->driver; int ret = 0; if (!drv) return 0; if (drv->pm) { if (drv->pm->restore) ret = drv->pm->restore(dev); } else { ret = platform_legacy_resume(dev); } return ret; } #endif /* CONFIG_HIBERNATE_CALLBACKS */ /* modalias support enables more hands-off userspace setup: * (a) environment variable lets new-style hotplug events work once system is * fully running: "modprobe $MODALIAS" * (b) sysfs attribute lets new-style coldplug recover from hotplug events * mishandled before system is fully running: "modprobe $(cat modalias)" */ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *pdev = to_platform_device(dev); int len; len = of_device_modalias(dev, buf, PAGE_SIZE); if (len != -ENODEV) return len; len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1); if (len != -ENODEV) return len; return sysfs_emit(buf, "platform:%s\n", pdev->name); } static DEVICE_ATTR_RO(modalias); static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", dev_to_node(dev)); } static DEVICE_ATTR_RO(numa_node); static ssize_t driver_override_show(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *pdev = to_platform_device(dev); ssize_t len; device_lock(dev); len = sysfs_emit(buf, "%s\n", pdev->driver_override); device_unlock(dev); return len; } static ssize_t driver_override_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct platform_device *pdev = to_platform_device(dev); int ret; ret = driver_set_override(dev, &pdev->driver_override, buf, count); if (ret) return ret; return count; } static DEVICE_ATTR_RW(driver_override); static struct attribute *platform_dev_attrs[] = { &dev_attr_modalias.attr, &dev_attr_numa_node.attr, &dev_attr_driver_override.attr, NULL, }; static umode_t platform_dev_attrs_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = container_of(kobj, typeof(*dev), kobj); if (a == &dev_attr_numa_node.attr && dev_to_node(dev) == NUMA_NO_NODE) return 0; return a->mode; } static const struct attribute_group platform_dev_group = { .attrs = platform_dev_attrs, .is_visible = platform_dev_attrs_visible, }; __ATTRIBUTE_GROUPS(platform_dev); /** * platform_match - bind platform device to platform driver. * @dev: device. * @drv: driver. * * Platform device IDs are assumed to be encoded like this: * "<name><instance>", where <name> is a short description of the type of * device, like "pci" or "floppy", and <instance> is the enumerated * instance of the device, like '0' or '42'. Driver IDs are simply * "<name>". So, extract the <name> from the platform_device structure, * and compare it against the name of the driver. Return whether they match * or not. 
*/ static int platform_match(struct device *dev, const struct device_driver *drv) { struct platform_device *pdev = to_platform_device(dev); struct platform_driver *pdrv = to_platform_driver(drv); /* When driver_override is set, only bind to the matching driver */ if (pdev->driver_override) return !strcmp(pdev->driver_override, drv->name); /* Attempt an OF style match first */ if (of_driver_match_device(dev, drv)) return 1; /* Then try ACPI style match */ if (acpi_driver_match_device(dev, drv)) return 1; /* Then try to match against the id table */ if (pdrv->id_table) return platform_match_id(pdrv->id_table, pdev) != NULL; /* fall-back to driver name match */ return (strcmp(pdev->name, drv->name) == 0); } static int platform_uevent(const struct device *dev, struct kobj_uevent_env *env) { const struct platform_device *pdev = to_platform_device(dev); int rc; /* Some devices have extra OF data and an OF-style MODALIAS */ rc = of_device_uevent_modalias(dev, env); if (rc != -ENODEV) return rc; rc = acpi_device_uevent_modalias(dev, env); if (rc != -ENODEV) return rc; add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX, pdev->name); return 0; } static int platform_probe(struct device *_dev) { struct platform_driver *drv = to_platform_driver(_dev->driver); struct platform_device *dev = to_platform_device(_dev); int ret; /* * A driver registered using platform_driver_probe() cannot be bound * again later because the probe function usually lives in __init code * and so is gone. For these drivers .probe is set to * platform_probe_fail in __platform_driver_probe(). Don't even prepare * clocks and PM domains for these to match the traditional behaviour. */ if (unlikely(drv->probe == platform_probe_fail)) return -ENXIO; ret = of_clk_set_defaults(_dev->of_node, false); if (ret < 0) return ret; ret = dev_pm_domain_attach(_dev, true); if (ret) goto out; if (drv->probe) { ret = drv->probe(dev); if (ret) dev_pm_domain_detach(_dev, true); } out: if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) { dev_warn(_dev, "probe deferral not supported\n"); ret = -ENXIO; } return ret; } static void platform_remove(struct device *_dev) { struct platform_driver *drv = to_platform_driver(_dev->driver); struct platform_device *dev = to_platform_device(_dev); if (drv->remove) drv->remove(dev); dev_pm_domain_detach(_dev, true); } static void platform_shutdown(struct device *_dev) { struct platform_device *dev = to_platform_device(_dev); struct platform_driver *drv; if (!_dev->driver) return; drv = to_platform_driver(_dev->driver); if (drv->shutdown) drv->shutdown(dev); } static int platform_dma_configure(struct device *dev) { struct platform_driver *drv = to_platform_driver(dev->driver); struct fwnode_handle *fwnode = dev_fwnode(dev); enum dev_dma_attr attr; int ret = 0; if (is_of_node(fwnode)) { ret = of_dma_configure(dev, to_of_node(fwnode), true); } else if (is_acpi_device_node(fwnode)) { attr = acpi_get_dma_attr(to_acpi_device_node(fwnode)); ret = acpi_dma_configure(dev, attr); } /* @drv may not be valid when we're called from the IOMMU layer */ if (ret || !dev->driver || drv->driver_managed_dma) return ret; ret = iommu_device_use_default_domain(dev); if (ret) arch_teardown_dma_ops(dev); return ret; } static void platform_dma_cleanup(struct device *dev) { struct platform_driver *drv = to_platform_driver(dev->driver); if (!drv->driver_managed_dma) iommu_device_unuse_default_domain(dev); } static const struct dev_pm_ops platform_dev_pm_ops = { SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend, 
pm_generic_runtime_resume, NULL) USE_PLATFORM_PM_SLEEP_OPS }; const struct bus_type platform_bus_type = { .name = "platform", .dev_groups = platform_dev_groups, .match = platform_match, .uevent = platform_uevent, .probe = platform_probe, .remove = platform_remove, .shutdown = platform_shutdown, .dma_configure = platform_dma_configure, .dma_cleanup = platform_dma_cleanup, .pm = &platform_dev_pm_ops, }; EXPORT_SYMBOL_GPL(platform_bus_type); static inline int __platform_match(struct device *dev, const void *drv) { return platform_match(dev, (struct device_driver *)drv); } /** * platform_find_device_by_driver - Find a platform device with a given * driver. * @start: The device to start the search from. * @drv: The device driver to look for. */ struct device *platform_find_device_by_driver(struct device *start, const struct device_driver *drv) { return bus_find_device(&platform_bus_type, start, drv, __platform_match); } EXPORT_SYMBOL_GPL(platform_find_device_by_driver); void __weak __init early_platform_cleanup(void) { } int __init platform_bus_init(void) { int error; early_platform_cleanup(); error = device_register(&platform_bus); if (error) { put_device(&platform_bus); return error; } error = bus_register(&platform_bus_type); if (error) device_unregister(&platform_bus); return error; } |
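/*
 * Illustrative usage sketch (not part of the original driver core, and kept
 * under #if 0 so it stays a sketch): a hypothetical out-of-tree module "foo"
 * exercising the registration paths implemented above. The "foo" names are
 * assumptions; the calls themselves are the public entry points —
 * platform_driver_register() is the THIS_MODULE wrapper around
 * __platform_driver_register(), and platform_device_register_simple()
 * funnels into platform_device_register_full().
 */
#if 0
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "bound to %s\n", pdev->name);
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.driver	= {
		/* fallback name match, see platform_match() above */
		.name	= "foo",
	},
};

static struct platform_device *foo_pdev;

static int __init foo_init(void)
{
	int ret;

	/* Register the driver with the platform bus. */
	ret = platform_driver_register(&foo_driver);
	if (ret)
		return ret;

	/* Create and add a matching device so foo_probe() runs. */
	foo_pdev = platform_device_register_simple("foo", -1, NULL, 0);
	if (IS_ERR(foo_pdev)) {
		platform_driver_unregister(&foo_driver);
		return PTR_ERR(foo_pdev);
	}

	return 0;
}

static void __exit foo_exit(void)
{
	platform_device_unregister(foo_pdev);
	platform_driver_unregister(&foo_driver);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");
#endif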
// SPDX-License-Identifier: GPL-2.0-or-later /* * dvb_frontend.c: DVB frontend tuning interface/thread * * Copyright (C) 1999-2001 Ralph Metzler * Marcus Metzler * Holger Waechtler * for convergence integrated media GmbH * * Copyright (C) 2004 Andrew de Quincey (tuning thread cleanup) */ /* Enables DVBv3 compatibility bits at the headers */ #define __DVB_CORE__ #define pr_fmt(fmt) "dvb_frontend: " fmt #include <linux/string.h> #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/semaphore.h> #include <linux/module.h> #include <linux/nospec.h> #include <linux/list.h> #include <linux/freezer.h> #include <linux/jiffies.h> #include <linux/kthread.h> #include <linux/ktime.h> #include <linux/compat.h> #include <asm/processor.h> #include <media/dvb_frontend.h> #include <media/dvbdev.h> #include <linux/dvb/version.h> static int dvb_frontend_debug; static int dvb_shutdown_timeout; static int dvb_force_auto_inversion; static int dvb_override_tune_delay; static int dvb_powerdown_on_sleep = 1; static int dvb_mfe_wait_time = 5; module_param_named(frontend_debug, dvb_frontend_debug, int, 0644); MODULE_PARM_DESC(frontend_debug, "Turn on/off frontend core debugging (default:off)."); module_param(dvb_shutdown_timeout, int, 0644); MODULE_PARM_DESC(dvb_shutdown_timeout, "wait <shutdown_timeout> seconds after close() before suspending hardware"); module_param(dvb_force_auto_inversion, int, 0644); MODULE_PARM_DESC(dvb_force_auto_inversion, "0: normal (default), 1: INVERSION_AUTO forced always"); module_param(dvb_override_tune_delay, int, 0644); MODULE_PARM_DESC(dvb_override_tune_delay, "0: normal (default), >0 => delay in milliseconds to wait for lock after a tune attempt"); module_param(dvb_powerdown_on_sleep, int, 0644); MODULE_PARM_DESC(dvb_powerdown_on_sleep, "0: do not power down, 1: turn LNB voltage off on sleep (default)"); module_param(dvb_mfe_wait_time, int, 0644); MODULE_PARM_DESC(dvb_mfe_wait_time, "Wait up to <mfe_wait_time> seconds on open() for multi-frontend to become available (default:5 seconds)"); #define dprintk(fmt, arg...) \ printk(KERN_DEBUG pr_fmt("%s: " fmt), __func__, ##arg) #define FESTATE_IDLE 1 #define FESTATE_RETUNE 2 #define FESTATE_TUNING_FAST 4 #define FESTATE_TUNING_SLOW 8 #define FESTATE_TUNED 16 #define FESTATE_ZIGZAG_FAST 32 #define FESTATE_ZIGZAG_SLOW 64 #define FESTATE_DISEQC 128 #define FESTATE_ERROR 256 #define FESTATE_WAITFORLOCK (FESTATE_TUNING_FAST | FESTATE_TUNING_SLOW | FESTATE_ZIGZAG_FAST | FESTATE_ZIGZAG_SLOW | FESTATE_DISEQC) #define FESTATE_SEARCHING_FAST (FESTATE_TUNING_FAST | FESTATE_ZIGZAG_FAST) #define FESTATE_SEARCHING_SLOW (FESTATE_TUNING_SLOW | FESTATE_ZIGZAG_SLOW) #define FESTATE_LOSTLOCK (FESTATE_ZIGZAG_FAST | FESTATE_ZIGZAG_SLOW) /* * FESTATE_IDLE. No tuning parameters have been supplied and the loop is idling. * FESTATE_RETUNE. Parameters have been supplied, but we have not yet performed the first tune. * FESTATE_TUNING_FAST. Tuning parameters have been supplied and fast zigzag scan is in progress. * FESTATE_TUNING_SLOW. Tuning parameters have been supplied. Fast zigzag failed, so we're trying again, but slower. * FESTATE_TUNED. The frontend has successfully locked on.
* FESTATE_ZIGZAG_FAST. The lock has been lost, and a fast zigzag has been initiated to try and regain it. * FESTATE_ZIGZAG_SLOW. The lock has been lost. Fast zigzag has been failed, so we're trying again, but slower. * FESTATE_DISEQC. A DISEQC command has just been issued. * FESTATE_WAITFORLOCK. When we're waiting for a lock. * FESTATE_SEARCHING_FAST. When we're searching for a signal using a fast zigzag scan. * FESTATE_SEARCHING_SLOW. When we're searching for a signal using a slow zigzag scan. * FESTATE_LOSTLOCK. When the lock has been lost, and we're searching it again. */ static DEFINE_MUTEX(frontend_mutex); struct dvb_frontend_private { /* thread/frontend values */ struct dvb_device *dvbdev; struct dvb_frontend_parameters parameters_out; struct dvb_fe_events events; struct semaphore sem; struct list_head list_head; wait_queue_head_t wait_queue; struct task_struct *thread; unsigned long release_jiffies; unsigned int wakeup; enum fe_status status; unsigned long tune_mode_flags; unsigned int delay; unsigned int reinitialise; int tone; int voltage; /* swzigzag values */ unsigned int state; unsigned int bending; int lnb_drift; unsigned int inversion; unsigned int auto_step; unsigned int auto_sub_step; unsigned int started_auto_step; unsigned int min_delay; unsigned int max_drift; unsigned int step_size; int quality; unsigned int check_wrapped; enum dvbfe_search algo_status; #if defined(CONFIG_MEDIA_CONTROLLER_DVB) struct media_pipeline pipe; #endif }; static void dvb_frontend_invoke_release(struct dvb_frontend *fe, void (*release)(struct dvb_frontend *fe)); static void __dvb_frontend_free(struct dvb_frontend *fe) { struct dvb_frontend_private *fepriv = fe->frontend_priv; if (fepriv) dvb_device_put(fepriv->dvbdev); dvb_frontend_invoke_release(fe, fe->ops.release); kfree(fepriv); } static void dvb_frontend_free(struct kref *ref) { struct dvb_frontend *fe = container_of(ref, struct dvb_frontend, refcount); __dvb_frontend_free(fe); } static void dvb_frontend_put(struct dvb_frontend *fe) { /* call detach before dropping the reference count */ if (fe->ops.detach) fe->ops.detach(fe); /* * Check if the frontend was registered, as otherwise * kref was not initialized yet. */ if (fe->frontend_priv) kref_put(&fe->refcount, dvb_frontend_free); else __dvb_frontend_free(fe); } static void dvb_frontend_get(struct dvb_frontend *fe) { kref_get(&fe->refcount); } static void dvb_frontend_wakeup(struct dvb_frontend *fe); static int dtv_get_frontend(struct dvb_frontend *fe, struct dtv_frontend_properties *c, struct dvb_frontend_parameters *p_out); static int dtv_property_legacy_params_sync(struct dvb_frontend *fe, const struct dtv_frontend_properties *c, struct dvb_frontend_parameters *p); static bool has_get_frontend(struct dvb_frontend *fe) { return fe->ops.get_frontend; } /* * Due to DVBv3 API calls, a delivery system should be mapped into one of * the 4 DVBv3 delivery systems (FE_QPSK, FE_QAM, FE_OFDM or FE_ATSC), * otherwise, a DVBv3 call will fail. 
*/ enum dvbv3_emulation_type { DVBV3_UNKNOWN, DVBV3_QPSK, DVBV3_QAM, DVBV3_OFDM, DVBV3_ATSC, }; static enum dvbv3_emulation_type dvbv3_type(u32 delivery_system) { switch (delivery_system) { case SYS_DVBC_ANNEX_A: case SYS_DVBC_ANNEX_C: return DVBV3_QAM; case SYS_DVBS: case SYS_DVBS2: case SYS_TURBO: case SYS_ISDBS: case SYS_DSS: return DVBV3_QPSK; case SYS_DVBT: case SYS_DVBT2: case SYS_ISDBT: case SYS_DTMB: return DVBV3_OFDM; case SYS_ATSC: case SYS_ATSCMH: case SYS_DVBC_ANNEX_B: return DVBV3_ATSC; case SYS_UNDEFINED: case SYS_ISDBC: case SYS_DVBH: case SYS_DAB: default: /* * Doesn't know how to emulate those types and/or * there's no frontend driver from this type yet * with some emulation code, so, we're not sure yet how * to handle them, or they're not compatible with a DVBv3 call. */ return DVBV3_UNKNOWN; } } static void dvb_frontend_add_event(struct dvb_frontend *fe, enum fe_status status) { struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct dvb_fe_events *events = &fepriv->events; struct dvb_frontend_event *e; int wp; dev_dbg(fe->dvb->device, "%s:\n", __func__); if ((status & FE_HAS_LOCK) && has_get_frontend(fe)) dtv_get_frontend(fe, c, &fepriv->parameters_out); mutex_lock(&events->mtx); wp = (events->eventw + 1) % MAX_EVENT; if (wp == events->eventr) { events->overflow = 1; events->eventr = (events->eventr + 1) % MAX_EVENT; } e = &events->events[events->eventw]; e->status = status; e->parameters = fepriv->parameters_out; events->eventw = wp; mutex_unlock(&events->mtx); wake_up_interruptible(&events->wait_queue); } static int dvb_frontend_test_event(struct dvb_frontend_private *fepriv, struct dvb_fe_events *events) { int ret; up(&fepriv->sem); ret = events->eventw != events->eventr; down(&fepriv->sem); return ret; } static int dvb_frontend_get_event(struct dvb_frontend *fe, struct dvb_frontend_event *event, int flags) { struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dvb_fe_events *events = &fepriv->events; dev_dbg(fe->dvb->device, "%s:\n", __func__); if (events->overflow) { events->overflow = 0; return -EOVERFLOW; } if (events->eventw == events->eventr) { struct wait_queue_entry wait; int ret = 0; if (flags & O_NONBLOCK) return -EWOULDBLOCK; init_waitqueue_entry(&wait, current); add_wait_queue(&events->wait_queue, &wait); while (!dvb_frontend_test_event(fepriv, events)) { wait_woken(&wait, TASK_INTERRUPTIBLE, 0); if (signal_pending(current)) { ret = -ERESTARTSYS; break; } } remove_wait_queue(&events->wait_queue, &wait); if (ret < 0) return ret; } mutex_lock(&events->mtx); *event = events->events[events->eventr]; events->eventr = (events->eventr + 1) % MAX_EVENT; mutex_unlock(&events->mtx); return 0; } static void dvb_frontend_clear_events(struct dvb_frontend *fe) { struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dvb_fe_events *events = &fepriv->events; mutex_lock(&events->mtx); events->eventr = events->eventw; mutex_unlock(&events->mtx); } static void dvb_frontend_init(struct dvb_frontend *fe) { dev_dbg(fe->dvb->device, "%s: initialising adapter %i frontend %i (%s)...\n", __func__, fe->dvb->num, fe->id, fe->ops.info.name); if (fe->ops.init) fe->ops.init(fe); if (fe->ops.tuner_ops.init) { if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); fe->ops.tuner_ops.init(fe); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } } void dvb_frontend_reinitialise(struct dvb_frontend *fe) { struct dvb_frontend_private *fepriv = fe->frontend_priv; fepriv->reinitialise = 1; 
dvb_frontend_wakeup(fe); } EXPORT_SYMBOL(dvb_frontend_reinitialise); static void dvb_frontend_swzigzag_update_delay(struct dvb_frontend_private *fepriv, int locked) { int q2; struct dvb_frontend *fe = fepriv->dvbdev->priv; dev_dbg(fe->dvb->device, "%s:\n", __func__); if (locked) (fepriv->quality) = (fepriv->quality * 220 + 36 * 256) / 256; else (fepriv->quality) = (fepriv->quality * 220 + 0) / 256; q2 = fepriv->quality - 128; q2 *= q2; fepriv->delay = fepriv->min_delay + q2 * HZ / (128 * 128); } /** * dvb_frontend_swzigzag_autotune - Performs automatic twiddling of frontend * parameters. * * @fe: The frontend concerned. * @check_wrapped: Checks if an iteration has completed. * DO NOT SET ON THE FIRST ATTEMPT. * * return: Number of complete iterations that have been performed. */ static int dvb_frontend_swzigzag_autotune(struct dvb_frontend *fe, int check_wrapped) { int autoinversion; int ready = 0; int fe_set_err = 0; struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache, tmp; int original_inversion = c->inversion; u32 original_frequency = c->frequency; /* are we using autoinversion? */ autoinversion = ((!(fe->ops.info.caps & FE_CAN_INVERSION_AUTO)) && (c->inversion == INVERSION_AUTO)); /* setup parameters correctly */ while (!ready) { /* calculate the lnb_drift */ fepriv->lnb_drift = fepriv->auto_step * fepriv->step_size; /* wrap the auto_step if we've exceeded the maximum drift */ if (fepriv->lnb_drift > fepriv->max_drift) { fepriv->auto_step = 0; fepriv->auto_sub_step = 0; fepriv->lnb_drift = 0; } /* perform inversion and +/- zigzag */ switch (fepriv->auto_sub_step) { case 0: /* try with the current inversion and current drift setting */ ready = 1; break; case 1: if (!autoinversion) break; fepriv->inversion = (fepriv->inversion == INVERSION_OFF) ? INVERSION_ON : INVERSION_OFF; ready = 1; break; case 2: if (fepriv->lnb_drift == 0) break; fepriv->lnb_drift = -fepriv->lnb_drift; ready = 1; break; case 3: if (fepriv->lnb_drift == 0) break; if (!autoinversion) break; fepriv->inversion = (fepriv->inversion == INVERSION_OFF) ? 
INVERSION_ON : INVERSION_OFF; fepriv->lnb_drift = -fepriv->lnb_drift; ready = 1; break; default: fepriv->auto_step++; fepriv->auto_sub_step = 0; continue; } if (!ready) fepriv->auto_sub_step++; } /* if this attempt would hit where we started, indicate a complete * iteration has occurred */ if ((fepriv->auto_step == fepriv->started_auto_step) && (fepriv->auto_sub_step == 0) && check_wrapped) { return 1; } dev_dbg(fe->dvb->device, "%s: drift:%i inversion:%i auto_step:%i auto_sub_step:%i started_auto_step:%i\n", __func__, fepriv->lnb_drift, fepriv->inversion, fepriv->auto_step, fepriv->auto_sub_step, fepriv->started_auto_step); /* set the frontend itself */ c->frequency += fepriv->lnb_drift; if (autoinversion) c->inversion = fepriv->inversion; tmp = *c; if (fe->ops.set_frontend) fe_set_err = fe->ops.set_frontend(fe); *c = tmp; if (fe_set_err < 0) { fepriv->state = FESTATE_ERROR; return fe_set_err; } c->frequency = original_frequency; c->inversion = original_inversion; fepriv->auto_sub_step++; return 0; } static void dvb_frontend_swzigzag(struct dvb_frontend *fe) { enum fe_status s = FE_NONE; int retval = 0; struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache, tmp; if (fepriv->max_drift) dev_warn_once(fe->dvb->device, "Frontend requested software zigzag, but didn't set the frequency step size\n"); /* if we've got no parameters, just keep idling */ if (fepriv->state & FESTATE_IDLE) { fepriv->delay = 3 * HZ; fepriv->quality = 0; return; } /* in SCAN mode, we just set the frontend when asked and leave it alone */ if (fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT) { if (fepriv->state & FESTATE_RETUNE) { tmp = *c; if (fe->ops.set_frontend) retval = fe->ops.set_frontend(fe); *c = tmp; if (retval < 0) fepriv->state = FESTATE_ERROR; else fepriv->state = FESTATE_TUNED; } fepriv->delay = 3 * HZ; fepriv->quality = 0; return; } /* get the frontend status */ if (fepriv->state & FESTATE_RETUNE) { s = 0; } else { if (fe->ops.read_status) fe->ops.read_status(fe, &s); if (s != fepriv->status) { dvb_frontend_add_event(fe, s); fepriv->status = s; } } /* if we're not tuned, and we have a lock, move to the TUNED state */ if ((fepriv->state & FESTATE_WAITFORLOCK) && (s & FE_HAS_LOCK)) { dvb_frontend_swzigzag_update_delay(fepriv, s & FE_HAS_LOCK); fepriv->state = FESTATE_TUNED; /* if we're tuned, then we have determined the correct inversion */ if ((!(fe->ops.info.caps & FE_CAN_INVERSION_AUTO)) && (c->inversion == INVERSION_AUTO)) { c->inversion = fepriv->inversion; } return; } /* if we are tuned already, check we're still locked */ if (fepriv->state & FESTATE_TUNED) { dvb_frontend_swzigzag_update_delay(fepriv, s & FE_HAS_LOCK); /* we're tuned, and the lock is still good... */ if (s & FE_HAS_LOCK) { return; } else { /* if we _WERE_ tuned, but now don't have a lock */ fepriv->state = FESTATE_ZIGZAG_FAST; fepriv->started_auto_step = fepriv->auto_step; fepriv->check_wrapped = 0; } } /* don't actually do anything if we're in the LOSTLOCK state, * the frontend is set to FE_CAN_RECOVER, and the max_drift is 0 */ if ((fepriv->state & FESTATE_LOSTLOCK) && (fe->ops.info.caps & FE_CAN_RECOVER) && (fepriv->max_drift == 0)) { dvb_frontend_swzigzag_update_delay(fepriv, s & FE_HAS_LOCK); return; } /* don't do anything if we're in the DISEQC state, since this * might be someone with a motorized dish controlled by DISEQC. * If its actually a re-tune, there will be a SET_FRONTEND soon enough. 
*/ if (fepriv->state & FESTATE_DISEQC) { dvb_frontend_swzigzag_update_delay(fepriv, s & FE_HAS_LOCK); return; } /* if we're in the RETUNE state, set everything up for a brand * new scan, keeping the current inversion setting, as the next * tune is _very_ likely to require the same */ if (fepriv->state & FESTATE_RETUNE) { fepriv->lnb_drift = 0; fepriv->auto_step = 0; fepriv->auto_sub_step = 0; fepriv->started_auto_step = 0; fepriv->check_wrapped = 0; } /* fast zigzag. */ if ((fepriv->state & FESTATE_SEARCHING_FAST) || (fepriv->state & FESTATE_RETUNE)) { fepriv->delay = fepriv->min_delay; /* perform a tune */ retval = dvb_frontend_swzigzag_autotune(fe, fepriv->check_wrapped); if (retval < 0) { return; } else if (retval) { /* OK, if we've run out of trials at the fast speed. * Drop back to slow for the _next_ attempt */ fepriv->state = FESTATE_SEARCHING_SLOW; fepriv->started_auto_step = fepriv->auto_step; return; } fepriv->check_wrapped = 1; /* if we've just re-tuned, enter the ZIGZAG_FAST state. * This ensures we cannot return from an * FE_SET_FRONTEND ioctl before the first frontend tune * occurs */ if (fepriv->state & FESTATE_RETUNE) { fepriv->state = FESTATE_TUNING_FAST; } } /* slow zigzag */ if (fepriv->state & FESTATE_SEARCHING_SLOW) { dvb_frontend_swzigzag_update_delay(fepriv, s & FE_HAS_LOCK); /* Note: don't bother checking for wrapping; we stay in this * state until we get a lock */ dvb_frontend_swzigzag_autotune(fe, 0); } } static int dvb_frontend_is_exiting(struct dvb_frontend *fe) { struct dvb_frontend_private *fepriv = fe->frontend_priv; if (fe->exit != DVB_FE_NO_EXIT) return 1; if (fepriv->dvbdev->writers == 1) if (time_after_eq(jiffies, fepriv->release_jiffies + dvb_shutdown_timeout * HZ)) return 1; return 0; } static int dvb_frontend_should_wakeup(struct dvb_frontend *fe) { struct dvb_frontend_private *fepriv = fe->frontend_priv; if (fepriv->wakeup) { fepriv->wakeup = 0; return 1; } return dvb_frontend_is_exiting(fe); } static void dvb_frontend_wakeup(struct dvb_frontend *fe) { struct dvb_frontend_private *fepriv = fe->frontend_priv; fepriv->wakeup = 1; wake_up_interruptible(&fepriv->wait_queue); } static int dvb_frontend_thread(void *data) { struct dvb_frontend *fe = data; struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct dvb_frontend_private *fepriv = fe->frontend_priv; enum fe_status s = FE_NONE; enum dvbfe_algo algo; bool re_tune = false; bool semheld = false; dev_dbg(fe->dvb->device, "%s:\n", __func__); fepriv->check_wrapped = 0; fepriv->quality = 0; fepriv->delay = 3 * HZ; fepriv->status = 0; fepriv->wakeup = 0; fepriv->reinitialise = 0; dvb_frontend_init(fe); set_freezable(); while (1) { up(&fepriv->sem); /* is locked when we enter the thread... 
*/ wait_event_freezable_timeout(fepriv->wait_queue, dvb_frontend_should_wakeup(fe) || kthread_should_stop(), fepriv->delay); if (kthread_should_stop() || dvb_frontend_is_exiting(fe)) { /* got signal or quitting */ if (!down_interruptible(&fepriv->sem)) semheld = true; fe->exit = DVB_FE_NORMAL_EXIT; break; } if (down_interruptible(&fepriv->sem)) break; if (fepriv->reinitialise) { dvb_frontend_init(fe); if (fe->ops.set_tone && fepriv->tone != -1) fe->ops.set_tone(fe, fepriv->tone); if (fe->ops.set_voltage && fepriv->voltage != -1) fe->ops.set_voltage(fe, fepriv->voltage); fepriv->reinitialise = 0; } /* do an iteration of the tuning loop */ if (fe->ops.get_frontend_algo) { algo = fe->ops.get_frontend_algo(fe); switch (algo) { case DVBFE_ALGO_HW: dev_dbg(fe->dvb->device, "%s: Frontend ALGO = DVBFE_ALGO_HW\n", __func__); if (fepriv->state & FESTATE_RETUNE) { dev_dbg(fe->dvb->device, "%s: Retune requested, FESTATE_RETUNE\n", __func__); re_tune = true; fepriv->state = FESTATE_TUNED; } else { re_tune = false; } if (fe->ops.tune) fe->ops.tune(fe, re_tune, fepriv->tune_mode_flags, &fepriv->delay, &s); if (s != fepriv->status && !(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT)) { dev_dbg(fe->dvb->device, "%s: state changed, adding current state\n", __func__); dvb_frontend_add_event(fe, s); fepriv->status = s; } break; case DVBFE_ALGO_SW: dev_dbg(fe->dvb->device, "%s: Frontend ALGO = DVBFE_ALGO_SW\n", __func__); dvb_frontend_swzigzag(fe); break; case DVBFE_ALGO_CUSTOM: dev_dbg(fe->dvb->device, "%s: Frontend ALGO = DVBFE_ALGO_CUSTOM, state=%d\n", __func__, fepriv->state); if (fepriv->state & FESTATE_RETUNE) { dev_dbg(fe->dvb->device, "%s: Retune requested, FESTAT_RETUNE\n", __func__); fepriv->state = FESTATE_TUNED; } /* Case where we are going to search for a carrier * User asked us to retune again for some reason, possibly * requesting a search with a new set of parameters */ if (fepriv->algo_status & DVBFE_ALGO_SEARCH_AGAIN) { if (fe->ops.search) { fepriv->algo_status = fe->ops.search(fe); /* We did do a search as was requested, the flags are * now unset as well and has the flags wrt to search. 
*/ } else { fepriv->algo_status &= ~DVBFE_ALGO_SEARCH_AGAIN; } } /* Track the carrier if the search was successful */ if (fepriv->algo_status != DVBFE_ALGO_SEARCH_SUCCESS) { fepriv->algo_status |= DVBFE_ALGO_SEARCH_AGAIN; fepriv->delay = HZ / 2; } dtv_property_legacy_params_sync(fe, c, &fepriv->parameters_out); fe->ops.read_status(fe, &s); if (s != fepriv->status) { dvb_frontend_add_event(fe, s); /* update event list */ fepriv->status = s; if (!(s & FE_HAS_LOCK)) { fepriv->delay = HZ / 10; fepriv->algo_status |= DVBFE_ALGO_SEARCH_AGAIN; } else { fepriv->delay = 60 * HZ; } } break; default: dev_dbg(fe->dvb->device, "%s: UNDEFINED ALGO !\n", __func__); break; } } else { dvb_frontend_swzigzag(fe); } } if (dvb_powerdown_on_sleep) { if (fe->ops.set_voltage) fe->ops.set_voltage(fe, SEC_VOLTAGE_OFF); if (fe->ops.tuner_ops.sleep) { if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); fe->ops.tuner_ops.sleep(fe); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } if (fe->ops.sleep) fe->ops.sleep(fe); } fepriv->thread = NULL; if (kthread_should_stop()) fe->exit = DVB_FE_DEVICE_REMOVED; else fe->exit = DVB_FE_NO_EXIT; mb(); if (semheld) up(&fepriv->sem); dvb_frontend_wakeup(fe); return 0; } static void dvb_frontend_stop(struct dvb_frontend *fe) { struct dvb_frontend_private *fepriv = fe->frontend_priv; dev_dbg(fe->dvb->device, "%s:\n", __func__); if (fe->exit != DVB_FE_DEVICE_REMOVED) fe->exit = DVB_FE_NORMAL_EXIT; mb(); if (!fepriv->thread) return; kthread_stop(fepriv->thread); sema_init(&fepriv->sem, 1); fepriv->state = FESTATE_IDLE; /* paranoia check in case a signal arrived */ if (fepriv->thread) dev_warn(fe->dvb->device, "dvb_frontend_stop: warning: thread %p won't exit\n", fepriv->thread); } /* * Sleep for the amount of time given by add_usec parameter * * This needs to be as precise as possible, as it affects the detection of * the dish tone command at the satellite subsystem. The precision is improved * by using a scheduled msleep followed by udelay for the remainder. 
*/ void dvb_frontend_sleep_until(ktime_t *waketime, u32 add_usec) { s32 delta; *waketime = ktime_add_us(*waketime, add_usec); delta = ktime_us_delta(ktime_get_boottime(), *waketime); if (delta > 2500) { msleep((delta - 1500) / 1000); delta = ktime_us_delta(ktime_get_boottime(), *waketime); } if (delta > 0) udelay(delta); } EXPORT_SYMBOL(dvb_frontend_sleep_until); static int dvb_frontend_start(struct dvb_frontend *fe) { int ret; struct dvb_frontend_private *fepriv = fe->frontend_priv; struct task_struct *fe_thread; dev_dbg(fe->dvb->device, "%s:\n", __func__); if (fepriv->thread) { if (fe->exit == DVB_FE_NO_EXIT) return 0; else dvb_frontend_stop(fe); } if (signal_pending(current)) return -EINTR; if (down_interruptible(&fepriv->sem)) return -EINTR; fepriv->state = FESTATE_IDLE; fe->exit = DVB_FE_NO_EXIT; fepriv->thread = NULL; mb(); fe_thread = kthread_run(dvb_frontend_thread, fe, "kdvb-ad-%i-fe-%i", fe->dvb->num, fe->id); if (IS_ERR(fe_thread)) { ret = PTR_ERR(fe_thread); dev_warn(fe->dvb->device, "dvb_frontend_start: failed to start kthread (%d)\n", ret); up(&fepriv->sem); return ret; } fepriv->thread = fe_thread; return 0; } static void dvb_frontend_get_frequency_limits(struct dvb_frontend *fe, u32 *freq_min, u32 *freq_max, u32 *tolerance) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; u32 tuner_min = fe->ops.tuner_ops.info.frequency_min_hz; u32 tuner_max = fe->ops.tuner_ops.info.frequency_max_hz; u32 frontend_min = fe->ops.info.frequency_min_hz; u32 frontend_max = fe->ops.info.frequency_max_hz; *freq_min = max(frontend_min, tuner_min); if (frontend_max == 0) *freq_max = tuner_max; else if (tuner_max == 0) *freq_max = frontend_max; else *freq_max = min(frontend_max, tuner_max); if (*freq_min == 0 || *freq_max == 0) dev_warn(fe->dvb->device, "DVB: adapter %i frontend %u frequency limits undefined - fix the driver\n", fe->dvb->num, fe->id); dev_dbg(fe->dvb->device, "frequency interval: tuner: %u...%u, frontend: %u...%u", tuner_min, tuner_max, frontend_min, frontend_max); /* If the standard is for satellite, convert frequencies to kHz */ switch (c->delivery_system) { case SYS_DSS: case SYS_DVBS: case SYS_DVBS2: case SYS_TURBO: case SYS_ISDBS: *freq_min /= kHz; *freq_max /= kHz; if (tolerance) *tolerance = fe->ops.info.frequency_tolerance_hz / kHz; break; default: if (tolerance) *tolerance = fe->ops.info.frequency_tolerance_hz; break; } } static u32 dvb_frontend_get_stepsize(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; u32 fe_step = fe->ops.info.frequency_stepsize_hz; u32 tuner_step = fe->ops.tuner_ops.info.frequency_step_hz; u32 step = max(fe_step, tuner_step); switch (c->delivery_system) { case SYS_DSS: case SYS_DVBS: case SYS_DVBS2: case SYS_TURBO: case SYS_ISDBS: step /= kHz; break; default: break; } return step; } static int dvb_frontend_check_parameters(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; u32 freq_min; u32 freq_max; /* range check: frequency */ dvb_frontend_get_frequency_limits(fe, &freq_min, &freq_max, NULL); if ((freq_min && c->frequency < freq_min) || (freq_max && c->frequency > freq_max)) { dev_warn(fe->dvb->device, "DVB: adapter %i frontend %i frequency %u out of range (%u..%u)\n", fe->dvb->num, fe->id, c->frequency, freq_min, freq_max); return -EINVAL; } /* range check: symbol rate */ switch (c->delivery_system) { case SYS_DSS: case SYS_DVBS: case SYS_DVBS2: case SYS_TURBO: case SYS_DVBC_ANNEX_A: case SYS_DVBC_ANNEX_C: if ((fe->ops.info.symbol_rate_min && c->symbol_rate < 
fe->ops.info.symbol_rate_min) || (fe->ops.info.symbol_rate_max && c->symbol_rate > fe->ops.info.symbol_rate_max)) { dev_warn(fe->dvb->device, "DVB: adapter %i frontend %i symbol rate %u out of range (%u..%u)\n", fe->dvb->num, fe->id, c->symbol_rate, fe->ops.info.symbol_rate_min, fe->ops.info.symbol_rate_max); return -EINVAL; } break; default: break; } return 0; } static int dvb_frontend_clear_cache(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; int i; u32 delsys; delsys = c->delivery_system; memset(c, 0, offsetof(struct dtv_frontend_properties, strength)); c->delivery_system = delsys; dev_dbg(fe->dvb->device, "%s: Clearing cache for delivery system %d\n", __func__, c->delivery_system); c->transmission_mode = TRANSMISSION_MODE_AUTO; c->bandwidth_hz = 0; /* AUTO */ c->guard_interval = GUARD_INTERVAL_AUTO; c->hierarchy = HIERARCHY_AUTO; c->symbol_rate = 0; c->code_rate_HP = FEC_AUTO; c->code_rate_LP = FEC_AUTO; c->fec_inner = FEC_AUTO; c->rolloff = ROLLOFF_AUTO; c->voltage = SEC_VOLTAGE_OFF; c->sectone = SEC_TONE_OFF; c->pilot = PILOT_AUTO; c->isdbt_partial_reception = 0; c->isdbt_sb_mode = 0; c->isdbt_sb_subchannel = 0; c->isdbt_sb_segment_idx = 0; c->isdbt_sb_segment_count = 0; c->isdbt_layer_enabled = 7; /* All layers (A,B,C) */ for (i = 0; i < 3; i++) { c->layer[i].fec = FEC_AUTO; c->layer[i].modulation = QAM_AUTO; c->layer[i].interleaving = 0; c->layer[i].segment_count = 0; } c->stream_id = NO_STREAM_ID_FILTER; c->scrambling_sequence_index = 0;/* default sequence */ switch (c->delivery_system) { case SYS_DSS: c->modulation = QPSK; c->rolloff = ROLLOFF_20; break; case SYS_DVBS: case SYS_DVBS2: case SYS_TURBO: c->modulation = QPSK; /* implied for DVB-S in legacy API */ c->rolloff = ROLLOFF_35;/* implied for DVB-S */ break; case SYS_ATSC: c->modulation = VSB_8; break; case SYS_ISDBS: c->symbol_rate = 28860000; c->rolloff = ROLLOFF_35; c->bandwidth_hz = c->symbol_rate / 100 * 135; break; default: c->modulation = QAM_AUTO; break; } c->lna = LNA_AUTO; return 0; } #define _DTV_CMD(n) \ [n] = #n static char *dtv_cmds[DTV_MAX_COMMAND + 1] = { _DTV_CMD(DTV_TUNE), _DTV_CMD(DTV_CLEAR), /* Set */ _DTV_CMD(DTV_FREQUENCY), _DTV_CMD(DTV_BANDWIDTH_HZ), _DTV_CMD(DTV_MODULATION), _DTV_CMD(DTV_INVERSION), _DTV_CMD(DTV_DISEQC_MASTER), _DTV_CMD(DTV_SYMBOL_RATE), _DTV_CMD(DTV_INNER_FEC), _DTV_CMD(DTV_VOLTAGE), _DTV_CMD(DTV_TONE), _DTV_CMD(DTV_PILOT), _DTV_CMD(DTV_ROLLOFF), _DTV_CMD(DTV_DELIVERY_SYSTEM), _DTV_CMD(DTV_HIERARCHY), _DTV_CMD(DTV_CODE_RATE_HP), _DTV_CMD(DTV_CODE_RATE_LP), _DTV_CMD(DTV_GUARD_INTERVAL), _DTV_CMD(DTV_TRANSMISSION_MODE), _DTV_CMD(DTV_INTERLEAVING), _DTV_CMD(DTV_ISDBT_PARTIAL_RECEPTION), _DTV_CMD(DTV_ISDBT_SOUND_BROADCASTING), _DTV_CMD(DTV_ISDBT_SB_SUBCHANNEL_ID), _DTV_CMD(DTV_ISDBT_SB_SEGMENT_IDX), _DTV_CMD(DTV_ISDBT_SB_SEGMENT_COUNT), _DTV_CMD(DTV_ISDBT_LAYER_ENABLED), _DTV_CMD(DTV_ISDBT_LAYERA_FEC), _DTV_CMD(DTV_ISDBT_LAYERA_MODULATION), _DTV_CMD(DTV_ISDBT_LAYERA_SEGMENT_COUNT), _DTV_CMD(DTV_ISDBT_LAYERA_TIME_INTERLEAVING), _DTV_CMD(DTV_ISDBT_LAYERB_FEC), _DTV_CMD(DTV_ISDBT_LAYERB_MODULATION), _DTV_CMD(DTV_ISDBT_LAYERB_SEGMENT_COUNT), _DTV_CMD(DTV_ISDBT_LAYERB_TIME_INTERLEAVING), _DTV_CMD(DTV_ISDBT_LAYERC_FEC), _DTV_CMD(DTV_ISDBT_LAYERC_MODULATION), _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT), _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING), _DTV_CMD(DTV_STREAM_ID), _DTV_CMD(DTV_DVBT2_PLP_ID_LEGACY), _DTV_CMD(DTV_SCRAMBLING_SEQUENCE_INDEX), _DTV_CMD(DTV_LNA), /* Get */ _DTV_CMD(DTV_DISEQC_SLAVE_REPLY), _DTV_CMD(DTV_API_VERSION), 
_DTV_CMD(DTV_ENUM_DELSYS), _DTV_CMD(DTV_ATSCMH_PARADE_ID), _DTV_CMD(DTV_ATSCMH_RS_FRAME_ENSEMBLE), _DTV_CMD(DTV_ATSCMH_FIC_VER), _DTV_CMD(DTV_ATSCMH_NOG), _DTV_CMD(DTV_ATSCMH_TNOG), _DTV_CMD(DTV_ATSCMH_SGN), _DTV_CMD(DTV_ATSCMH_PRC), _DTV_CMD(DTV_ATSCMH_RS_FRAME_MODE), _DTV_CMD(DTV_ATSCMH_RS_CODE_MODE_PRI), _DTV_CMD(DTV_ATSCMH_RS_CODE_MODE_SEC), _DTV_CMD(DTV_ATSCMH_SCCC_BLOCK_MODE), _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_A), _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_B), _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_C), _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_D), /* Statistics API */ _DTV_CMD(DTV_STAT_SIGNAL_STRENGTH), _DTV_CMD(DTV_STAT_CNR), _DTV_CMD(DTV_STAT_PRE_ERROR_BIT_COUNT), _DTV_CMD(DTV_STAT_PRE_TOTAL_BIT_COUNT), _DTV_CMD(DTV_STAT_POST_ERROR_BIT_COUNT), _DTV_CMD(DTV_STAT_POST_TOTAL_BIT_COUNT), _DTV_CMD(DTV_STAT_ERROR_BLOCK_COUNT), _DTV_CMD(DTV_STAT_TOTAL_BLOCK_COUNT), }; static char *dtv_cmd_name(u32 cmd) { cmd = array_index_nospec(cmd, DTV_MAX_COMMAND); return dtv_cmds[cmd]; } /* Synchronise the legacy tuning parameters into the cache, so that demodulator * drivers can use a single set_frontend tuning function, regardless of whether * it's being used for the legacy or new API, reducing code and complexity. */ static int dtv_property_cache_sync(struct dvb_frontend *fe, struct dtv_frontend_properties *c, const struct dvb_frontend_parameters *p) { c->frequency = p->frequency; c->inversion = p->inversion; switch (dvbv3_type(c->delivery_system)) { case DVBV3_QPSK: dev_dbg(fe->dvb->device, "%s: Preparing QPSK req\n", __func__); c->symbol_rate = p->u.qpsk.symbol_rate; c->fec_inner = p->u.qpsk.fec_inner; break; case DVBV3_QAM: dev_dbg(fe->dvb->device, "%s: Preparing QAM req\n", __func__); c->symbol_rate = p->u.qam.symbol_rate; c->fec_inner = p->u.qam.fec_inner; c->modulation = p->u.qam.modulation; break; case DVBV3_OFDM: dev_dbg(fe->dvb->device, "%s: Preparing OFDM req\n", __func__); switch (p->u.ofdm.bandwidth) { case BANDWIDTH_10_MHZ: c->bandwidth_hz = 10000000; break; case BANDWIDTH_8_MHZ: c->bandwidth_hz = 8000000; break; case BANDWIDTH_7_MHZ: c->bandwidth_hz = 7000000; break; case BANDWIDTH_6_MHZ: c->bandwidth_hz = 6000000; break; case BANDWIDTH_5_MHZ: c->bandwidth_hz = 5000000; break; case BANDWIDTH_1_712_MHZ: c->bandwidth_hz = 1712000; break; case BANDWIDTH_AUTO: c->bandwidth_hz = 0; } c->code_rate_HP = p->u.ofdm.code_rate_HP; c->code_rate_LP = p->u.ofdm.code_rate_LP; c->modulation = p->u.ofdm.constellation; c->transmission_mode = p->u.ofdm.transmission_mode; c->guard_interval = p->u.ofdm.guard_interval; c->hierarchy = p->u.ofdm.hierarchy_information; break; case DVBV3_ATSC: dev_dbg(fe->dvb->device, "%s: Preparing ATSC req\n", __func__); c->modulation = p->u.vsb.modulation; if (c->delivery_system == SYS_ATSCMH) break; if ((c->modulation == VSB_8) || (c->modulation == VSB_16)) c->delivery_system = SYS_ATSC; else c->delivery_system = SYS_DVBC_ANNEX_B; break; case DVBV3_UNKNOWN: dev_err(fe->dvb->device, "%s: doesn't know how to handle a DVBv3 call to delivery system %i\n", __func__, c->delivery_system); return -EINVAL; } return 0; } /* Ensure the cached values are set correctly in the frontend * legacy tuning structures, for the advanced tuning API. 
*/ static int dtv_property_legacy_params_sync(struct dvb_frontend *fe, const struct dtv_frontend_properties *c, struct dvb_frontend_parameters *p) { p->frequency = c->frequency; p->inversion = c->inversion; switch (dvbv3_type(c->delivery_system)) { case DVBV3_UNKNOWN: dev_err(fe->dvb->device, "%s: doesn't know how to handle a DVBv3 call to delivery system %i\n", __func__, c->delivery_system); return -EINVAL; case DVBV3_QPSK: dev_dbg(fe->dvb->device, "%s: Preparing QPSK req\n", __func__); p->u.qpsk.symbol_rate = c->symbol_rate; p->u.qpsk.fec_inner = c->fec_inner; break; case DVBV3_QAM: dev_dbg(fe->dvb->device, "%s: Preparing QAM req\n", __func__); p->u.qam.symbol_rate = c->symbol_rate; p->u.qam.fec_inner = c->fec_inner; p->u.qam.modulation = c->modulation; break; case DVBV3_OFDM: dev_dbg(fe->dvb->device, "%s: Preparing OFDM req\n", __func__); switch (c->bandwidth_hz) { case 10000000: p->u.ofdm.bandwidth = BANDWIDTH_10_MHZ; break; case 8000000: p->u.ofdm.bandwidth = BANDWIDTH_8_MHZ; break; case 7000000: p->u.ofdm.bandwidth = BANDWIDTH_7_MHZ; break; case 6000000: p->u.ofdm.bandwidth = BANDWIDTH_6_MHZ; break; case 5000000: p->u.ofdm.bandwidth = BANDWIDTH_5_MHZ; break; case 1712000: p->u.ofdm.bandwidth = BANDWIDTH_1_712_MHZ; break; case 0: default: p->u.ofdm.bandwidth = BANDWIDTH_AUTO; } p->u.ofdm.code_rate_HP = c->code_rate_HP; p->u.ofdm.code_rate_LP = c->code_rate_LP; p->u.ofdm.constellation = c->modulation; p->u.ofdm.transmission_mode = c->transmission_mode; p->u.ofdm.guard_interval = c->guard_interval; p->u.ofdm.hierarchy_information = c->hierarchy; break; case DVBV3_ATSC: dev_dbg(fe->dvb->device, "%s: Preparing VSB req\n", __func__); p->u.vsb.modulation = c->modulation; break; } return 0; } /** * dtv_get_frontend - calls a callback for retrieving DTV parameters * @fe: struct dvb_frontend pointer * @c: struct dtv_frontend_properties pointer (DVBv5 cache) * @p_out: struct dvb_frontend_parameters pointer (DVBv3 FE struct) * * This routine calls either the DVBv3 or DVBv5 get_frontend call. * If c is not null, it will update the DVBv5 cache struct pointed by it. * If p_out is not null, it will update the DVBv3 params pointed by it. 
*/ static int dtv_get_frontend(struct dvb_frontend *fe, struct dtv_frontend_properties *c, struct dvb_frontend_parameters *p_out) { int r; if (fe->ops.get_frontend) { r = fe->ops.get_frontend(fe, c); if (unlikely(r < 0)) return r; if (p_out) dtv_property_legacy_params_sync(fe, c, p_out); return 0; } /* As everything is in cache, get_frontend fops are always supported */ return 0; } static int dvb_frontend_handle_ioctl(struct file *file, unsigned int cmd, void *parg); static int dtv_property_process_get(struct dvb_frontend *fe, const struct dtv_frontend_properties *c, struct dtv_property *tvp, struct file *file) { int ncaps; unsigned int len = 1; switch (tvp->cmd) { case DTV_ENUM_DELSYS: ncaps = 0; while (ncaps < MAX_DELSYS && fe->ops.delsys[ncaps]) { tvp->u.buffer.data[ncaps] = fe->ops.delsys[ncaps]; ncaps++; } tvp->u.buffer.len = ncaps; len = ncaps; break; case DTV_FREQUENCY: tvp->u.data = c->frequency; break; case DTV_MODULATION: tvp->u.data = c->modulation; break; case DTV_BANDWIDTH_HZ: tvp->u.data = c->bandwidth_hz; break; case DTV_INVERSION: tvp->u.data = c->inversion; break; case DTV_SYMBOL_RATE: tvp->u.data = c->symbol_rate; break; case DTV_INNER_FEC: tvp->u.data = c->fec_inner; break; case DTV_PILOT: tvp->u.data = c->pilot; break; case DTV_ROLLOFF: tvp->u.data = c->rolloff; break; case DTV_DELIVERY_SYSTEM: tvp->u.data = c->delivery_system; break; case DTV_VOLTAGE: tvp->u.data = c->voltage; break; case DTV_TONE: tvp->u.data = c->sectone; break; case DTV_API_VERSION: tvp->u.data = (DVB_API_VERSION << 8) | DVB_API_VERSION_MINOR; break; case DTV_CODE_RATE_HP: tvp->u.data = c->code_rate_HP; break; case DTV_CODE_RATE_LP: tvp->u.data = c->code_rate_LP; break; case DTV_GUARD_INTERVAL: tvp->u.data = c->guard_interval; break; case DTV_TRANSMISSION_MODE: tvp->u.data = c->transmission_mode; break; case DTV_HIERARCHY: tvp->u.data = c->hierarchy; break; case DTV_INTERLEAVING: tvp->u.data = c->interleaving; break; /* ISDB-T Support here */ case DTV_ISDBT_PARTIAL_RECEPTION: tvp->u.data = c->isdbt_partial_reception; break; case DTV_ISDBT_SOUND_BROADCASTING: tvp->u.data = c->isdbt_sb_mode; break; case DTV_ISDBT_SB_SUBCHANNEL_ID: tvp->u.data = c->isdbt_sb_subchannel; break; case DTV_ISDBT_SB_SEGMENT_IDX: tvp->u.data = c->isdbt_sb_segment_idx; break; case DTV_ISDBT_SB_SEGMENT_COUNT: tvp->u.data = c->isdbt_sb_segment_count; break; case DTV_ISDBT_LAYER_ENABLED: tvp->u.data = c->isdbt_layer_enabled; break; case DTV_ISDBT_LAYERA_FEC: tvp->u.data = c->layer[0].fec; break; case DTV_ISDBT_LAYERA_MODULATION: tvp->u.data = c->layer[0].modulation; break; case DTV_ISDBT_LAYERA_SEGMENT_COUNT: tvp->u.data = c->layer[0].segment_count; break; case DTV_ISDBT_LAYERA_TIME_INTERLEAVING: tvp->u.data = c->layer[0].interleaving; break; case DTV_ISDBT_LAYERB_FEC: tvp->u.data = c->layer[1].fec; break; case DTV_ISDBT_LAYERB_MODULATION: tvp->u.data = c->layer[1].modulation; break; case DTV_ISDBT_LAYERB_SEGMENT_COUNT: tvp->u.data = c->layer[1].segment_count; break; case DTV_ISDBT_LAYERB_TIME_INTERLEAVING: tvp->u.data = c->layer[1].interleaving; break; case DTV_ISDBT_LAYERC_FEC: tvp->u.data = c->layer[2].fec; break; case DTV_ISDBT_LAYERC_MODULATION: tvp->u.data = c->layer[2].modulation; break; case DTV_ISDBT_LAYERC_SEGMENT_COUNT: tvp->u.data = c->layer[2].segment_count; break; case DTV_ISDBT_LAYERC_TIME_INTERLEAVING: tvp->u.data = c->layer[2].interleaving; break; /* Multistream support */ case DTV_STREAM_ID: case DTV_DVBT2_PLP_ID_LEGACY: tvp->u.data = c->stream_id; break; /* Physical layer scrambling support */ case 
DTV_SCRAMBLING_SEQUENCE_INDEX: tvp->u.data = c->scrambling_sequence_index; break; /* ATSC-MH */ case DTV_ATSCMH_FIC_VER: tvp->u.data = fe->dtv_property_cache.atscmh_fic_ver; break; case DTV_ATSCMH_PARADE_ID: tvp->u.data = fe->dtv_property_cache.atscmh_parade_id; break; case DTV_ATSCMH_NOG: tvp->u.data = fe->dtv_property_cache.atscmh_nog; break; case DTV_ATSCMH_TNOG: tvp->u.data = fe->dtv_property_cache.atscmh_tnog; break; case DTV_ATSCMH_SGN: tvp->u.data = fe->dtv_property_cache.atscmh_sgn; break; case DTV_ATSCMH_PRC: tvp->u.data = fe->dtv_property_cache.atscmh_prc; break; case DTV_ATSCMH_RS_FRAME_MODE: tvp->u.data = fe->dtv_property_cache.atscmh_rs_frame_mode; break; case DTV_ATSCMH_RS_FRAME_ENSEMBLE: tvp->u.data = fe->dtv_property_cache.atscmh_rs_frame_ensemble; break; case DTV_ATSCMH_RS_CODE_MODE_PRI: tvp->u.data = fe->dtv_property_cache.atscmh_rs_code_mode_pri; break; case DTV_ATSCMH_RS_CODE_MODE_SEC: tvp->u.data = fe->dtv_property_cache.atscmh_rs_code_mode_sec; break; case DTV_ATSCMH_SCCC_BLOCK_MODE: tvp->u.data = fe->dtv_property_cache.atscmh_sccc_block_mode; break; case DTV_ATSCMH_SCCC_CODE_MODE_A: tvp->u.data = fe->dtv_property_cache.atscmh_sccc_code_mode_a; break; case DTV_ATSCMH_SCCC_CODE_MODE_B: tvp->u.data = fe->dtv_property_cache.atscmh_sccc_code_mode_b; break; case DTV_ATSCMH_SCCC_CODE_MODE_C: tvp->u.data = fe->dtv_property_cache.atscmh_sccc_code_mode_c; break; case DTV_ATSCMH_SCCC_CODE_MODE_D: tvp->u.data = fe->dtv_property_cache.atscmh_sccc_code_mode_d; break; case DTV_LNA: tvp->u.data = c->lna; break; /* Fill quality measures */ case DTV_STAT_SIGNAL_STRENGTH: tvp->u.st = c->strength; if (tvp->u.buffer.len > MAX_DTV_STATS * sizeof(u32)) tvp->u.buffer.len = MAX_DTV_STATS * sizeof(u32); len = tvp->u.buffer.len; break; case DTV_STAT_CNR: tvp->u.st = c->cnr; if (tvp->u.buffer.len > MAX_DTV_STATS * sizeof(u32)) tvp->u.buffer.len = MAX_DTV_STATS * sizeof(u32); len = tvp->u.buffer.len; break; case DTV_STAT_PRE_ERROR_BIT_COUNT: tvp->u.st = c->pre_bit_error; if (tvp->u.buffer.len > MAX_DTV_STATS * sizeof(u32)) tvp->u.buffer.len = MAX_DTV_STATS * sizeof(u32); len = tvp->u.buffer.len; break; case DTV_STAT_PRE_TOTAL_BIT_COUNT: tvp->u.st = c->pre_bit_count; if (tvp->u.buffer.len > MAX_DTV_STATS * sizeof(u32)) tvp->u.buffer.len = MAX_DTV_STATS * sizeof(u32); len = tvp->u.buffer.len; break; case DTV_STAT_POST_ERROR_BIT_COUNT: tvp->u.st = c->post_bit_error; if (tvp->u.buffer.len > MAX_DTV_STATS * sizeof(u32)) tvp->u.buffer.len = MAX_DTV_STATS * sizeof(u32); len = tvp->u.buffer.len; break; case DTV_STAT_POST_TOTAL_BIT_COUNT: tvp->u.st = c->post_bit_count; if (tvp->u.buffer.len > MAX_DTV_STATS * sizeof(u32)) tvp->u.buffer.len = MAX_DTV_STATS * sizeof(u32); len = tvp->u.buffer.len; break; case DTV_STAT_ERROR_BLOCK_COUNT: tvp->u.st = c->block_error; if (tvp->u.buffer.len > MAX_DTV_STATS * sizeof(u32)) tvp->u.buffer.len = MAX_DTV_STATS * sizeof(u32); len = tvp->u.buffer.len; break; case DTV_STAT_TOTAL_BLOCK_COUNT: tvp->u.st = c->block_count; if (tvp->u.buffer.len > MAX_DTV_STATS * sizeof(u32)) tvp->u.buffer.len = MAX_DTV_STATS * sizeof(u32); len = tvp->u.buffer.len; break; default: dev_dbg(fe->dvb->device, "%s: FE property %d doesn't exist\n", __func__, tvp->cmd); return -EINVAL; } if (len < 1) len = 1; dev_dbg(fe->dvb->device, "%s: GET cmd 0x%08x (%s) len %d: %*ph\n", __func__, tvp->cmd, dtv_cmd_name(tvp->cmd), tvp->u.buffer.len, tvp->u.buffer.len, tvp->u.buffer.data); return 0; } static int dtv_set_frontend(struct dvb_frontend *fe); static bool is_dvbv3_delsys(u32 delsys) { return (delsys == 
SYS_DVBT) || (delsys == SYS_DVBC_ANNEX_A) || (delsys == SYS_DVBS) || (delsys == SYS_ATSC); } /** * emulate_delivery_system - emulate a DVBv5 delivery system with a DVBv3 type * @fe: struct frontend; * @delsys: DVBv5 type that will be used for emulation * * Provides emulation for delivery systems that are compatible with the old * DVBv3 call. Among its usages, it provices support for ISDB-T, and allows * using a DVB-S2 only frontend just like it were a DVB-S, if the frontend * parameters are compatible with DVB-S spec. */ static int emulate_delivery_system(struct dvb_frontend *fe, u32 delsys) { int i; struct dtv_frontend_properties *c = &fe->dtv_property_cache; c->delivery_system = delsys; /* * If the call is for ISDB-T, put it into full-seg, auto mode, TV */ if (c->delivery_system == SYS_ISDBT) { dev_dbg(fe->dvb->device, "%s: Using defaults for SYS_ISDBT\n", __func__); if (!c->bandwidth_hz) c->bandwidth_hz = 6000000; c->isdbt_partial_reception = 0; c->isdbt_sb_mode = 0; c->isdbt_sb_subchannel = 0; c->isdbt_sb_segment_idx = 0; c->isdbt_sb_segment_count = 0; c->isdbt_layer_enabled = 7; for (i = 0; i < 3; i++) { c->layer[i].fec = FEC_AUTO; c->layer[i].modulation = QAM_AUTO; c->layer[i].interleaving = 0; c->layer[i].segment_count = 0; } } dev_dbg(fe->dvb->device, "%s: change delivery system on cache to %d\n", __func__, c->delivery_system); return 0; } /** * dvbv5_set_delivery_system - Sets the delivery system for a DVBv5 API call * @fe: frontend struct * @desired_system: delivery system requested by the user * * A DVBv5 call know what's the desired system it wants. So, set it. * * There are, however, a few known issues with early DVBv5 applications that * are also handled by this logic: * * 1) Some early apps use SYS_UNDEFINED as the desired delivery system. * This is an API violation, but, as we don't want to break userspace, * convert it to the first supported delivery system. * 2) Some apps might be using a DVBv5 call in a wrong way, passing, for * example, SYS_DVBT instead of SYS_ISDBT. This is because early usage of * ISDB-T provided backward compat with DVB-T. */ static int dvbv5_set_delivery_system(struct dvb_frontend *fe, u32 desired_system) { int ncaps; u32 delsys = SYS_UNDEFINED; struct dtv_frontend_properties *c = &fe->dtv_property_cache; enum dvbv3_emulation_type type; /* * It was reported that some old DVBv5 applications were * filling delivery_system with SYS_UNDEFINED. If this happens, * assume that the application wants to use the first supported * delivery system. */ if (desired_system == SYS_UNDEFINED) desired_system = fe->ops.delsys[0]; /* * This is a DVBv5 call. So, it likely knows the supported * delivery systems. So, check if the desired delivery system is * supported */ ncaps = 0; while (ncaps < MAX_DELSYS && fe->ops.delsys[ncaps]) { if (fe->ops.delsys[ncaps] == desired_system) { c->delivery_system = desired_system; dev_dbg(fe->dvb->device, "%s: Changing delivery system to %d\n", __func__, desired_system); return 0; } ncaps++; } /* * The requested delivery system isn't supported. Maybe userspace * is requesting a DVBv3 compatible delivery system. 
* * The emulation only works if the desired system is one of the * delivery systems supported by DVBv3 API */ if (!is_dvbv3_delsys(desired_system)) { dev_dbg(fe->dvb->device, "%s: Delivery system %d not supported.\n", __func__, desired_system); return -EINVAL; } type = dvbv3_type(desired_system); /* * Get the last non-DVBv3 delivery system that has the same type * of the desired system */ ncaps = 0; while (ncaps < MAX_DELSYS && fe->ops.delsys[ncaps]) { if (dvbv3_type(fe->ops.delsys[ncaps]) == type) delsys = fe->ops.delsys[ncaps]; ncaps++; } /* There's nothing compatible with the desired delivery system */ if (delsys == SYS_UNDEFINED) { dev_dbg(fe->dvb->device, "%s: Delivery system %d not supported on emulation mode.\n", __func__, desired_system); return -EINVAL; } dev_dbg(fe->dvb->device, "%s: Using delivery system %d emulated as if it were %d\n", __func__, delsys, desired_system); return emulate_delivery_system(fe, desired_system); } /** * dvbv3_set_delivery_system - Sets the delivery system for a DVBv3 API call * @fe: frontend struct * * A DVBv3 call doesn't know what's the desired system it wants. It also * doesn't allow to switch between different types. Due to that, userspace * should use DVBv5 instead. * However, in order to avoid breaking userspace API, limited backward * compatibility support is provided. * * There are some delivery systems that are incompatible with DVBv3 calls. * * This routine should work fine for frontends that support just one delivery * system. * * For frontends that support multiple frontends: * 1) It defaults to use the first supported delivery system. There's an * userspace application that allows changing it at runtime; * * 2) If the current delivery system is not compatible with DVBv3, it gets * the first one that it is compatible. * * NOTE: in order for this to work with applications like Kaffeine that * uses a DVBv5 call for DVB-S2 and a DVBv3 call to go back to * DVB-S, drivers that support both DVB-S and DVB-S2 should have the * SYS_DVBS entry before the SYS_DVBS2, otherwise it won't switch back * to DVB-S. 
*/ static int dvbv3_set_delivery_system(struct dvb_frontend *fe) { int ncaps; u32 delsys = SYS_UNDEFINED; struct dtv_frontend_properties *c = &fe->dtv_property_cache; /* If not set yet, defaults to the first supported delivery system */ if (c->delivery_system == SYS_UNDEFINED) c->delivery_system = fe->ops.delsys[0]; /* * Trivial case: just use the current one, if it already a DVBv3 * delivery system */ if (is_dvbv3_delsys(c->delivery_system)) { dev_dbg(fe->dvb->device, "%s: Using delivery system to %d\n", __func__, c->delivery_system); return 0; } /* * Seek for the first delivery system that it is compatible with a * DVBv3 standard */ ncaps = 0; while (ncaps < MAX_DELSYS && fe->ops.delsys[ncaps]) { if (dvbv3_type(fe->ops.delsys[ncaps]) != DVBV3_UNKNOWN) { delsys = fe->ops.delsys[ncaps]; break; } ncaps++; } if (delsys == SYS_UNDEFINED) { dev_dbg(fe->dvb->device, "%s: Couldn't find a delivery system that works with FE_SET_FRONTEND\n", __func__); return -EINVAL; } return emulate_delivery_system(fe, delsys); } static void prepare_tuning_algo_parameters(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dvb_frontend_tune_settings fetunesettings = { 0 }; /* get frontend-specific tuning settings */ if (fe->ops.get_tune_settings && (fe->ops.get_tune_settings(fe, &fetunesettings) == 0)) { fepriv->min_delay = (fetunesettings.min_delay_ms * HZ) / 1000; fepriv->max_drift = fetunesettings.max_drift; fepriv->step_size = fetunesettings.step_size; } else { /* default values */ switch (c->delivery_system) { case SYS_DSS: case SYS_DVBS: case SYS_DVBS2: case SYS_ISDBS: case SYS_TURBO: case SYS_DVBC_ANNEX_A: case SYS_DVBC_ANNEX_C: fepriv->min_delay = HZ / 20; fepriv->step_size = c->symbol_rate / 16000; fepriv->max_drift = c->symbol_rate / 2000; break; case SYS_DVBT: case SYS_DVBT2: case SYS_ISDBT: case SYS_DTMB: fepriv->min_delay = HZ / 20; fepriv->step_size = dvb_frontend_get_stepsize(fe) * 2; fepriv->max_drift = fepriv->step_size + 1; break; default: /* * FIXME: This sounds wrong! if freqency_stepsize is * defined by the frontend, why not use it??? */ fepriv->min_delay = HZ / 20; fepriv->step_size = 0; /* no zigzag */ fepriv->max_drift = 0; break; } } if (dvb_override_tune_delay > 0) fepriv->min_delay = (dvb_override_tune_delay * HZ) / 1000; } /** * dtv_property_process_set - Sets a single DTV property * @fe: Pointer to &struct dvb_frontend * @file: Pointer to &struct file * @cmd: Digital TV command * @data: An unsigned 32-bits number * * This routine assigns the property * value to the corresponding member of * &struct dtv_frontend_properties * * Returns: * Zero on success, negative errno on failure. */ static int dtv_property_process_set(struct dvb_frontend *fe, struct file *file, u32 cmd, u32 data) { int r = 0; struct dtv_frontend_properties *c = &fe->dtv_property_cache; /** Dump DTV command name and value*/ if (!cmd || cmd > DTV_MAX_COMMAND) dev_warn(fe->dvb->device, "%s: SET cmd 0x%08x undefined\n", __func__, cmd); else dev_dbg(fe->dvb->device, "%s: SET cmd 0x%08x (%s) to 0x%08x\n", __func__, cmd, dtv_cmd_name(cmd), data); switch (cmd) { case DTV_CLEAR: /* * Reset a cache of data specific to the frontend here. This does * not effect hardware. 
*/ dvb_frontend_clear_cache(fe); break; case DTV_TUNE: /* * Use the cached Digital TV properties to tune the * frontend */ dev_dbg(fe->dvb->device, "%s: Setting the frontend from property cache\n", __func__); r = dtv_set_frontend(fe); break; case DTV_FREQUENCY: c->frequency = data; break; case DTV_MODULATION: c->modulation = data; break; case DTV_BANDWIDTH_HZ: c->bandwidth_hz = data; break; case DTV_INVERSION: c->inversion = data; break; case DTV_SYMBOL_RATE: c->symbol_rate = data; break; case DTV_INNER_FEC: c->fec_inner = data; break; case DTV_PILOT: c->pilot = data; break; case DTV_ROLLOFF: c->rolloff = data; break; case DTV_DELIVERY_SYSTEM: r = dvbv5_set_delivery_system(fe, data); break; case DTV_VOLTAGE: c->voltage = data; r = dvb_frontend_handle_ioctl(file, FE_SET_VOLTAGE, (void *)c->voltage); break; case DTV_TONE: c->sectone = data; r = dvb_frontend_handle_ioctl(file, FE_SET_TONE, (void *)c->sectone); break; case DTV_CODE_RATE_HP: c->code_rate_HP = data; break; case DTV_CODE_RATE_LP: c->code_rate_LP = data; break; case DTV_GUARD_INTERVAL: c->guard_interval = data; break; case DTV_TRANSMISSION_MODE: c->transmission_mode = data; break; case DTV_HIERARCHY: c->hierarchy = data; break; case DTV_INTERLEAVING: c->interleaving = data; break; /* ISDB-T Support here */ case DTV_ISDBT_PARTIAL_RECEPTION: c->isdbt_partial_reception = data; break; case DTV_ISDBT_SOUND_BROADCASTING: c->isdbt_sb_mode = data; break; case DTV_ISDBT_SB_SUBCHANNEL_ID: c->isdbt_sb_subchannel = data; break; case DTV_ISDBT_SB_SEGMENT_IDX: c->isdbt_sb_segment_idx = data; break; case DTV_ISDBT_SB_SEGMENT_COUNT: c->isdbt_sb_segment_count = data; break; case DTV_ISDBT_LAYER_ENABLED: c->isdbt_layer_enabled = data; break; case DTV_ISDBT_LAYERA_FEC: c->layer[0].fec = data; break; case DTV_ISDBT_LAYERA_MODULATION: c->layer[0].modulation = data; break; case DTV_ISDBT_LAYERA_SEGMENT_COUNT: c->layer[0].segment_count = data; break; case DTV_ISDBT_LAYERA_TIME_INTERLEAVING: c->layer[0].interleaving = data; break; case DTV_ISDBT_LAYERB_FEC: c->layer[1].fec = data; break; case DTV_ISDBT_LAYERB_MODULATION: c->layer[1].modulation = data; break; case DTV_ISDBT_LAYERB_SEGMENT_COUNT: c->layer[1].segment_count = data; break; case DTV_ISDBT_LAYERB_TIME_INTERLEAVING: c->layer[1].interleaving = data; break; case DTV_ISDBT_LAYERC_FEC: c->layer[2].fec = data; break; case DTV_ISDBT_LAYERC_MODULATION: c->layer[2].modulation = data; break; case DTV_ISDBT_LAYERC_SEGMENT_COUNT: c->layer[2].segment_count = data; break; case DTV_ISDBT_LAYERC_TIME_INTERLEAVING: c->layer[2].interleaving = data; break; /* Multistream support */ case DTV_STREAM_ID: case DTV_DVBT2_PLP_ID_LEGACY: c->stream_id = data; break; /* Physical layer scrambling support */ case DTV_SCRAMBLING_SEQUENCE_INDEX: c->scrambling_sequence_index = data; break; /* ATSC-MH */ case DTV_ATSCMH_PARADE_ID: fe->dtv_property_cache.atscmh_parade_id = data; break; case DTV_ATSCMH_RS_FRAME_ENSEMBLE: fe->dtv_property_cache.atscmh_rs_frame_ensemble = data; break; case DTV_LNA: c->lna = data; if (fe->ops.set_lna) r = fe->ops.set_lna(fe); if (r < 0) c->lna = LNA_AUTO; break; default: return -EINVAL; } return r; } static int dvb_frontend_do_ioctl(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; struct dvb_frontend *fe = dvbdev->priv; struct dvb_frontend_private *fepriv = fe->frontend_priv; int err; dev_dbg(fe->dvb->device, "%s: (%d)\n", __func__, _IOC_NR(cmd)); if (down_interruptible(&fepriv->sem)) return -ERESTARTSYS; if (fe->exit != DVB_FE_NO_EXIT) { 
up(&fepriv->sem); return -ENODEV; } /* * If the frontend is opened in read-only mode, only the ioctls * that don't interfere with the tune logic should be accepted. * That allows an external application to monitor the DVB QoS and * statistics parameters. * * That matches all _IOR() ioctls, except for two special cases: * - FE_GET_EVENT is part of the tuning logic on a DVB application; * - FE_DISEQC_RECV_SLAVE_REPLY is part of DiSEqC 2.0 * setup * So, those two ioctls should also return -EPERM, as otherwise * reading from them would interfere with a DVB tune application */ if ((file->f_flags & O_ACCMODE) == O_RDONLY && (_IOC_DIR(cmd) != _IOC_READ || cmd == FE_GET_EVENT || cmd == FE_DISEQC_RECV_SLAVE_REPLY)) { up(&fepriv->sem); return -EPERM; } err = dvb_frontend_handle_ioctl(file, cmd, parg); up(&fepriv->sem); return err; } static long dvb_frontend_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct dvb_device *dvbdev = file->private_data; if (!dvbdev) return -ENODEV; return dvb_usercopy(file, cmd, arg, dvb_frontend_do_ioctl); } #ifdef CONFIG_COMPAT struct compat_dtv_property { __u32 cmd; __u32 reserved[3]; union { __u32 data; struct dtv_fe_stats st; struct { __u8 data[32]; __u32 len; __u32 reserved1[3]; compat_uptr_t reserved2; } buffer; } u; int result; } __attribute__ ((packed)); struct compat_dtv_properties { __u32 num; compat_uptr_t props; }; #define COMPAT_FE_SET_PROPERTY _IOW('o', 82, struct compat_dtv_properties) #define COMPAT_FE_GET_PROPERTY _IOR('o', 83, struct compat_dtv_properties) static int dvb_frontend_handle_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct dvb_device *dvbdev = file->private_data; struct dvb_frontend *fe = dvbdev->priv; struct dvb_frontend_private *fepriv = fe->frontend_priv; int i, err = 0; if (cmd == COMPAT_FE_SET_PROPERTY) { struct compat_dtv_properties prop, *tvps = NULL; struct compat_dtv_property *tvp = NULL; if (copy_from_user(&prop, compat_ptr(arg), sizeof(prop))) return -EFAULT; tvps = ∝ /* * Put an arbitrary limit on the number of messages that can * be sent at once */ if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS)) return -EINVAL; tvp = memdup_array_user(compat_ptr(tvps->props), tvps->num, sizeof(*tvp)); if (IS_ERR(tvp)) return PTR_ERR(tvp); for (i = 0; i < tvps->num; i++) { err = dtv_property_process_set(fe, file, (tvp + i)->cmd, (tvp + i)->u.data); if (err < 0) { kfree(tvp); return err; } } kfree(tvp); } else if (cmd == COMPAT_FE_GET_PROPERTY) { struct compat_dtv_properties prop, *tvps = NULL; struct compat_dtv_property *tvp = NULL; struct dtv_frontend_properties getp = fe->dtv_property_cache; if (copy_from_user(&prop, compat_ptr(arg), sizeof(prop))) return -EFAULT; tvps = ∝ /* * Put an arbitrary limit on the number of messages that can * be sent at once */ if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS)) return -EINVAL; tvp = memdup_array_user(compat_ptr(tvps->props), tvps->num, sizeof(*tvp)); if (IS_ERR(tvp)) return PTR_ERR(tvp); /* * Let's use our own copy of property cache, in order to * avoid mangling with DTV zigzag logic, as drivers might * return crap, if they don't check if the data is available * before updating the properties cache. 
*/ if (fepriv->state != FESTATE_IDLE) { err = dtv_get_frontend(fe, &getp, NULL); if (err < 0) { kfree(tvp); return err; } } for (i = 0; i < tvps->num; i++) { err = dtv_property_process_get( fe, &getp, (struct dtv_property *)(tvp + i), file); if (err < 0) { kfree(tvp); return err; } } if (copy_to_user((void __user *)compat_ptr(tvps->props), tvp, tvps->num * sizeof(struct compat_dtv_property))) { kfree(tvp); return -EFAULT; } kfree(tvp); } return err; } static long dvb_frontend_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct dvb_device *dvbdev = file->private_data; struct dvb_frontend *fe = dvbdev->priv; struct dvb_frontend_private *fepriv = fe->frontend_priv; int err; if (cmd == COMPAT_FE_SET_PROPERTY || cmd == COMPAT_FE_GET_PROPERTY) { if (down_interruptible(&fepriv->sem)) return -ERESTARTSYS; err = dvb_frontend_handle_compat_ioctl(file, cmd, arg); up(&fepriv->sem); return err; } return dvb_frontend_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); } #endif static int dtv_set_frontend(struct dvb_frontend *fe) { struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; u32 rolloff = 0; if (dvb_frontend_check_parameters(fe) < 0) return -EINVAL; /* * Initialize output parameters to match the values given by * the user. FE_SET_FRONTEND triggers an initial frontend event * with status = 0, which copies output parameters to userspace. */ dtv_property_legacy_params_sync(fe, c, &fepriv->parameters_out); /* * Be sure that the bandwidth will be filled for all * non-satellite systems, as tuners need to know what * low pass/Nyquist half filter should be applied, in * order to avoid inter-channel noise. * * ISDB-T and DVB-T/T2 already sets bandwidth. * ATSC and DVB-C don't set, so, the core should fill it. * * On DVB-C Annex A and C, the bandwidth is a function of * the roll-off and symbol rate. Annex B defines different * roll-off factors depending on the modulation. Fortunately, * Annex B is only used with 6MHz, so there's no need to * calculate it. * * While not officially supported, a side effect of handling it at * the cache level is that a program could retrieve the bandwidth * via DTV_BANDWIDTH_HZ, which may be useful for test programs. 
*/ switch (c->delivery_system) { case SYS_ATSC: case SYS_DVBC_ANNEX_B: c->bandwidth_hz = 6000000; break; case SYS_DVBC_ANNEX_A: rolloff = 115; break; case SYS_DVBC_ANNEX_C: rolloff = 113; break; case SYS_DSS: rolloff = 120; break; case SYS_DVBS: case SYS_TURBO: case SYS_ISDBS: rolloff = 135; break; case SYS_DVBS2: switch (c->rolloff) { case ROLLOFF_20: rolloff = 120; break; case ROLLOFF_25: rolloff = 125; break; default: case ROLLOFF_35: rolloff = 135; } break; default: break; } if (rolloff) c->bandwidth_hz = mult_frac(c->symbol_rate, rolloff, 100); /* force auto frequency inversion if requested */ if (dvb_force_auto_inversion) c->inversion = INVERSION_AUTO; /* * without hierarchical coding code_rate_LP is irrelevant, * so we tolerate the otherwise invalid FEC_NONE setting */ if (c->hierarchy == HIERARCHY_NONE && c->code_rate_LP == FEC_NONE) c->code_rate_LP = FEC_AUTO; prepare_tuning_algo_parameters(fe); fepriv->state = FESTATE_RETUNE; /* Request the search algorithm to search */ fepriv->algo_status |= DVBFE_ALGO_SEARCH_AGAIN; dvb_frontend_clear_events(fe); dvb_frontend_add_event(fe, 0); dvb_frontend_wakeup(fe); fepriv->status = 0; return 0; } static int dvb_get_property(struct dvb_frontend *fe, struct file *file, struct dtv_properties *tvps) { struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dtv_property *tvp = NULL; struct dtv_frontend_properties getp; int i, err; memcpy(&getp, &fe->dtv_property_cache, sizeof(getp)); dev_dbg(fe->dvb->device, "%s: properties.num = %d\n", __func__, tvps->num); dev_dbg(fe->dvb->device, "%s: properties.props = %p\n", __func__, tvps->props); /* * Put an arbitrary limit on the number of messages that can * be sent at once */ if (!tvps->num || tvps->num > DTV_IOCTL_MAX_MSGS) return -EINVAL; tvp = memdup_array_user((void __user *)tvps->props, tvps->num, sizeof(*tvp)); if (IS_ERR(tvp)) return PTR_ERR(tvp); /* * Let's use our own copy of property cache, in order to * avoid mangling with DTV zigzag logic, as drivers might * return crap, if they don't check if the data is available * before updating the properties cache. */ if (fepriv->state != FESTATE_IDLE) { err = dtv_get_frontend(fe, &getp, NULL); if (err < 0) goto out; } for (i = 0; i < tvps->num; i++) { err = dtv_property_process_get(fe, &getp, tvp + i, file); if (err < 0) goto out; } if (copy_to_user((void __user *)tvps->props, tvp, tvps->num * sizeof(struct dtv_property))) { err = -EFAULT; goto out; } err = 0; out: kfree(tvp); return err; } static int dvb_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *p_out) { struct dtv_frontend_properties getp; /* * Let's use our own copy of property cache, in order to * avoid mangling with DTV zigzag logic, as drivers might * return crap, if they don't check if the data is available * before updating the properties cache. 
*/ memcpy(&getp, &fe->dtv_property_cache, sizeof(getp)); return dtv_get_frontend(fe, &getp, p_out); } static int dvb_frontend_handle_ioctl(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; struct dvb_frontend *fe = dvbdev->priv; struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; int i, err = -ENOTSUPP; dev_dbg(fe->dvb->device, "%s:\n", __func__); switch (cmd) { case FE_SET_PROPERTY: { struct dtv_properties *tvps = parg; struct dtv_property *tvp = NULL; dev_dbg(fe->dvb->device, "%s: properties.num = %d\n", __func__, tvps->num); dev_dbg(fe->dvb->device, "%s: properties.props = %p\n", __func__, tvps->props); /* * Put an arbitrary limit on the number of messages that can * be sent at once */ if (!tvps->num || (tvps->num > DTV_IOCTL_MAX_MSGS)) return -EINVAL; tvp = memdup_array_user((void __user *)tvps->props, tvps->num, sizeof(*tvp)); if (IS_ERR(tvp)) return PTR_ERR(tvp); for (i = 0; i < tvps->num; i++) { err = dtv_property_process_set(fe, file, (tvp + i)->cmd, (tvp + i)->u.data); if (err < 0) { kfree(tvp); return err; } } kfree(tvp); err = 0; break; } case FE_GET_PROPERTY: err = dvb_get_property(fe, file, parg); break; case FE_GET_INFO: { struct dvb_frontend_info *info = parg; memset(info, 0, sizeof(*info)); strscpy(info->name, fe->ops.info.name, sizeof(info->name)); info->symbol_rate_min = fe->ops.info.symbol_rate_min; info->symbol_rate_max = fe->ops.info.symbol_rate_max; info->symbol_rate_tolerance = fe->ops.info.symbol_rate_tolerance; info->caps = fe->ops.info.caps; info->frequency_stepsize = dvb_frontend_get_stepsize(fe); dvb_frontend_get_frequency_limits(fe, &info->frequency_min, &info->frequency_max, &info->frequency_tolerance); /* * Associate the 4 delivery systems supported by DVBv3 * API with their DVBv5 counterpart. For the other standards, * use the closest type, assuming that it would hopefully * work with a DVBv3 application. * It should be noticed that, on multi-frontend devices with * different types (terrestrial and cable, for example), * a pure DVBv3 application won't be able to use all delivery * systems. Yet, changing the DVBv5 cache to the other delivery * system should be enough for making it work. 
*/ switch (dvbv3_type(c->delivery_system)) { case DVBV3_QPSK: info->type = FE_QPSK; break; case DVBV3_ATSC: info->type = FE_ATSC; break; case DVBV3_QAM: info->type = FE_QAM; break; case DVBV3_OFDM: info->type = FE_OFDM; break; default: dev_err(fe->dvb->device, "%s: doesn't know how to handle a DVBv3 call to delivery system %i\n", __func__, c->delivery_system); info->type = FE_OFDM; } dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n", __func__, c->delivery_system, info->type); /* Set CAN_INVERSION_AUTO bit on in other than oneshot mode */ if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT)) info->caps |= FE_CAN_INVERSION_AUTO; err = 0; break; } case FE_READ_STATUS: { enum fe_status *status = parg; /* if retune was requested but hasn't occurred yet, prevent * that user get signal state from previous tuning */ if (fepriv->state == FESTATE_RETUNE || fepriv->state == FESTATE_ERROR) { err = 0; *status = 0; break; } if (fe->ops.read_status) err = fe->ops.read_status(fe, status); break; } case FE_DISEQC_RESET_OVERLOAD: if (fe->ops.diseqc_reset_overload) { err = fe->ops.diseqc_reset_overload(fe); fepriv->state = FESTATE_DISEQC; fepriv->status = 0; } break; case FE_DISEQC_SEND_MASTER_CMD: if (fe->ops.diseqc_send_master_cmd) { struct dvb_diseqc_master_cmd *cmd = parg; if (cmd->msg_len > sizeof(cmd->msg)) { err = -EINVAL; break; } err = fe->ops.diseqc_send_master_cmd(fe, cmd); fepriv->state = FESTATE_DISEQC; fepriv->status = 0; } break; case FE_DISEQC_SEND_BURST: if (fe->ops.diseqc_send_burst) { err = fe->ops.diseqc_send_burst(fe, (long)parg); fepriv->state = FESTATE_DISEQC; fepriv->status = 0; } break; case FE_SET_TONE: if (fe->ops.set_tone) { fepriv->tone = (long)parg; err = fe->ops.set_tone(fe, fepriv->tone); fepriv->state = FESTATE_DISEQC; fepriv->status = 0; } break; case FE_SET_VOLTAGE: if (fe->ops.set_voltage) { fepriv->voltage = (long)parg; err = fe->ops.set_voltage(fe, fepriv->voltage); fepriv->state = FESTATE_DISEQC; fepriv->status = 0; } break; case FE_DISEQC_RECV_SLAVE_REPLY: if (fe->ops.diseqc_recv_slave_reply) err = fe->ops.diseqc_recv_slave_reply(fe, parg); break; case FE_ENABLE_HIGH_LNB_VOLTAGE: if (fe->ops.enable_high_lnb_voltage) err = fe->ops.enable_high_lnb_voltage(fe, (long)parg); break; case FE_SET_FRONTEND_TUNE_MODE: fepriv->tune_mode_flags = (unsigned long)parg; err = 0; break; /* DEPRECATED dish control ioctls */ case FE_DISHNETWORK_SEND_LEGACY_CMD: if (fe->ops.dishnetwork_send_legacy_command) { err = fe->ops.dishnetwork_send_legacy_command(fe, (unsigned long)parg); fepriv->state = FESTATE_DISEQC; fepriv->status = 0; } else if (fe->ops.set_voltage) { /* * NOTE: This is a fallback condition. Some frontends * (stv0299 for instance) take longer than 8msec to * respond to a set_voltage command. Those switches * need custom routines to switch properly. For all * other frontends, the following should work ok. * Dish network legacy switches (as used by Dish500) * are controlled by sending 9-bit command words * spaced 8msec apart. * the actual command word is switch/port dependent * so it is up to the userspace application to send * the right command. 
* The command must always start with a '0' after * initialization, so parg is 8 bits and does not * include the initialization or start bit */ unsigned long swcmd = ((unsigned long)parg) << 1; ktime_t nexttime; ktime_t tv[10]; int i; u8 last = 1; if (dvb_frontend_debug) dprintk("switch command: 0x%04lx\n", swcmd); nexttime = ktime_get_boottime(); if (dvb_frontend_debug) tv[0] = nexttime; /* before sending a command, initialize by sending * a 32ms 18V to the switch */ fe->ops.set_voltage(fe, SEC_VOLTAGE_18); dvb_frontend_sleep_until(&nexttime, 32000); for (i = 0; i < 9; i++) { if (dvb_frontend_debug) tv[i + 1] = ktime_get_boottime(); if ((swcmd & 0x01) != last) { /* set voltage to (last ? 13V : 18V) */ fe->ops.set_voltage(fe, (last) ? SEC_VOLTAGE_13 : SEC_VOLTAGE_18); last = (last) ? 0 : 1; } swcmd = swcmd >> 1; if (i != 8) dvb_frontend_sleep_until(&nexttime, 8000); } if (dvb_frontend_debug) { dprintk("(adapter %d): switch delay (should be 32k followed by all 8k)\n", fe->dvb->num); for (i = 1; i < 10; i++) pr_info("%d: %d\n", i, (int)ktime_us_delta(tv[i], tv[i - 1])); } err = 0; fepriv->state = FESTATE_DISEQC; fepriv->status = 0; } break; /* DEPRECATED statistics ioctls */ case FE_READ_BER: if (fe->ops.read_ber) { if (fepriv->thread) err = fe->ops.read_ber(fe, parg); else err = -EAGAIN; } break; case FE_READ_SIGNAL_STRENGTH: if (fe->ops.read_signal_strength) { if (fepriv->thread) err = fe->ops.read_signal_strength(fe, parg); else err = -EAGAIN; } break; case FE_READ_SNR: if (fe->ops.read_snr) { if (fepriv->thread) err = fe->ops.read_snr(fe, parg); else err = -EAGAIN; } break; case FE_READ_UNCORRECTED_BLOCKS: if (fe->ops.read_ucblocks) { if (fepriv->thread) err = fe->ops.read_ucblocks(fe, parg); else err = -EAGAIN; } break; /* DEPRECATED DVBv3 ioctls */ case FE_SET_FRONTEND: err = dvbv3_set_delivery_system(fe); if (err) break; err = dtv_property_cache_sync(fe, c, parg); if (err) break; err = dtv_set_frontend(fe); break; case FE_GET_EVENT: err = dvb_frontend_get_event(fe, parg, file->f_flags); break; case FE_GET_FRONTEND: err = dvb_get_frontend(fe, parg); break; default: return -ENOTSUPP; } /* switch */ return err; } static __poll_t dvb_frontend_poll(struct file *file, struct poll_table_struct *wait) { struct dvb_device *dvbdev = file->private_data; struct dvb_frontend *fe = dvbdev->priv; struct dvb_frontend_private *fepriv = fe->frontend_priv; dev_dbg_ratelimited(fe->dvb->device, "%s:\n", __func__); poll_wait(file, &fepriv->events.wait_queue, wait); if (fepriv->events.eventw != fepriv->events.eventr) return (EPOLLIN | EPOLLRDNORM | EPOLLPRI); return 0; } static int dvb_frontend_open(struct inode *inode, struct file *file) { struct dvb_device *dvbdev = file->private_data; struct dvb_frontend *fe = dvbdev->priv; struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dvb_adapter *adapter = fe->dvb; int ret; dev_dbg(fe->dvb->device, "%s:\n", __func__); if (fe->exit == DVB_FE_DEVICE_REMOVED) return -ENODEV; if (adapter->mfe_shared == 2) { mutex_lock(&adapter->mfe_lock); if ((file->f_flags & O_ACCMODE) != O_RDONLY) { if (adapter->mfe_dvbdev && !adapter->mfe_dvbdev->writers) { mutex_unlock(&adapter->mfe_lock); return -EBUSY; } adapter->mfe_dvbdev = dvbdev; } } else if (adapter->mfe_shared) { mutex_lock(&adapter->mfe_lock); if (!adapter->mfe_dvbdev) adapter->mfe_dvbdev = dvbdev; else if (adapter->mfe_dvbdev != dvbdev) { struct dvb_device *mfedev = adapter->mfe_dvbdev; struct dvb_frontend *mfe = mfedev->priv; struct dvb_frontend_private *mfepriv = mfe->frontend_priv; int mferetry = 
(dvb_mfe_wait_time << 1); mutex_unlock(&adapter->mfe_lock); while (mferetry-- && (mfedev->users != -1 || mfepriv->thread)) { if (msleep_interruptible(500)) { if (signal_pending(current)) return -EINTR; } } mutex_lock(&adapter->mfe_lock); if (adapter->mfe_dvbdev != dvbdev) { mfedev = adapter->mfe_dvbdev; mfe = mfedev->priv; mfepriv = mfe->frontend_priv; if (mfedev->users != -1 || mfepriv->thread) { mutex_unlock(&adapter->mfe_lock); return -EBUSY; } adapter->mfe_dvbdev = dvbdev; } } } if (dvbdev->users == -1 && fe->ops.ts_bus_ctrl) { if ((ret = fe->ops.ts_bus_ctrl(fe, 1)) < 0) goto err0; /* If we took control of the bus, we need to force reinitialization. This is because many ts_bus_ctrl() functions strobe the RESET pin on the demod, and if the frontend thread already exists then the dvb_init() routine won't get called (which is what usually does initial register configuration). */ fepriv->reinitialise = 1; } if ((ret = dvb_generic_open(inode, file)) < 0) goto err1; if ((file->f_flags & O_ACCMODE) != O_RDONLY) { /* normal tune mode when opened R/W */ fepriv->tune_mode_flags &= ~FE_TUNE_MODE_ONESHOT; fepriv->tone = -1; fepriv->voltage = -1; #ifdef CONFIG_MEDIA_CONTROLLER_DVB mutex_lock(&fe->dvb->mdev_lock); if (fe->dvb->mdev) { mutex_lock(&fe->dvb->mdev->graph_mutex); if (fe->dvb->mdev->enable_source) ret = fe->dvb->mdev->enable_source( dvbdev->entity, &fepriv->pipe); mutex_unlock(&fe->dvb->mdev->graph_mutex); if (ret) { mutex_unlock(&fe->dvb->mdev_lock); dev_err(fe->dvb->device, "Tuner is busy. Error %d\n", ret); goto err2; } } mutex_unlock(&fe->dvb->mdev_lock); #endif ret = dvb_frontend_start(fe); if (ret) goto err3; /* empty event queue */ fepriv->events.eventr = fepriv->events.eventw = 0; } dvb_frontend_get(fe); if (adapter->mfe_shared) mutex_unlock(&adapter->mfe_lock); return ret; err3: #ifdef CONFIG_MEDIA_CONTROLLER_DVB mutex_lock(&fe->dvb->mdev_lock); if (fe->dvb->mdev) { mutex_lock(&fe->dvb->mdev->graph_mutex); if (fe->dvb->mdev->disable_source) fe->dvb->mdev->disable_source(dvbdev->entity); mutex_unlock(&fe->dvb->mdev->graph_mutex); } mutex_unlock(&fe->dvb->mdev_lock); err2: #endif dvb_generic_release(inode, file); err1: if (dvbdev->users == -1 && fe->ops.ts_bus_ctrl) fe->ops.ts_bus_ctrl(fe, 0); err0: if (adapter->mfe_shared) mutex_unlock(&adapter->mfe_lock); return ret; } static int dvb_frontend_release(struct inode *inode, struct file *file) { struct dvb_device *dvbdev = file->private_data; struct dvb_frontend *fe = dvbdev->priv; struct dvb_frontend_private *fepriv = fe->frontend_priv; int ret; dev_dbg(fe->dvb->device, "%s:\n", __func__); if ((file->f_flags & O_ACCMODE) != O_RDONLY) { fepriv->release_jiffies = jiffies; mb(); } ret = dvb_generic_release(inode, file); if (dvbdev->users == -1) { wake_up(&fepriv->wait_queue); #ifdef CONFIG_MEDIA_CONTROLLER_DVB mutex_lock(&fe->dvb->mdev_lock); if (fe->dvb->mdev) { mutex_lock(&fe->dvb->mdev->graph_mutex); if (fe->dvb->mdev->disable_source) fe->dvb->mdev->disable_source(dvbdev->entity); mutex_unlock(&fe->dvb->mdev->graph_mutex); } mutex_unlock(&fe->dvb->mdev_lock); #endif if (fe->exit != DVB_FE_NO_EXIT) wake_up(&dvbdev->wait_queue); if (fe->ops.ts_bus_ctrl) fe->ops.ts_bus_ctrl(fe, 0); } dvb_frontend_put(fe); return ret; } static const struct file_operations dvb_frontend_fops = { .owner = THIS_MODULE, .unlocked_ioctl = dvb_frontend_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = dvb_frontend_compat_ioctl, #endif .poll = dvb_frontend_poll, .open = dvb_frontend_open, .release = dvb_frontend_release, .llseek = noop_llseek, }; int 
dvb_frontend_suspend(struct dvb_frontend *fe) { int ret = 0; dev_dbg(fe->dvb->device, "%s: adap=%d fe=%d\n", __func__, fe->dvb->num, fe->id); if (fe->ops.tuner_ops.suspend) ret = fe->ops.tuner_ops.suspend(fe); else if (fe->ops.tuner_ops.sleep) ret = fe->ops.tuner_ops.sleep(fe); if (fe->ops.suspend) ret = fe->ops.suspend(fe); else if (fe->ops.sleep) ret = fe->ops.sleep(fe); return ret; } EXPORT_SYMBOL(dvb_frontend_suspend); int dvb_frontend_resume(struct dvb_frontend *fe) { struct dvb_frontend_private *fepriv = fe->frontend_priv; int ret = 0; dev_dbg(fe->dvb->device, "%s: adap=%d fe=%d\n", __func__, fe->dvb->num, fe->id); fe->exit = DVB_FE_DEVICE_RESUME; if (fe->ops.resume) ret = fe->ops.resume(fe); else if (fe->ops.init) ret = fe->ops.init(fe); if (fe->ops.tuner_ops.resume) ret = fe->ops.tuner_ops.resume(fe); else if (fe->ops.tuner_ops.init) ret = fe->ops.tuner_ops.init(fe); if (fe->ops.set_tone && fepriv->tone != -1) fe->ops.set_tone(fe, fepriv->tone); if (fe->ops.set_voltage && fepriv->voltage != -1) fe->ops.set_voltage(fe, fepriv->voltage); fe->exit = DVB_FE_NO_EXIT; fepriv->state = FESTATE_RETUNE; dvb_frontend_wakeup(fe); return ret; } EXPORT_SYMBOL(dvb_frontend_resume); int dvb_register_frontend(struct dvb_adapter *dvb, struct dvb_frontend *fe) { struct dvb_frontend_private *fepriv; const struct dvb_device dvbdev_template = { .users = ~0, .writers = 1, .readers = (~0) - 1, .fops = &dvb_frontend_fops, #if defined(CONFIG_MEDIA_CONTROLLER_DVB) .name = fe->ops.info.name, #endif }; int ret; dev_dbg(dvb->device, "%s:\n", __func__); if (mutex_lock_interruptible(&frontend_mutex)) return -ERESTARTSYS; fe->frontend_priv = kzalloc(sizeof(struct dvb_frontend_private), GFP_KERNEL); if (!fe->frontend_priv) { mutex_unlock(&frontend_mutex); return -ENOMEM; } fepriv = fe->frontend_priv; kref_init(&fe->refcount); /* * After initialization, there need to be two references: one * for dvb_unregister_frontend(), and another one for * dvb_frontend_detach(). 
	 */
	dvb_frontend_get(fe);

	sema_init(&fepriv->sem, 1);
	init_waitqueue_head(&fepriv->wait_queue);
	init_waitqueue_head(&fepriv->events.wait_queue);
	mutex_init(&fepriv->events.mtx);
	fe->dvb = dvb;
	fepriv->inversion = INVERSION_OFF;

	dev_info(fe->dvb->device,
		 "DVB: registering adapter %i frontend %i (%s)...\n",
		 fe->dvb->num, fe->id, fe->ops.info.name);

	ret = dvb_register_device(fe->dvb, &fepriv->dvbdev, &dvbdev_template,
				  fe, DVB_DEVICE_FRONTEND, 0);
	if (ret) {
		dvb_frontend_put(fe);
		mutex_unlock(&frontend_mutex);
		return ret;
	}

	/*
	 * Initialize the cache to the proper values according to the
	 * first supported delivery system (ops->delsys[0])
	 */
	fe->dtv_property_cache.delivery_system = fe->ops.delsys[0];
	dvb_frontend_clear_cache(fe);

	mutex_unlock(&frontend_mutex);
	return 0;
}
EXPORT_SYMBOL(dvb_register_frontend);

int dvb_unregister_frontend(struct dvb_frontend *fe)
{
	struct dvb_frontend_private *fepriv = fe->frontend_priv;

	dev_dbg(fe->dvb->device, "%s:\n", __func__);

	mutex_lock(&frontend_mutex);
	dvb_frontend_stop(fe);
	dvb_remove_device(fepriv->dvbdev);

	/* fe is invalid now */
	mutex_unlock(&frontend_mutex);
	dvb_frontend_put(fe);
	return 0;
}
EXPORT_SYMBOL(dvb_unregister_frontend);

static void dvb_frontend_invoke_release(struct dvb_frontend *fe,
					void (*release)(struct dvb_frontend *fe))
{
	if (release) {
		release(fe);
#ifdef CONFIG_MEDIA_ATTACH
		dvb_detach(release);
#endif
	}
}

void dvb_frontend_detach(struct dvb_frontend *fe)
{
	dvb_frontend_invoke_release(fe, fe->ops.release_sec);
	dvb_frontend_invoke_release(fe, fe->ops.tuner_ops.release);
	dvb_frontend_invoke_release(fe, fe->ops.analog_ops.release);
	dvb_frontend_put(fe);
}
EXPORT_SYMBOL(dvb_frontend_detach);
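For context, the property-cache and FE_SET_PROPERTY handling above is what a DVBv5 application drives from userspace. The following is a minimal illustrative sketch, not taken from this file: it queues a handful of DTV_* properties and commits them with DTV_TUNE through the FE_SET_PROPERTY ioctl. The device node, frequency and symbol-rate values are made-up examples.

/* Illustrative userspace sketch; assumes a DVB-S capable adapter 0. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dvb/frontend.h>

int main(void)
{
	struct dtv_property props[] = {
		{ .cmd = DTV_DELIVERY_SYSTEM, .u.data = SYS_DVBS },
		{ .cmd = DTV_FREQUENCY,       .u.data = 1251500 },	/* IF in kHz, example value */
		{ .cmd = DTV_SYMBOL_RATE,     .u.data = 27500000 },
		{ .cmd = DTV_INNER_FEC,       .u.data = FEC_AUTO },
		{ .cmd = DTV_TUNE,            .u.data = 0 },		/* commit the cached properties */
	};
	struct dtv_properties cmdseq = {
		.num   = sizeof(props) / sizeof(props[0]),
		.props = props,
	};
	int fd = open("/dev/dvb/adapter0/frontend0", O_RDWR);		/* example device node */

	if (fd < 0) {
		perror("open frontend");
		return 1;
	}
	if (ioctl(fd, FE_SET_PROPERTY, &cmdseq) < 0) {
		perror("FE_SET_PROPERTY");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}

Reading the tuned state back works the same way in reverse, via FE_GET_PROPERTY with the desired DTV_* commands, which is what dtv_property_process_get() above serves.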
/* SPDX-License-Identifier: GPL-2.0 */
/* thread_info.h: common low-level thread information accessors
 *
 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
 * - Incorporating suggestions made by Linus Torvalds
 */

#ifndef _LINUX_THREAD_INFO_H
#define _LINUX_THREAD_INFO_H

#include <linux/types.h>
#include <linux/limits.h>
#include <linux/bug.h>
#include <linux/restart_block.h>
#include <linux/errno.h>

#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
 * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the
 * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels,
 * including <asm/current.h> can cause a circular dependency on some platforms.
 */
#include <asm/current.h>
#define current_thread_info() ((struct thread_info *)current)
#endif

#include <linux/bitops.h>

/*
 * For per-arch arch_within_stack_frames() implementations, defined in
 * asm/thread_info.h.
 */
enum {
	BAD_STACK = -1,
	NOT_STACK = 0,
	GOOD_FRAME,
	GOOD_STACK,
};

#ifdef CONFIG_GENERIC_ENTRY
enum syscall_work_bit {
	SYSCALL_WORK_BIT_SECCOMP,
	SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT,
	SYSCALL_WORK_BIT_SYSCALL_TRACE,
	SYSCALL_WORK_BIT_SYSCALL_EMU,
	SYSCALL_WORK_BIT_SYSCALL_AUDIT,
	SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH,
	SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP,
};

#define SYSCALL_WORK_SECCOMP		BIT(SYSCALL_WORK_BIT_SECCOMP)
#define SYSCALL_WORK_SYSCALL_TRACEPOINT	BIT(SYSCALL_WORK_BIT_SYSCALL_TRACEPOINT)
#define SYSCALL_WORK_SYSCALL_TRACE	BIT(SYSCALL_WORK_BIT_SYSCALL_TRACE)
#define SYSCALL_WORK_SYSCALL_EMU	BIT(SYSCALL_WORK_BIT_SYSCALL_EMU)
#define SYSCALL_WORK_SYSCALL_AUDIT	BIT(SYSCALL_WORK_BIT_SYSCALL_AUDIT)
#define SYSCALL_WORK_SYSCALL_USER_DISPATCH BIT(SYSCALL_WORK_BIT_SYSCALL_USER_DISPATCH)
#define SYSCALL_WORK_SYSCALL_EXIT_TRAP	BIT(SYSCALL_WORK_BIT_SYSCALL_EXIT_TRAP)
#endif

#include <asm/thread_info.h>

#ifndef TIF_NEED_RESCHED_LAZY
#ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY
#error Inconsistent PREEMPT_LAZY
#endif
#define TIF_NEED_RESCHED_LAZY TIF_NEED_RESCHED
#define _TIF_NEED_RESCHED_LAZY _TIF_NEED_RESCHED
#endif

#ifdef __KERNEL__

#ifndef arch_set_restart_data
#define arch_set_restart_data(restart) do { } while (0)
#endif

static inline long set_restart_fn(struct restart_block *restart,
					long (*fn)(struct restart_block *))
{
	restart->fn = fn;
	arch_set_restart_data(restart);
	return -ERESTART_RESTARTBLOCK;
}

#ifndef THREAD_ALIGN
#define THREAD_ALIGN	THREAD_SIZE
#endif

#define THREADINFO_GFP		(GFP_KERNEL_ACCOUNT | __GFP_ZERO)

/*
 * flag set/clear/test wrappers
 * - pass TIF_xxxx constants to these functions
 */

static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
{
	set_bit(flag, (unsigned long *)&ti->flags);
}

static inline void clear_ti_thread_flag(struct thread_info *ti, int flag)
{
	clear_bit(flag, (unsigned long *)&ti->flags);
}

static inline void update_ti_thread_flag(struct thread_info *ti, int flag,
					 bool value)
{
	if (value)
		set_ti_thread_flag(ti, flag);
	else
		clear_ti_thread_flag(ti, flag);
}

static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_and_set_bit(flag, (unsigned long *)&ti->flags);
}

static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_and_clear_bit(flag, (unsigned long *)&ti->flags);
}

static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
{
	return test_bit(flag, (unsigned long *)&ti->flags);
}

/*
 * This may be used in noinstr code, and needs to be __always_inline to prevent
 * inadvertent instrumentation.
 */
static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti)
{
	return READ_ONCE(ti->flags);
}

#define set_thread_flag(flag) \
	set_ti_thread_flag(current_thread_info(), flag)
#define clear_thread_flag(flag) \
	clear_ti_thread_flag(current_thread_info(), flag)
#define update_thread_flag(flag, value) \
	update_ti_thread_flag(current_thread_info(), flag, value)
#define test_and_set_thread_flag(flag) \
	test_and_set_ti_thread_flag(current_thread_info(), flag)
#define test_and_clear_thread_flag(flag) \
	test_and_clear_ti_thread_flag(current_thread_info(), flag)
#define test_thread_flag(flag) \
	test_ti_thread_flag(current_thread_info(), flag)
#define read_thread_flags() \
	read_ti_thread_flags(current_thread_info())

#define read_task_thread_flags(t) \
	read_ti_thread_flags(task_thread_info(t))

#ifdef CONFIG_GENERIC_ENTRY
#define set_syscall_work(fl) \
	set_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
#define test_syscall_work(fl) \
	test_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)
#define clear_syscall_work(fl) \
	clear_bit(SYSCALL_WORK_BIT_##fl, &current_thread_info()->syscall_work)

#define set_task_syscall_work(t, fl) \
	set_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
#define test_task_syscall_work(t, fl) \
	test_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)
#define clear_task_syscall_work(t, fl) \
	clear_bit(SYSCALL_WORK_BIT_##fl, &task_thread_info(t)->syscall_work)

#else /* CONFIG_GENERIC_ENTRY */

#define set_syscall_work(fl) \
	set_ti_thread_flag(current_thread_info(), TIF_##fl)
#define test_syscall_work(fl) \
	test_ti_thread_flag(current_thread_info(), TIF_##fl)
#define clear_syscall_work(fl) \
	clear_ti_thread_flag(current_thread_info(), TIF_##fl)

#define set_task_syscall_work(t, fl) \
	set_ti_thread_flag(task_thread_info(t), TIF_##fl)
#define test_task_syscall_work(t, fl) \
	test_ti_thread_flag(task_thread_info(t), TIF_##fl)
#define clear_task_syscall_work(t, fl) \
	clear_ti_thread_flag(task_thread_info(t), TIF_##fl)
#endif /* !CONFIG_GENERIC_ENTRY */

#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H

static __always_inline bool tif_test_bit(int bit)
{
	return arch_test_bit(bit,
			     (unsigned long *)(&current_thread_info()->flags));
}

#else

static __always_inline bool tif_test_bit(int bit)
{
	return test_bit(bit,
			(unsigned long *)(&current_thread_info()->flags));
}

#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */

static __always_inline bool tif_need_resched(void)
{
	return tif_test_bit(TIF_NEED_RESCHED);
}

#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
static inline int arch_within_stack_frames(const void * const stack,
					   const void * const stackend,
					   const void *obj, unsigned long len)
{
	return 0;
}
#endif

#ifndef arch_setup_new_exec
static inline void arch_setup_new_exec(void) { }
#endif

void arch_task_cache_init(void); /* for CONFIG_SH */

void arch_release_task_struct(struct task_struct *tsk);
int arch_dup_task_struct(struct task_struct *dst,
			 struct task_struct *src);

#endif	/* __KERNEL__ */

#endif /* _LINUX_THREAD_INFO_H */
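As a usage illustration for the flag accessors above (not part of the header): a long-running kernel loop can poll the current task's TIF_NEED_RESCHED bit through tif_need_resched() and drop into cond_resched() when it is set. The example_drain() helper and its arguments are hypothetical; only tif_need_resched() and cond_resched() are real interfaces.

/* Minimal sketch, kernel context assumed; example_drain() is hypothetical. */
#include <linux/sched.h>
#include <linux/thread_info.h>

static void example_drain(int *items, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		items[i] = 0;			/* stand-in for real per-item work */

		/* TIF_NEED_RESCHED is read via the tif_test_bit() wrapper above */
		if (tif_need_resched())
			cond_resched();		/* voluntary reschedule point */
	}
}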
#include <linux/notifier.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/fib_notifier.h>
#include <net/netns/ipv6.h>
#include <net/ip6_fib.h>

int call_fib6_notifier(struct notifier_block *nb,
		       enum fib_event_type event_type,
		       struct fib_notifier_info *info)
{
	info->family = AF_INET6;
	return call_fib_notifier(nb, event_type, info);
}

int call_fib6_notifiers(struct net *net, enum fib_event_type event_type,
			struct fib_notifier_info *info)
{
	info->family = AF_INET6;
	return call_fib_notifiers(net, event_type, info);
}

static unsigned int fib6_seq_read(const struct net *net)
{
	return fib6_tables_seq_read(net) + fib6_rules_seq_read(net);
}

static int fib6_dump(struct net *net, struct notifier_block *nb,
		     struct netlink_ext_ack *extack)
{
	int err;

	err = fib6_rules_dump(net, nb, extack);
	if (err)
		return err;

	return fib6_tables_dump(net, nb, extack);
}

static const struct fib_notifier_ops fib6_notifier_ops_template = {
	.family		= AF_INET6,
	.fib_seq_read	= fib6_seq_read,
	.fib_dump	= fib6_dump,
	.owner		= THIS_MODULE,
};

int __net_init fib6_notifier_init(struct net *net)
{
	struct fib_notifier_ops *ops;

	ops = fib_notifier_ops_register(&fib6_notifier_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);
	net->ipv6.notifier_ops = ops;

	return 0;
}

void __net_exit fib6_notifier_exit(struct net *net)
{
	fib_notifier_ops_unregister(net->ipv6.notifier_ops);
}
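A sketch of the consumer side (not part of this file): call_fib6_notifier()/call_fib6_notifiers() above end up invoking notifier callbacks such as the hypothetical one below, which filters on info->family and on the FIB entry events it cares about. Registration is done with register_fib_notifier(), whose signature has changed across kernel versions, so only the callback and the notifier_block are shown here.

/* Illustrative consumer sketch; example_fib6_event() is hypothetical. */
#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/socket.h>
#include <net/fib_notifier.h>

static int example_fib6_event(struct notifier_block *nb,
			      unsigned long event, void *ptr)
{
	struct fib_notifier_info *info = ptr;

	/* The core tags every forwarded event with AF_INET6 (see above). */
	if (info->family != AF_INET6)
		return NOTIFY_DONE;

	switch (event) {
	case FIB_EVENT_ENTRY_ADD:
	case FIB_EVENT_ENTRY_DEL:
		/* container_of(info, ...) on the per-event info struct would
		 * give access to the route itself; omitted here. */
		pr_debug("fib6 entry event %lu\n", event);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

/* Passed to register_fib_notifier() by the interested driver. */
static struct notifier_block example_fib6_nb = {
	.notifier_call = example_fib6_event,
};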
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Forwarding database
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/times.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/unaligned.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>
#include <trace/events/bridge.h>
#include "br_private.h"

static const struct rhashtable_params br_fdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_fdb_entry, key),
	.key_len = sizeof(struct net_bridge_fdb_key),
	.automatic_shrinking = true,
};

static struct kmem_cache *br_fdb_cache __read_mostly;

int __init br_fdb_init(void)
{
	br_fdb_cache = KMEM_CACHE(net_bridge_fdb_entry, SLAB_HWCACHE_ALIGN);
	if (!br_fdb_cache)
		return -ENOMEM;

	return 0;
}

void br_fdb_fini(void)
{
	kmem_cache_destroy(br_fdb_cache);
}

int br_fdb_hash_init(struct net_bridge *br)
{
	return rhashtable_init(&br->fdb_hash_tbl, &br_fdb_rht_params);
}

void br_fdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->fdb_hash_tbl);
}

/* if topology_changing then use forward_delay (default 15 sec)
 * otherwise keep longer (default 5 minutes)
 */
static inline unsigned long hold_time(const struct net_bridge *br)
{
	return br->topology_change ? br->forward_delay : br->ageing_time;
}

static inline int has_expired(const struct net_bridge *br,
			      const struct net_bridge_fdb_entry *fdb)
{
	return !test_bit(BR_FDB_STATIC, &fdb->flags) &&
	       !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags) &&
	       time_before_eq(fdb->updated + hold_time(br), jiffies);
}

static int fdb_to_nud(const struct net_bridge *br,
		      const struct net_bridge_fdb_entry *fdb)
{
	if (test_bit(BR_FDB_LOCAL, &fdb->flags))
		return NUD_PERMANENT;
	else if (test_bit(BR_FDB_STATIC, &fdb->flags))
		return NUD_NOARP;
	else if (has_expired(br, fdb))
		return NUD_STALE;
	else
		return NUD_REACHABLE;
}

static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
			 const struct net_bridge_fdb_entry *fdb,
			 u32 portid, u32 seq, int type, unsigned int flags)
{
	const struct net_bridge_port *dst = READ_ONCE(fdb->dst);
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	u32 ext_flags = 0;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = 0;
	ndm->ndm_type	 = 0;
	ndm->ndm_ifindex = dst ?
dst->dev->ifindex : br->dev->ifindex; ndm->ndm_state = fdb_to_nud(br, fdb); if (test_bit(BR_FDB_OFFLOADED, &fdb->flags)) ndm->ndm_flags |= NTF_OFFLOADED; if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) ndm->ndm_flags |= NTF_EXT_LEARNED; if (test_bit(BR_FDB_STICKY, &fdb->flags)) ndm->ndm_flags |= NTF_STICKY; if (test_bit(BR_FDB_LOCKED, &fdb->flags)) ext_flags |= NTF_EXT_LOCKED; if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr)) goto nla_put_failure; if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex)) goto nla_put_failure; if (nla_put_u32(skb, NDA_FLAGS_EXT, ext_flags)) goto nla_put_failure; ci.ndm_used = jiffies_to_clock_t(now - fdb->used); ci.ndm_confirmed = 0; ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated); ci.ndm_refcnt = 0; if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) goto nla_put_failure; if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16), &fdb->key.vlan_id)) goto nla_put_failure; if (test_bit(BR_FDB_NOTIFY, &fdb->flags)) { struct nlattr *nest = nla_nest_start(skb, NDA_FDB_EXT_ATTRS); u8 notify_bits = FDB_NOTIFY_BIT; if (!nest) goto nla_put_failure; if (test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags)) notify_bits |= FDB_NOTIFY_INACTIVE_BIT; if (nla_put_u8(skb, NFEA_ACTIVITY_NOTIFY, notify_bits)) { nla_nest_cancel(skb, nest); goto nla_put_failure; } nla_nest_end(skb, nest); } nlmsg_end(skb, nlh); return 0; nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static inline size_t fdb_nlmsg_size(void) { return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN) /* NDA_LLADDR */ + nla_total_size(sizeof(u32)) /* NDA_MASTER */ + nla_total_size(sizeof(u32)) /* NDA_FLAGS_EXT */ + nla_total_size(sizeof(u16)) /* NDA_VLAN */ + nla_total_size(sizeof(struct nda_cacheinfo)) + nla_total_size(0) /* NDA_FDB_EXT_ATTRS */ + nla_total_size(sizeof(u8)); /* NFEA_ACTIVITY_NOTIFY */ } static void fdb_notify(struct net_bridge *br, const struct net_bridge_fdb_entry *fdb, int type, bool swdev_notify) { struct net *net = dev_net(br->dev); struct sk_buff *skb; int err = -ENOBUFS; if (swdev_notify) br_switchdev_fdb_notify(br, fdb, type); skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC); if (skb == NULL) goto errout; err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0); if (err < 0) { /* -EMSGSIZE implies BUG in fdb_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); return; errout: rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); } static struct net_bridge_fdb_entry *fdb_find_rcu(struct rhashtable *tbl, const unsigned char *addr, __u16 vid) { struct net_bridge_fdb_key key; WARN_ON_ONCE(!rcu_read_lock_held()); key.vlan_id = vid; memcpy(key.addr.addr, addr, sizeof(key.addr.addr)); return rhashtable_lookup(tbl, &key, br_fdb_rht_params); } /* requires bridge hash_lock */ static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br, const unsigned char *addr, __u16 vid) { struct net_bridge_fdb_entry *fdb; lockdep_assert_held_once(&br->hash_lock); rcu_read_lock(); fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid); rcu_read_unlock(); return fdb; } struct net_device *br_fdb_find_port(const struct net_device *br_dev, const unsigned char *addr, __u16 vid) { struct net_bridge_fdb_entry *f; struct net_device *dev = NULL; struct net_bridge *br; ASSERT_RTNL(); if (!netif_is_bridge_master(br_dev)) return NULL; br = netdev_priv(br_dev); rcu_read_lock(); f = br_fdb_find_rcu(br, addr, vid); if (f && f->dst) dev = f->dst->dev; rcu_read_unlock(); return dev; } EXPORT_SYMBOL_GPL(br_fdb_find_port); struct 
net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br, const unsigned char *addr, __u16 vid) { return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid); } /* When a static FDB entry is added, the mac address from the entry is * added to the bridge private HW address list and all required ports * are then updated with the new information. * Called under RTNL. */ static void fdb_add_hw_addr(struct net_bridge *br, const unsigned char *addr) { int err; struct net_bridge_port *p; ASSERT_RTNL(); list_for_each_entry(p, &br->port_list, list) { if (!br_promisc_port(p)) { err = dev_uc_add(p->dev, addr); if (err) goto undo; } } return; undo: list_for_each_entry_continue_reverse(p, &br->port_list, list) { if (!br_promisc_port(p)) dev_uc_del(p->dev, addr); } } /* When a static FDB entry is deleted, the HW address from that entry is * also removed from the bridge private HW address list and updates all * the ports with needed information. * Called under RTNL. */ static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr) { struct net_bridge_port *p; ASSERT_RTNL(); list_for_each_entry(p, &br->port_list, list) { if (!br_promisc_port(p)) dev_uc_del(p->dev, addr); } } static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f, bool swdev_notify) { trace_fdb_delete(br, f); if (test_bit(BR_FDB_STATIC, &f->flags)) fdb_del_hw_addr(br, f->key.addr.addr); hlist_del_init_rcu(&f->fdb_node); rhashtable_remove_fast(&br->fdb_hash_tbl, &f->rhnode, br_fdb_rht_params); if (test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, &f->flags)) atomic_dec(&br->fdb_n_learned); fdb_notify(br, f, RTM_DELNEIGH, swdev_notify); kfree_rcu(f, rcu); } /* Delete a local entry if no other port had the same address. * * This function should only be called on entries with BR_FDB_LOCAL set, * so even with BR_FDB_ADDED_BY_USER cleared we never need to increase * the accounting for dynamically learned entries again. */ static void fdb_delete_local(struct net_bridge *br, const struct net_bridge_port *p, struct net_bridge_fdb_entry *f) { const unsigned char *addr = f->key.addr.addr; struct net_bridge_vlan_group *vg; const struct net_bridge_vlan *v; struct net_bridge_port *op; u16 vid = f->key.vlan_id; /* Maybe another port has same hw addr? */ list_for_each_entry(op, &br->port_list, list) { vg = nbp_vlan_group(op); if (op != p && ether_addr_equal(op->dev->dev_addr, addr) && (!vid || br_vlan_find(vg, vid))) { f->dst = op; clear_bit(BR_FDB_ADDED_BY_USER, &f->flags); return; } } vg = br_vlan_group(br); v = br_vlan_find(vg, vid); /* Maybe bridge device has same hw addr? 
*/ if (p && ether_addr_equal(br->dev->dev_addr, addr) && (!vid || (v && br_vlan_should_use(v)))) { f->dst = NULL; clear_bit(BR_FDB_ADDED_BY_USER, &f->flags); return; } fdb_delete(br, f, true); } void br_fdb_find_delete_local(struct net_bridge *br, const struct net_bridge_port *p, const unsigned char *addr, u16 vid) { struct net_bridge_fdb_entry *f; spin_lock_bh(&br->hash_lock); f = br_fdb_find(br, addr, vid); if (f && test_bit(BR_FDB_LOCAL, &f->flags) && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags) && f->dst == p) fdb_delete_local(br, p, f); spin_unlock_bh(&br->hash_lock); } static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br, struct net_bridge_port *source, const unsigned char *addr, __u16 vid, unsigned long flags) { bool learned = !test_bit(BR_FDB_ADDED_BY_USER, &flags) && !test_bit(BR_FDB_LOCAL, &flags); u32 max_learned = READ_ONCE(br->fdb_max_learned); struct net_bridge_fdb_entry *fdb; int err; if (likely(learned)) { int n_learned = atomic_read(&br->fdb_n_learned); if (unlikely(max_learned && n_learned >= max_learned)) return NULL; __set_bit(BR_FDB_DYNAMIC_LEARNED, &flags); } fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC); if (!fdb) return NULL; memcpy(fdb->key.addr.addr, addr, ETH_ALEN); WRITE_ONCE(fdb->dst, source); fdb->key.vlan_id = vid; fdb->flags = flags; fdb->updated = fdb->used = jiffies; err = rhashtable_lookup_insert_fast(&br->fdb_hash_tbl, &fdb->rhnode, br_fdb_rht_params); if (err) { kmem_cache_free(br_fdb_cache, fdb); return NULL; } if (likely(learned)) atomic_inc(&br->fdb_n_learned); hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list); return fdb; } static int fdb_add_local(struct net_bridge *br, struct net_bridge_port *source, const unsigned char *addr, u16 vid) { struct net_bridge_fdb_entry *fdb; if (!is_valid_ether_addr(addr)) return -EINVAL; fdb = br_fdb_find(br, addr, vid); if (fdb) { /* it is okay to have multiple ports with same * address, just use the first one. */ if (test_bit(BR_FDB_LOCAL, &fdb->flags)) return 0; br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n", source ? source->dev->name : br->dev->name, addr, vid); fdb_delete(br, fdb, true); } fdb = fdb_create(br, source, addr, vid, BIT(BR_FDB_LOCAL) | BIT(BR_FDB_STATIC)); if (!fdb) return -ENOMEM; fdb_add_hw_addr(br, addr); fdb_notify(br, fdb, RTM_NEWNEIGH, true); return 0; } void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr) { struct net_bridge_vlan_group *vg; struct net_bridge_fdb_entry *f; struct net_bridge *br = p->br; struct net_bridge_vlan *v; spin_lock_bh(&br->hash_lock); vg = nbp_vlan_group(p); hlist_for_each_entry(f, &br->fdb_list, fdb_node) { if (f->dst == p && test_bit(BR_FDB_LOCAL, &f->flags) && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags)) { /* delete old one */ fdb_delete_local(br, p, f); /* if this port has no vlan information * configured, we can safely be done at * this point. */ if (!vg || !vg->num_vlans) goto insert; } } insert: /* insert new address, may fail if invalid address or dup. */ fdb_add_local(br, p, newaddr, 0); if (!vg || !vg->num_vlans) goto done; /* Now add entries for every VLAN configured on the port. * This function runs under RTNL so the bitmap will not change * from under us. 
*/ list_for_each_entry(v, &vg->vlan_list, vlist) fdb_add_local(br, p, newaddr, v->vid); done: spin_unlock_bh(&br->hash_lock); } void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr) { struct net_bridge_vlan_group *vg; struct net_bridge_fdb_entry *f; struct net_bridge_vlan *v; spin_lock_bh(&br->hash_lock); /* If old entry was unassociated with any port, then delete it. */ f = br_fdb_find(br, br->dev->dev_addr, 0); if (f && test_bit(BR_FDB_LOCAL, &f->flags) && !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags)) fdb_delete_local(br, NULL, f); fdb_add_local(br, NULL, newaddr, 0); vg = br_vlan_group(br); if (!vg || !vg->num_vlans) goto out; /* Now remove and add entries for every VLAN configured on the * bridge. This function runs under RTNL so the bitmap will not * change from under us. */ list_for_each_entry(v, &vg->vlan_list, vlist) { if (!br_vlan_should_use(v)) continue; f = br_fdb_find(br, br->dev->dev_addr, v->vid); if (f && test_bit(BR_FDB_LOCAL, &f->flags) && !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags)) fdb_delete_local(br, NULL, f); fdb_add_local(br, NULL, newaddr, v->vid); } out: spin_unlock_bh(&br->hash_lock); } void br_fdb_cleanup(struct work_struct *work) { struct net_bridge *br = container_of(work, struct net_bridge, gc_work.work); struct net_bridge_fdb_entry *f = NULL; unsigned long delay = hold_time(br); unsigned long work_delay = delay; unsigned long now = jiffies; /* this part is tricky, in order to avoid blocking learning and * consequently forwarding, we rely on rcu to delete objects with * delayed freeing allowing us to continue traversing */ rcu_read_lock(); hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) { unsigned long this_timer = f->updated + delay; if (test_bit(BR_FDB_STATIC, &f->flags) || test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) { if (test_bit(BR_FDB_NOTIFY, &f->flags)) { if (time_after(this_timer, now)) work_delay = min(work_delay, this_timer - now); else if (!test_and_set_bit(BR_FDB_NOTIFY_INACTIVE, &f->flags)) fdb_notify(br, f, RTM_NEWNEIGH, false); } continue; } if (time_after(this_timer, now)) { work_delay = min(work_delay, this_timer - now); } else { spin_lock_bh(&br->hash_lock); if (!hlist_unhashed(&f->fdb_node)) fdb_delete(br, f, true); spin_unlock_bh(&br->hash_lock); } } rcu_read_unlock(); /* Cleanup minimum 10 milliseconds apart */ work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10)); mod_delayed_work(system_long_wq, &br->gc_work, work_delay); } static bool __fdb_flush_matches(const struct net_bridge *br, const struct net_bridge_fdb_entry *f, const struct net_bridge_fdb_flush_desc *desc) { const struct net_bridge_port *dst = READ_ONCE(f->dst); int port_ifidx = dst ? 
dst->dev->ifindex : br->dev->ifindex; if (desc->vlan_id && desc->vlan_id != f->key.vlan_id) return false; if (desc->port_ifindex && desc->port_ifindex != port_ifidx) return false; if (desc->flags_mask && (f->flags & desc->flags_mask) != desc->flags) return false; return true; } /* Flush forwarding database entries matching the description */ void br_fdb_flush(struct net_bridge *br, const struct net_bridge_fdb_flush_desc *desc) { struct net_bridge_fdb_entry *f; rcu_read_lock(); hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) { if (!__fdb_flush_matches(br, f, desc)) continue; spin_lock_bh(&br->hash_lock); if (!hlist_unhashed(&f->fdb_node)) fdb_delete(br, f, true); spin_unlock_bh(&br->hash_lock); } rcu_read_unlock(); } static unsigned long __ndm_state_to_fdb_flags(u16 ndm_state) { unsigned long flags = 0; if (ndm_state & NUD_PERMANENT) __set_bit(BR_FDB_LOCAL, &flags); if (ndm_state & NUD_NOARP) __set_bit(BR_FDB_STATIC, &flags); return flags; } static unsigned long __ndm_flags_to_fdb_flags(u8 ndm_flags) { unsigned long flags = 0; if (ndm_flags & NTF_USE) __set_bit(BR_FDB_ADDED_BY_USER, &flags); if (ndm_flags & NTF_EXT_LEARNED) __set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &flags); if (ndm_flags & NTF_OFFLOADED) __set_bit(BR_FDB_OFFLOADED, &flags); if (ndm_flags & NTF_STICKY) __set_bit(BR_FDB_STICKY, &flags); return flags; } static int __fdb_flush_validate_ifindex(const struct net_bridge *br, int ifindex, struct netlink_ext_ack *extack) { const struct net_device *dev; dev = __dev_get_by_index(dev_net(br->dev), ifindex); if (!dev) { NL_SET_ERR_MSG_MOD(extack, "Unknown flush device ifindex"); return -ENODEV; } if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) { NL_SET_ERR_MSG_MOD(extack, "Flush device is not a bridge or bridge port"); return -EINVAL; } if (netif_is_bridge_master(dev) && dev != br->dev) { NL_SET_ERR_MSG_MOD(extack, "Flush bridge device does not match target bridge device"); return -EINVAL; } if (netif_is_bridge_port(dev)) { struct net_bridge_port *p = br_port_get_rtnl(dev); if (p->br != br) { NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device"); return -EINVAL; } } return 0; } static const struct nla_policy br_fdb_del_bulk_policy[NDA_MAX + 1] = { [NDA_VLAN] = NLA_POLICY_RANGE(NLA_U16, 1, VLAN_N_VID - 2), [NDA_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1), [NDA_NDM_STATE_MASK] = { .type = NLA_U16 }, [NDA_NDM_FLAGS_MASK] = { .type = NLA_U8 }, }; int br_fdb_delete_bulk(struct nlmsghdr *nlh, struct net_device *dev, struct netlink_ext_ack *extack) { struct net_bridge_fdb_flush_desc desc = {}; struct ndmsg *ndm = nlmsg_data(nlh); struct net_bridge_port *p = NULL; struct nlattr *tb[NDA_MAX + 1]; struct net_bridge *br; u8 ndm_flags; int err; ndm_flags = ndm->ndm_flags & ~FDB_FLUSH_IGNORED_NDM_FLAGS; err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, br_fdb_del_bulk_policy, extack); if (err) return err; if (netif_is_bridge_master(dev)) { br = netdev_priv(dev); } else { p = br_port_get_rtnl(dev); if (!p) { NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge port"); return -EINVAL; } br = p->br; } if (tb[NDA_VLAN]) desc.vlan_id = nla_get_u16(tb[NDA_VLAN]); if (ndm_flags & ~FDB_FLUSH_ALLOWED_NDM_FLAGS) { NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm flag bits set"); return -EINVAL; } if (ndm->ndm_state & ~FDB_FLUSH_ALLOWED_NDM_STATES) { NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm state bits set"); return -EINVAL; } desc.flags |= __ndm_state_to_fdb_flags(ndm->ndm_state); desc.flags |= __ndm_flags_to_fdb_flags(ndm_flags); if (tb[NDA_NDM_STATE_MASK]) { u16 
ndm_state_mask = nla_get_u16(tb[NDA_NDM_STATE_MASK]); desc.flags_mask |= __ndm_state_to_fdb_flags(ndm_state_mask); } if (tb[NDA_NDM_FLAGS_MASK]) { u8 ndm_flags_mask = nla_get_u8(tb[NDA_NDM_FLAGS_MASK]); desc.flags_mask |= __ndm_flags_to_fdb_flags(ndm_flags_mask); } if (tb[NDA_IFINDEX]) { int ifidx = nla_get_s32(tb[NDA_IFINDEX]); err = __fdb_flush_validate_ifindex(br, ifidx, extack); if (err) return err; desc.port_ifindex = ifidx; } else if (p) { /* flush was invoked with port device and NTF_MASTER */ desc.port_ifindex = p->dev->ifindex; } br_debug(br, "flushing port ifindex: %d vlan id: %u flags: 0x%lx flags mask: 0x%lx\n", desc.port_ifindex, desc.vlan_id, desc.flags, desc.flags_mask); br_fdb_flush(br, &desc); return 0; } /* Flush all entries referring to a specific port. * if do_all is set also flush static entries * if vid is set delete all entries that match the vlan_id */ void br_fdb_delete_by_port(struct net_bridge *br, const struct net_bridge_port *p, u16 vid, int do_all) { struct net_bridge_fdb_entry *f; struct hlist_node *tmp; spin_lock_bh(&br->hash_lock); hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) { if (f->dst != p) continue; if (!do_all) if (test_bit(BR_FDB_STATIC, &f->flags) || (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags) && !test_bit(BR_FDB_OFFLOADED, &f->flags)) || (vid && f->key.vlan_id != vid)) continue; if (test_bit(BR_FDB_LOCAL, &f->flags)) fdb_delete_local(br, p, f); else fdb_delete(br, f, true); } spin_unlock_bh(&br->hash_lock); } #if IS_ENABLED(CONFIG_ATM_LANE) /* Interface used by ATM LANE hook to test * if an addr is on some other bridge port */ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr) { struct net_bridge_fdb_entry *fdb; struct net_bridge_port *port; int ret; rcu_read_lock(); port = br_port_get_rcu(dev); if (!port) ret = 0; else { const struct net_bridge_port *dst = NULL; fdb = br_fdb_find_rcu(port->br, addr, 0); if (fdb) dst = READ_ONCE(fdb->dst); ret = dst && dst->dev != dev && dst->state == BR_STATE_FORWARDING; } rcu_read_unlock(); return ret; } #endif /* CONFIG_ATM_LANE */ /* * Fill buffer with forwarding table records in * the API format. 
*/ int br_fdb_fillbuf(struct net_bridge *br, void *buf, unsigned long maxnum, unsigned long skip) { struct net_bridge_fdb_entry *f; struct __fdb_entry *fe = buf; int num = 0; memset(buf, 0, maxnum*sizeof(struct __fdb_entry)); rcu_read_lock(); hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) { if (num >= maxnum) break; if (has_expired(br, f)) continue; /* ignore pseudo entry for local MAC address */ if (!f->dst) continue; if (skip) { --skip; continue; } /* convert from internal format to API */ memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN); /* due to ABI compat need to split into hi/lo */ fe->port_no = f->dst->port_no; fe->port_hi = f->dst->port_no >> 8; fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags); if (!test_bit(BR_FDB_STATIC, &f->flags)) fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated); ++fe; ++num; } rcu_read_unlock(); return num; } /* Add entry for local address of interface */ int br_fdb_add_local(struct net_bridge *br, struct net_bridge_port *source, const unsigned char *addr, u16 vid) { int ret; spin_lock_bh(&br->hash_lock); ret = fdb_add_local(br, source, addr, vid); spin_unlock_bh(&br->hash_lock); return ret; } /* returns true if the fdb was modified */ static bool __fdb_mark_active(struct net_bridge_fdb_entry *fdb) { return !!(test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags) && test_and_clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags)); } void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source, const unsigned char *addr, u16 vid, unsigned long flags) { struct net_bridge_fdb_entry *fdb; /* some users want to always flood. */ if (hold_time(br) == 0) return; fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid); if (likely(fdb)) { /* attempt to update an entry for a local interface */ if (unlikely(test_bit(BR_FDB_LOCAL, &fdb->flags))) { if (net_ratelimit()) br_warn(br, "received packet on %s with own address as source address (addr:%pM, vlan:%u)\n", source->dev->name, addr, vid); } else { unsigned long now = jiffies; bool fdb_modified = false; if (now != fdb->updated) { fdb->updated = now; fdb_modified = __fdb_mark_active(fdb); } /* fastpath: update of existing entry */ if (unlikely(source != READ_ONCE(fdb->dst) && !test_bit(BR_FDB_STICKY, &fdb->flags))) { br_switchdev_fdb_notify(br, fdb, RTM_DELNEIGH); WRITE_ONCE(fdb->dst, source); fdb_modified = true; /* Take over HW learned entry */ if (unlikely(test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))) clear_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags); /* Clear locked flag when roaming to an * unlocked port. 
*/ if (unlikely(test_bit(BR_FDB_LOCKED, &fdb->flags))) clear_bit(BR_FDB_LOCKED, &fdb->flags); } if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags))) { set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags); if (test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, &fdb->flags)) atomic_dec(&br->fdb_n_learned); } if (unlikely(fdb_modified)) { trace_br_fdb_update(br, source, addr, vid, flags); fdb_notify(br, fdb, RTM_NEWNEIGH, true); } } } else { spin_lock(&br->hash_lock); fdb = fdb_create(br, source, addr, vid, flags); if (fdb) { trace_br_fdb_update(br, source, addr, vid, flags); fdb_notify(br, fdb, RTM_NEWNEIGH, true); } /* else we lose race and someone else inserts * it first, don't bother updating */ spin_unlock(&br->hash_lock); } } /* Dump information about entries, in response to GETNEIGH */ int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev, struct net_device *filter_dev, int *idx) { struct ndo_fdb_dump_context *ctx = (void *)cb->ctx; struct net_bridge *br = netdev_priv(dev); struct net_bridge_fdb_entry *f; int err = 0; if (!netif_is_bridge_master(dev)) return err; if (!filter_dev) { err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx); if (err < 0) return err; } rcu_read_lock(); hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) { if (*idx < ctx->fdb_idx) goto skip; if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) { if (filter_dev != dev) goto skip; /* !f->dst is a special case for bridge * It means the MAC belongs to the bridge * Therefore need a little more filtering * we only want to dump the !f->dst case */ if (f->dst) goto skip; } if (!filter_dev && f->dst) goto skip; err = fdb_fill_info(skb, br, f, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, RTM_NEWNEIGH, NLM_F_MULTI); if (err < 0) break; skip: *idx += 1; } rcu_read_unlock(); return err; } int br_fdb_get(struct sk_buff *skb, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, u32 portid, u32 seq, struct netlink_ext_ack *extack) { struct net_bridge *br = netdev_priv(dev); struct net_bridge_fdb_entry *f; int err = 0; rcu_read_lock(); f = br_fdb_find_rcu(br, addr, vid); if (!f) { NL_SET_ERR_MSG(extack, "Fdb entry not found"); err = -ENOENT; goto errout; } err = fdb_fill_info(skb, br, f, portid, seq, RTM_NEWNEIGH, 0); errout: rcu_read_unlock(); return err; } /* returns true if the fdb is modified */ static bool fdb_handle_notify(struct net_bridge_fdb_entry *fdb, u8 notify) { bool modified = false; /* allow to mark an entry as inactive, usually done on creation */ if ((notify & FDB_NOTIFY_INACTIVE_BIT) && !test_and_set_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags)) modified = true; if ((notify & FDB_NOTIFY_BIT) && !test_and_set_bit(BR_FDB_NOTIFY, &fdb->flags)) { /* enabled activity tracking */ modified = true; } else if (!(notify & FDB_NOTIFY_BIT) && test_and_clear_bit(BR_FDB_NOTIFY, &fdb->flags)) { /* disabled activity tracking, clear notify state */ clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags); modified = true; } return modified; } /* Update (create or replace) forwarding database entry */ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source, const u8 *addr, struct ndmsg *ndm, u16 flags, u16 vid, struct nlattr *nfea_tb[]) { bool is_sticky = !!(ndm->ndm_flags & NTF_STICKY); bool refresh = !nfea_tb[NFEA_DONT_REFRESH]; struct net_bridge_fdb_entry *fdb; u16 state = ndm->ndm_state; bool modified = false; u8 notify = 0; /* If the port cannot learn allow only local and static entries */ if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) && 
!(source->state == BR_STATE_LEARNING || source->state == BR_STATE_FORWARDING)) return -EPERM; if (!source && !(state & NUD_PERMANENT)) { pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n", br->dev->name); return -EINVAL; } if (is_sticky && (state & NUD_PERMANENT)) return -EINVAL; if (nfea_tb[NFEA_ACTIVITY_NOTIFY]) { notify = nla_get_u8(nfea_tb[NFEA_ACTIVITY_NOTIFY]); if ((notify & ~BR_FDB_NOTIFY_SETTABLE_BITS) || (notify & BR_FDB_NOTIFY_SETTABLE_BITS) == FDB_NOTIFY_INACTIVE_BIT) return -EINVAL; } fdb = br_fdb_find(br, addr, vid); if (fdb == NULL) { if (!(flags & NLM_F_CREATE)) return -ENOENT; fdb = fdb_create(br, source, addr, vid, BIT(BR_FDB_ADDED_BY_USER)); if (!fdb) return -ENOMEM; modified = true; } else { if (flags & NLM_F_EXCL) return -EEXIST; if (READ_ONCE(fdb->dst) != source) { WRITE_ONCE(fdb->dst, source); modified = true; } set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags); if (test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, &fdb->flags)) atomic_dec(&br->fdb_n_learned); } if (fdb_to_nud(br, fdb) != state) { if (state & NUD_PERMANENT) { set_bit(BR_FDB_LOCAL, &fdb->flags); if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags)) fdb_add_hw_addr(br, addr); } else if (state & NUD_NOARP) { clear_bit(BR_FDB_LOCAL, &fdb->flags); if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags)) fdb_add_hw_addr(br, addr); } else { clear_bit(BR_FDB_LOCAL, &fdb->flags); if (test_and_clear_bit(BR_FDB_STATIC, &fdb->flags)) fdb_del_hw_addr(br, addr); } modified = true; } if (is_sticky != test_bit(BR_FDB_STICKY, &fdb->flags)) { change_bit(BR_FDB_STICKY, &fdb->flags); modified = true; } if (test_and_clear_bit(BR_FDB_LOCKED, &fdb->flags)) modified = true; if (fdb_handle_notify(fdb, notify)) modified = true; fdb->used = jiffies; if (modified) { if (refresh) fdb->updated = jiffies; fdb_notify(br, fdb, RTM_NEWNEIGH, true); } return 0; } static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br, struct net_bridge_port *p, const unsigned char *addr, u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[], bool *notified, struct netlink_ext_ack *extack) { int err = 0; if (ndm->ndm_flags & NTF_USE) { if (!p) { pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n", br->dev->name); return -EINVAL; } if (!nbp_state_should_learn(p)) return 0; local_bh_disable(); rcu_read_lock(); br_fdb_update(br, p, addr, vid, BIT(BR_FDB_ADDED_BY_USER)); rcu_read_unlock(); local_bh_enable(); } else if (ndm->ndm_flags & NTF_EXT_LEARNED) { if (!p && !(ndm->ndm_state & NUD_PERMANENT)) { NL_SET_ERR_MSG_MOD(extack, "FDB entry towards bridge must be permanent"); return -EINVAL; } err = br_fdb_external_learn_add(br, p, addr, vid, false, true); } else { spin_lock_bh(&br->hash_lock); err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb); spin_unlock_bh(&br->hash_lock); } if (!err) *notified = true; return err; } static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = { [NFEA_ACTIVITY_NOTIFY] = { .type = NLA_U8 }, [NFEA_DONT_REFRESH] = { .type = NLA_FLAG }, }; /* Add new permanent fdb entry with RTM_NEWNEIGH */ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, u16 nlh_flags, bool *notified, struct netlink_ext_ack *extack) { struct nlattr *nfea_tb[NFEA_MAX + 1], *attr; struct net_bridge_vlan_group *vg; struct net_bridge_port *p = NULL; struct net_bridge_vlan *v; struct net_bridge *br = NULL; u32 ext_flags = 0; int err = 0; trace_br_fdb_add(ndm, dev, addr, vid, nlh_flags); if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) { pr_info("bridge: RTM_NEWNEIGH 
with invalid state %#x\n", ndm->ndm_state); return -EINVAL; } if (is_zero_ether_addr(addr)) { pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n"); return -EINVAL; } if (netif_is_bridge_master(dev)) { br = netdev_priv(dev); vg = br_vlan_group(br); } else { p = br_port_get_rtnl(dev); if (!p) { pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n", dev->name); return -EINVAL; } br = p->br; vg = nbp_vlan_group(p); } if (tb[NDA_FLAGS_EXT]) ext_flags = nla_get_u32(tb[NDA_FLAGS_EXT]); if (ext_flags & NTF_EXT_LOCKED) { NL_SET_ERR_MSG_MOD(extack, "Cannot add FDB entry with \"locked\" flag set"); return -EINVAL; } if (tb[NDA_FDB_EXT_ATTRS]) { attr = tb[NDA_FDB_EXT_ATTRS]; err = nla_parse_nested(nfea_tb, NFEA_MAX, attr, br_nda_fdb_pol, extack); if (err) return err; } else { memset(nfea_tb, 0, sizeof(struct nlattr *) * (NFEA_MAX + 1)); } if (vid) { v = br_vlan_find(vg, vid); if (!v || !br_vlan_should_use(v)) { pr_info("bridge: RTM_NEWNEIGH with unconfigured vlan %d on %s\n", vid, dev->name); return -EINVAL; } /* VID was specified, so use it. */ err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb, notified, extack); } else { err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb, notified, extack); if (err || !vg || !vg->num_vlans) goto out; /* We have vlans configured on this port and user didn't * specify a VLAN. To be nice, add/update entry for every * vlan on this port. */ list_for_each_entry(v, &vg->vlan_list, vlist) { if (!br_vlan_should_use(v)) continue; err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid, nfea_tb, notified, extack); if (err) goto out; } } out: return err; } static int fdb_delete_by_addr_and_port(struct net_bridge *br, const struct net_bridge_port *p, const u8 *addr, u16 vlan, bool *notified) { struct net_bridge_fdb_entry *fdb; fdb = br_fdb_find(br, addr, vlan); if (!fdb || READ_ONCE(fdb->dst) != p) return -ENOENT; fdb_delete(br, fdb, true); *notified = true; return 0; } static int __br_fdb_delete(struct net_bridge *br, const struct net_bridge_port *p, const unsigned char *addr, u16 vid, bool *notified) { int err; spin_lock_bh(&br->hash_lock); err = fdb_delete_by_addr_and_port(br, p, addr, vid, notified); spin_unlock_bh(&br->hash_lock); return err; } /* Remove neighbor entry with RTM_DELNEIGH */ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, bool *notified, struct netlink_ext_ack *extack) { struct net_bridge_vlan_group *vg; struct net_bridge_port *p = NULL; struct net_bridge *br; int err; if (netif_is_bridge_master(dev)) { br = netdev_priv(dev); vg = br_vlan_group(br); } else { p = br_port_get_rtnl(dev); if (!p) { pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n", dev->name); return -EINVAL; } vg = nbp_vlan_group(p); br = p->br; } if (vid) { err = __br_fdb_delete(br, p, addr, vid, notified); } else { struct net_bridge_vlan *v; err = -ENOENT; err &= __br_fdb_delete(br, p, addr, 0, notified); if (!vg || !vg->num_vlans) return err; list_for_each_entry(v, &vg->vlan_list, vlist) { if (!br_vlan_should_use(v)) continue; err &= __br_fdb_delete(br, p, addr, v->vid, notified); } } return err; } int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p) { struct net_bridge_fdb_entry *f, *tmp; int err = 0; ASSERT_RTNL(); /* the key here is that static entries change only under rtnl */ rcu_read_lock(); hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) { /* We only care for static entries */ if (!test_bit(BR_FDB_STATIC, &f->flags)) continue; err = dev_uc_add(p->dev, 
f->key.addr.addr); if (err) goto rollback; } done: rcu_read_unlock(); return err; rollback: hlist_for_each_entry_rcu(tmp, &br->fdb_list, fdb_node) { /* We only care for static entries */ if (!test_bit(BR_FDB_STATIC, &tmp->flags)) continue; if (tmp == f) break; dev_uc_del(p->dev, tmp->key.addr.addr); } goto done; } void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p) { struct net_bridge_fdb_entry *f; ASSERT_RTNL(); rcu_read_lock(); hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) { /* We only care for static entries */ if (!test_bit(BR_FDB_STATIC, &f->flags)) continue; dev_uc_del(p->dev, f->key.addr.addr); } rcu_read_unlock(); } int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, const unsigned char *addr, u16 vid, bool locked, bool swdev_notify) { struct net_bridge_fdb_entry *fdb; bool modified = false; int err = 0; trace_br_fdb_external_learn_add(br, p, addr, vid); if (locked && (!p || !(p->flags & BR_PORT_MAB))) return -EINVAL; spin_lock_bh(&br->hash_lock); fdb = br_fdb_find(br, addr, vid); if (!fdb) { unsigned long flags = BIT(BR_FDB_ADDED_BY_EXT_LEARN); if (swdev_notify) flags |= BIT(BR_FDB_ADDED_BY_USER); if (!p) flags |= BIT(BR_FDB_LOCAL); if (locked) flags |= BIT(BR_FDB_LOCKED); fdb = fdb_create(br, p, addr, vid, flags); if (!fdb) { err = -ENOMEM; goto err_unlock; } fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify); } else { if (locked && (!test_bit(BR_FDB_LOCKED, &fdb->flags) || READ_ONCE(fdb->dst) != p)) { err = -EINVAL; goto err_unlock; } fdb->updated = jiffies; if (READ_ONCE(fdb->dst) != p) { WRITE_ONCE(fdb->dst, p); modified = true; } if (test_and_set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) { /* Refresh entry */ fdb->used = jiffies; } else { modified = true; } if (locked != test_bit(BR_FDB_LOCKED, &fdb->flags)) { change_bit(BR_FDB_LOCKED, &fdb->flags); modified = true; } if (swdev_notify) set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags); if (!p) set_bit(BR_FDB_LOCAL, &fdb->flags); if ((swdev_notify || !p) && test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, &fdb->flags)) atomic_dec(&br->fdb_n_learned); if (modified) fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify); } err_unlock: spin_unlock_bh(&br->hash_lock); return err; } int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p, const unsigned char *addr, u16 vid, bool swdev_notify) { struct net_bridge_fdb_entry *fdb; int err = 0; spin_lock_bh(&br->hash_lock); fdb = br_fdb_find(br, addr, vid); if (fdb && test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) fdb_delete(br, fdb, swdev_notify); else err = -ENOENT; spin_unlock_bh(&br->hash_lock); return err; } void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p, const unsigned char *addr, u16 vid, bool offloaded) { struct net_bridge_fdb_entry *fdb; spin_lock_bh(&br->hash_lock); fdb = br_fdb_find(br, addr, vid); if (fdb && offloaded != test_bit(BR_FDB_OFFLOADED, &fdb->flags)) change_bit(BR_FDB_OFFLOADED, &fdb->flags); spin_unlock_bh(&br->hash_lock); } void br_fdb_clear_offload(const struct net_device *dev, u16 vid) { struct net_bridge_fdb_entry *f; struct net_bridge_port *p; ASSERT_RTNL(); p = br_port_get_rtnl(dev); if (!p) return; spin_lock_bh(&p->br->hash_lock); hlist_for_each_entry(f, &p->br->fdb_list, fdb_node) { if (f->dst == p && f->key.vlan_id == vid) clear_bit(BR_FDB_OFFLOADED, &f->flags); } spin_unlock_bh(&p->br->hash_lock); } EXPORT_SYMBOL_GPL(br_fdb_clear_offload); |
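/*
 * Illustrative userspace sketch (not part of br_fdb.c): a minimal model of
 * two ideas used throughout this file — the FDB key being the pair
 * {MAC address, VLAN id}, and the wraparound-safe ageing test performed by
 * has_expired()/hold_time(). All names below (fdb_model_*, tick_before_eq,
 * model_expired) are hypothetical stand-ins for this example only, and a
 * plain counter replaces jiffies.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fdb_model_key {
	uint8_t  addr[6];	/* MAC address */
	uint16_t vlan_id;	/* 0 == untagged/default */
};

struct fdb_model_entry {
	struct fdb_model_key key;
	unsigned long updated;	/* "jiffies" when last refreshed */
	bool is_static;
};

/* Entries are keyed on both the MAC address and the VLAN id. */
static bool model_key_equal(const struct fdb_model_key *a,
			    const struct fdb_model_key *b)
{
	return a->vlan_id == b->vlan_id &&
	       memcmp(a->addr, b->addr, sizeof(a->addr)) == 0;
}

/* Wraparound-safe "a <= b" on free-running counters, like time_before_eq(). */
static bool tick_before_eq(unsigned long a, unsigned long b)
{
	return (long)(a - b) <= 0;
}

/* Mirrors the shape of has_expired(): static entries never age out. */
static bool model_expired(const struct fdb_model_entry *f,
			  unsigned long now, unsigned long hold)
{
	return !f->is_static && tick_before_eq(f->updated + hold, now);
}

int main(void)
{
	struct fdb_model_entry e = {
		.key = { .addr = { 0x02, 0, 0, 0, 0, 1 }, .vlan_id = 10 },
		.updated = 1000,
		.is_static = false,
	};
	struct fdb_model_key lookup = e.key;
	unsigned long hold = 300;	/* stand-in for br->ageing_time */

	printf("key matches lookup?  %d\n", model_key_equal(&e.key, &lookup)); /* 1 */
	printf("expired at t=1200?   %d\n", model_expired(&e, 1200, hold));    /* 0 */
	printf("expired at t=1400?   %d\n", model_expired(&e, 1400, hold));    /* 1 */
	return 0;
}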
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __IEEE802154_CORE_H
#define __IEEE802154_CORE_H

#include <net/cfg802154.h>

struct cfg802154_registered_device {
	const struct cfg802154_ops *ops;
	struct list_head list;

	/* wpan_phy index, internal only */
	int wpan_phy_idx;

	/* also protected by devlist_mtx */
	int opencount;
	wait_queue_head_t dev_wait;

	/* protected by RTNL only */
	int num_running_ifaces;

	/* associated wpan interfaces, protected by rtnl or RCU */
	struct list_head wpan_dev_list;
	int devlist_generation, wpan_dev_id;

	/* must be last because of the way we do wpan_phy_priv(),
	 * and it should at least be aligned to NETDEV_ALIGN
	 */
	struct wpan_phy wpan_phy __aligned(NETDEV_ALIGN);
};

static inline struct cfg802154_registered_device *
wpan_phy_to_rdev(struct wpan_phy *wpan_phy)
{
	BUG_ON(!wpan_phy);
	return container_of(wpan_phy, struct cfg802154_registered_device,
			    wpan_phy);
}

extern struct list_head cfg802154_rdev_list;
extern int cfg802154_rdev_list_generation;

int cfg802154_switch_netns(struct cfg802154_registered_device *rdev,
			   struct net *net);

/* free object */
void cfg802154_dev_free(struct cfg802154_registered_device *rdev);

struct cfg802154_registered_device *
cfg802154_rdev_by_wpan_phy_idx(int wpan_phy_idx);
struct wpan_phy *wpan_phy_idx_to_wpan_phy(int wpan_phy_idx);

#endif /* __IEEE802154_CORE_H */
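/*
 * Illustrative sketch (not part of the ieee802154 headers): the "must be
 * last" placement of wpan_phy above exists so that container_of() can map a
 * pointer to the embedded member back to the enclosing registered-device
 * structure, as wpan_phy_to_rdev() does. The userspace example below shows
 * that pattern with hypothetical names (outer_dev, inner_phy) and a local
 * container_of() definition; it is a model, not kernel code.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner_phy {
	int idx;
};

struct outer_dev {
	int opencount;
	struct inner_phy phy;	/* embedded member, like wpan_phy in rdev */
};

static struct outer_dev *phy_to_dev(struct inner_phy *phy)
{
	/* Recover the enclosing structure from a pointer to its member. */
	return container_of(phy, struct outer_dev, phy);
}

int main(void)
{
	struct outer_dev dev = { .opencount = 1, .phy = { .idx = 7 } };
	struct inner_phy *phy = &dev.phy;

	printf("opencount via container_of: %d\n", phy_to_dev(phy)->opencount);
	return 0;
}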
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/xdp.h>

#include "vhost.h"

static int experimental_zcopytx = 0;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
		 " 1 -Enable; 0 - Disable");

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others.
 */
#define VHOST_NET_WEIGHT 0x80000

/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with small
 * pkts.
 */
#define VHOST_NET_PKT_WEIGHT 256

/* MAX number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256

/*
 * For transmit, used buffer len is unused; we override it to track buffer
 * status internally; used for zerocopy tx only.
*/ /* Lower device DMA failed */ #define VHOST_DMA_FAILED_LEN ((__force __virtio32)3) /* Lower device DMA done */ #define VHOST_DMA_DONE_LEN ((__force __virtio32)2) /* Lower device DMA in progress */ #define VHOST_DMA_IN_PROGRESS ((__force __virtio32)1) /* Buffer unused */ #define VHOST_DMA_CLEAR_LEN ((__force __virtio32)0) #define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN) enum { VHOST_NET_FEATURES = VHOST_FEATURES | (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) | (1ULL << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_ACCESS_PLATFORM) | (1ULL << VIRTIO_F_RING_RESET) }; enum { VHOST_NET_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) }; enum { VHOST_NET_VQ_RX = 0, VHOST_NET_VQ_TX = 1, VHOST_NET_VQ_MAX = 2, }; struct vhost_net_ubuf_ref { /* refcount follows semantics similar to kref: * 0: object is released * 1: no outstanding ubufs * >1: outstanding ubufs */ atomic_t refcount; wait_queue_head_t wait; struct vhost_virtqueue *vq; }; #define VHOST_NET_BATCH 64 struct vhost_net_buf { void **queue; int tail; int head; }; struct vhost_net_virtqueue { struct vhost_virtqueue vq; size_t vhost_hlen; size_t sock_hlen; /* vhost zerocopy support fields below: */ /* last used idx for outstanding DMA zerocopy buffers */ int upend_idx; /* For TX, first used idx for DMA done zerocopy buffers * For RX, number of batched heads */ int done_idx; /* Number of XDP frames batched */ int batched_xdp; /* an array of userspace buffers info */ struct ubuf_info_msgzc *ubuf_info; /* Reference counting for outstanding ubufs. * Protected by vq mutex. Writers must also take device mutex. */ struct vhost_net_ubuf_ref *ubufs; struct ptr_ring *rx_ring; struct vhost_net_buf rxq; /* Batched XDP buffs */ struct xdp_buff *xdp; }; struct vhost_net { struct vhost_dev dev; struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX]; struct vhost_poll poll[VHOST_NET_VQ_MAX]; /* Number of TX recently submitted. * Protected by tx vq lock. */ unsigned tx_packets; /* Number of times zerocopy TX recently failed. * Protected by tx vq lock. */ unsigned tx_zcopy_err; /* Flush in progress. Protected by tx vq lock. 
*/ bool tx_flush; /* Private page frag cache */ struct page_frag_cache pf_cache; }; static unsigned vhost_net_zcopy_mask __read_mostly; static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq) { if (rxq->tail != rxq->head) return rxq->queue[rxq->head]; else return NULL; } static int vhost_net_buf_get_size(struct vhost_net_buf *rxq) { return rxq->tail - rxq->head; } static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq) { return rxq->tail == rxq->head; } static void *vhost_net_buf_consume(struct vhost_net_buf *rxq) { void *ret = vhost_net_buf_get_ptr(rxq); ++rxq->head; return ret; } static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq) { struct vhost_net_buf *rxq = &nvq->rxq; rxq->head = 0; rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue, VHOST_NET_BATCH); return rxq->tail; } static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq) { struct vhost_net_buf *rxq = &nvq->rxq; if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) { ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head, vhost_net_buf_get_size(rxq), tun_ptr_free); rxq->head = rxq->tail = 0; } } static int vhost_net_buf_peek_len(void *ptr) { if (tun_is_xdp_frame(ptr)) { struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); return xdpf->len; } return __skb_array_len_with_tag(ptr); } static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq) { struct vhost_net_buf *rxq = &nvq->rxq; if (!vhost_net_buf_is_empty(rxq)) goto out; if (!vhost_net_buf_produce(nvq)) return 0; out: return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq)); } static void vhost_net_buf_init(struct vhost_net_buf *rxq) { rxq->head = rxq->tail = 0; } static void vhost_net_enable_zcopy(int vq) { vhost_net_zcopy_mask |= 0x1 << vq; } static struct vhost_net_ubuf_ref * vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy) { struct vhost_net_ubuf_ref *ubufs; /* No zero copy backend? Nothing to count. 
*/ if (!zcopy) return NULL; ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL); if (!ubufs) return ERR_PTR(-ENOMEM); atomic_set(&ubufs->refcount, 1); init_waitqueue_head(&ubufs->wait); ubufs->vq = vq; return ubufs; } static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs) { int r = atomic_sub_return(1, &ubufs->refcount); if (unlikely(!r)) wake_up(&ubufs->wait); return r; } static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs) { vhost_net_ubuf_put(ubufs); wait_event(ubufs->wait, !atomic_read(&ubufs->refcount)); } static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs) { vhost_net_ubuf_put_and_wait(ubufs); kfree(ubufs); } static void vhost_net_clear_ubuf_info(struct vhost_net *n) { int i; for (i = 0; i < VHOST_NET_VQ_MAX; ++i) { kfree(n->vqs[i].ubuf_info); n->vqs[i].ubuf_info = NULL; } } static int vhost_net_set_ubuf_info(struct vhost_net *n) { bool zcopy; int i; for (i = 0; i < VHOST_NET_VQ_MAX; ++i) { zcopy = vhost_net_zcopy_mask & (0x1 << i); if (!zcopy) continue; n->vqs[i].ubuf_info = kmalloc_array(UIO_MAXIOV, sizeof(*n->vqs[i].ubuf_info), GFP_KERNEL); if (!n->vqs[i].ubuf_info) goto err; } return 0; err: vhost_net_clear_ubuf_info(n); return -ENOMEM; } static void vhost_net_vq_reset(struct vhost_net *n) { int i; vhost_net_clear_ubuf_info(n); for (i = 0; i < VHOST_NET_VQ_MAX; i++) { n->vqs[i].done_idx = 0; n->vqs[i].upend_idx = 0; n->vqs[i].ubufs = NULL; n->vqs[i].vhost_hlen = 0; n->vqs[i].sock_hlen = 0; vhost_net_buf_init(&n->vqs[i].rxq); } } static void vhost_net_tx_packet(struct vhost_net *net) { ++net->tx_packets; if (net->tx_packets < 1024) return; net->tx_packets = 0; net->tx_zcopy_err = 0; } static void vhost_net_tx_err(struct vhost_net *net) { ++net->tx_zcopy_err; } static bool vhost_net_tx_select_zcopy(struct vhost_net *net) { /* TX flush waits for outstanding DMAs to be done. * Don't start new DMAs. */ return !net->tx_flush && net->tx_packets / 64 >= net->tx_zcopy_err; } static bool vhost_sock_zcopy(struct socket *sock) { return unlikely(experimental_zcopytx) && sock_flag(sock->sk, SOCK_ZEROCOPY); } static bool vhost_sock_xdp(struct socket *sock) { return sock_flag(sock->sk, SOCK_XDP); } /* In case of DMA done not in order in lower device driver for some reason. * upend_idx is used to track end of used idx, done_idx is used to track head * of used idx. Once lower device DMA done contiguously, we will signal KVM * guest used idx. */ static void vhost_zerocopy_signal_used(struct vhost_net *net, struct vhost_virtqueue *vq) { struct vhost_net_virtqueue *nvq = container_of(vq, struct vhost_net_virtqueue, vq); int i, add; int j = 0; for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) { if (vq->heads[i].len == VHOST_DMA_FAILED_LEN) vhost_net_tx_err(net); if (VHOST_DMA_IS_DONE(vq->heads[i].len)) { vq->heads[i].len = VHOST_DMA_CLEAR_LEN; ++j; } else break; } while (j) { add = min(UIO_MAXIOV - nvq->done_idx, j); vhost_add_used_and_signal_n(vq->dev, vq, &vq->heads[nvq->done_idx], add); nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV; j -= add; } } static void vhost_zerocopy_complete(struct sk_buff *skb, struct ubuf_info *ubuf_base, bool success) { struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base); struct vhost_net_ubuf_ref *ubufs = ubuf->ctx; struct vhost_virtqueue *vq = ubufs->vq; int cnt; rcu_read_lock_bh(); /* set len to mark this desc buffers done DMA */ vq->heads[ubuf->desc].len = success ? 
VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN; cnt = vhost_net_ubuf_put(ubufs); /* * Trigger polling thread if guest stopped submitting new buffers: * in this case, the refcount after decrement will eventually reach 1. * We also trigger polling periodically after each 16 packets * (the value 16 here is more or less arbitrary, it's tuned to trigger * less than 10% of times). */ if (cnt <= 1 || !(cnt % 16)) vhost_poll_queue(&vq->poll); rcu_read_unlock_bh(); } static const struct ubuf_info_ops vhost_ubuf_ops = { .complete = vhost_zerocopy_complete, }; static inline unsigned long busy_clock(void) { return local_clock() >> 10; } static bool vhost_can_busy_poll(unsigned long endtime) { return likely(!need_resched() && !time_after(busy_clock(), endtime) && !signal_pending(current)); } static void vhost_net_disable_vq(struct vhost_net *n, struct vhost_virtqueue *vq) { struct vhost_net_virtqueue *nvq = container_of(vq, struct vhost_net_virtqueue, vq); struct vhost_poll *poll = n->poll + (nvq - n->vqs); if (!vhost_vq_get_backend(vq)) return; vhost_poll_stop(poll); } static int vhost_net_enable_vq(struct vhost_net *n, struct vhost_virtqueue *vq) { struct vhost_net_virtqueue *nvq = container_of(vq, struct vhost_net_virtqueue, vq); struct vhost_poll *poll = n->poll + (nvq - n->vqs); struct socket *sock; sock = vhost_vq_get_backend(vq); if (!sock) return 0; return vhost_poll_start(poll, sock->file); } static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq) { struct vhost_virtqueue *vq = &nvq->vq; struct vhost_dev *dev = vq->dev; if (!nvq->done_idx) return; vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx); nvq->done_idx = 0; } static void vhost_tx_batch(struct vhost_net *net, struct vhost_net_virtqueue *nvq, struct socket *sock, struct msghdr *msghdr) { struct tun_msg_ctl ctl = { .type = TUN_MSG_PTR, .num = nvq->batched_xdp, .ptr = nvq->xdp, }; int i, err; if (nvq->batched_xdp == 0) goto signal_used; msghdr->msg_control = &ctl; msghdr->msg_controllen = sizeof(ctl); err = sock->ops->sendmsg(sock, msghdr, 0); if (unlikely(err < 0)) { vq_err(&nvq->vq, "Fail to batch sending packets\n"); /* free pages owned by XDP; since this is an unlikely error path, * keep it simple and avoid more complex bulk update for the * used pages */ for (i = 0; i < nvq->batched_xdp; ++i) put_page(virt_to_head_page(nvq->xdp[i].data)); nvq->batched_xdp = 0; nvq->done_idx = 0; return; } signal_used: vhost_net_signal_used(nvq); nvq->batched_xdp = 0; } static int sock_has_rx_data(struct socket *sock) { if (unlikely(!sock)) return 0; if (sock->ops->peek_len) return sock->ops->peek_len(sock); return skb_queue_empty(&sock->sk->sk_receive_queue); } static void vhost_net_busy_poll_try_queue(struct vhost_net *net, struct vhost_virtqueue *vq) { if (!vhost_vq_avail_empty(&net->dev, vq)) { vhost_poll_queue(&vq->poll); } else if (unlikely(vhost_enable_notify(&net->dev, vq))) { vhost_disable_notify(&net->dev, vq); vhost_poll_queue(&vq->poll); } } static void vhost_net_busy_poll(struct vhost_net *net, struct vhost_virtqueue *rvq, struct vhost_virtqueue *tvq, bool *busyloop_intr, bool poll_rx) { unsigned long busyloop_timeout; unsigned long endtime; struct socket *sock; struct vhost_virtqueue *vq = poll_rx ? tvq : rvq; /* Try to hold the vq mutex of the paired virtqueue. We can't * use mutex_lock() here since we could not guarantee a * consistenet lock ordering. */ if (!mutex_trylock(&vq->mutex)) return; vhost_disable_notify(&net->dev, vq); sock = vhost_vq_get_backend(rvq); busyloop_timeout = poll_rx ? 
rvq->busyloop_timeout: tvq->busyloop_timeout; preempt_disable(); endtime = busy_clock() + busyloop_timeout; while (vhost_can_busy_poll(endtime)) { if (vhost_vq_has_work(vq)) { *busyloop_intr = true; break; } if ((sock_has_rx_data(sock) && !vhost_vq_avail_empty(&net->dev, rvq)) || !vhost_vq_avail_empty(&net->dev, tvq)) break; cpu_relax(); } preempt_enable(); if (poll_rx || sock_has_rx_data(sock)) vhost_net_busy_poll_try_queue(net, vq); else if (!poll_rx) /* On tx here, sock has no rx data. */ vhost_enable_notify(&net->dev, rvq); mutex_unlock(&vq->mutex); } static int vhost_net_tx_get_vq_desc(struct vhost_net *net, struct vhost_net_virtqueue *tnvq, unsigned int *out_num, unsigned int *in_num, struct msghdr *msghdr, bool *busyloop_intr) { struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX]; struct vhost_virtqueue *rvq = &rnvq->vq; struct vhost_virtqueue *tvq = &tnvq->vq; int r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov), out_num, in_num, NULL, NULL); if (r == tvq->num && tvq->busyloop_timeout) { /* Flush batched packets first */ if (!vhost_sock_zcopy(vhost_vq_get_backend(tvq))) vhost_tx_batch(net, tnvq, vhost_vq_get_backend(tvq), msghdr); vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false); r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov), out_num, in_num, NULL, NULL); } return r; } static bool vhost_exceeds_maxpend(struct vhost_net *net) { struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; struct vhost_virtqueue *vq = &nvq->vq; return (nvq->upend_idx + UIO_MAXIOV - nvq->done_idx) % UIO_MAXIOV > min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2); } static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter, size_t hdr_size, int out) { /* Skip header. TODO: support TSO. */ size_t len = iov_length(vq->iov, out); iov_iter_init(iter, ITER_SOURCE, vq->iov, out, len); iov_iter_advance(iter, hdr_size); return iov_iter_count(iter); } static int get_tx_bufs(struct vhost_net *net, struct vhost_net_virtqueue *nvq, struct msghdr *msg, unsigned int *out, unsigned int *in, size_t *len, bool *busyloop_intr) { struct vhost_virtqueue *vq = &nvq->vq; int ret; ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr); if (ret < 0 || ret == vq->num) return ret; if (*in) { vq_err(vq, "Unexpected descriptor format for TX: out %d, int %d\n", *out, *in); return -EFAULT; } /* Sanity check */ *len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out); if (*len == 0) { vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n", *len, nvq->vhost_hlen); return -EFAULT; } return ret; } static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len) { return total_len < VHOST_NET_WEIGHT && !vhost_vq_avail_empty(vq->dev, vq); } #define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq, struct iov_iter *from) { struct vhost_virtqueue *vq = &nvq->vq; struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev); struct socket *sock = vhost_vq_get_backend(vq); struct virtio_net_hdr *gso; struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp]; struct tun_xdp_hdr *hdr; size_t len = iov_iter_count(from); int headroom = vhost_sock_xdp(sock) ? 
XDP_PACKET_HEADROOM : 0; int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); int pad = SKB_DATA_ALIGN(VHOST_NET_RX_PAD + headroom + nvq->sock_hlen); int sock_hlen = nvq->sock_hlen; void *buf; int copied; int ret; if (unlikely(len < nvq->sock_hlen)) return -EFAULT; if (SKB_DATA_ALIGN(len + pad) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) return -ENOSPC; buflen += SKB_DATA_ALIGN(len + pad); buf = page_frag_alloc_align(&net->pf_cache, buflen, GFP_KERNEL, SMP_CACHE_BYTES); if (unlikely(!buf)) return -ENOMEM; copied = copy_from_iter(buf + offsetof(struct tun_xdp_hdr, gso), sock_hlen, from); if (copied != sock_hlen) { ret = -EFAULT; goto err; } hdr = buf; gso = &hdr->gso; if (!sock_hlen) memset(buf, 0, pad); if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && vhost16_to_cpu(vq, gso->csum_start) + vhost16_to_cpu(vq, gso->csum_offset) + 2 > vhost16_to_cpu(vq, gso->hdr_len)) { gso->hdr_len = cpu_to_vhost16(vq, vhost16_to_cpu(vq, gso->csum_start) + vhost16_to_cpu(vq, gso->csum_offset) + 2); if (vhost16_to_cpu(vq, gso->hdr_len) > len) { ret = -EINVAL; goto err; } } len -= sock_hlen; copied = copy_from_iter(buf + pad, len, from); if (copied != len) { ret = -EFAULT; goto err; } xdp_init_buff(xdp, buflen, NULL); xdp_prepare_buff(xdp, buf, pad, len, true); hdr->buflen = buflen; ++nvq->batched_xdp; return 0; err: page_frag_free(buf); return ret; } static void handle_tx_copy(struct vhost_net *net, struct socket *sock) { struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; struct vhost_virtqueue *vq = &nvq->vq; unsigned out, in; int head; struct msghdr msg = { .msg_name = NULL, .msg_namelen = 0, .msg_control = NULL, .msg_controllen = 0, .msg_flags = MSG_DONTWAIT, }; size_t len, total_len = 0; int err; int sent_pkts = 0; bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX); do { bool busyloop_intr = false; if (nvq->done_idx == VHOST_NET_BATCH) vhost_tx_batch(net, nvq, sock, &msg); head = get_tx_bufs(net, nvq, &msg, &out, &in, &len, &busyloop_intr); /* On error, stop handling until the next kick. */ if (unlikely(head < 0)) break; /* Nothing new? Wait for eventfd to tell us they refilled. */ if (head == vq->num) { if (unlikely(busyloop_intr)) { vhost_poll_queue(&vq->poll); } else if (unlikely(vhost_enable_notify(&net->dev, vq))) { vhost_disable_notify(&net->dev, vq); continue; } break; } total_len += len; /* For simplicity, TX batching is only enabled if * sndbuf is unlimited. */ if (sock_can_batch) { err = vhost_net_build_xdp(nvq, &msg.msg_iter); if (!err) { goto done; } else if (unlikely(err != -ENOSPC)) { vhost_tx_batch(net, nvq, sock, &msg); vhost_discard_vq_desc(vq, 1); vhost_net_enable_vq(net, vq); break; } /* We can't build XDP buff, go for single * packet path but let's flush batched * packets. 
*/ vhost_tx_batch(net, nvq, sock, &msg); msg.msg_control = NULL; } else { if (tx_can_batch(vq, total_len)) msg.msg_flags |= MSG_MORE; else msg.msg_flags &= ~MSG_MORE; } err = sock->ops->sendmsg(sock, &msg, len); if (unlikely(err < 0)) { if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) { vhost_discard_vq_desc(vq, 1); vhost_net_enable_vq(net, vq); break; } pr_debug("Fail to send packet: err %d", err); } else if (unlikely(err != len)) pr_debug("Truncated TX packet: len %d != %zd\n", err, len); done: vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head); vq->heads[nvq->done_idx].len = 0; ++nvq->done_idx; } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len))); vhost_tx_batch(net, nvq, sock, &msg); } static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock) { struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; struct vhost_virtqueue *vq = &nvq->vq; unsigned out, in; int head; struct msghdr msg = { .msg_name = NULL, .msg_namelen = 0, .msg_control = NULL, .msg_controllen = 0, .msg_flags = MSG_DONTWAIT, }; struct tun_msg_ctl ctl; size_t len, total_len = 0; int err; struct vhost_net_ubuf_ref *ubufs; struct ubuf_info_msgzc *ubuf; bool zcopy_used; int sent_pkts = 0; do { bool busyloop_intr; /* Release DMAs done buffers first */ vhost_zerocopy_signal_used(net, vq); busyloop_intr = false; head = get_tx_bufs(net, nvq, &msg, &out, &in, &len, &busyloop_intr); /* On error, stop handling until the next kick. */ if (unlikely(head < 0)) break; /* Nothing new? Wait for eventfd to tell us they refilled. */ if (head == vq->num) { if (unlikely(busyloop_intr)) { vhost_poll_queue(&vq->poll); } else if (unlikely(vhost_enable_notify(&net->dev, vq))) { vhost_disable_notify(&net->dev, vq); continue; } break; } zcopy_used = len >= VHOST_GOODCOPY_LEN && !vhost_exceeds_maxpend(net) && vhost_net_tx_select_zcopy(net); /* use msg_control to pass vhost zerocopy ubuf info to skb */ if (zcopy_used) { ubuf = nvq->ubuf_info + nvq->upend_idx; vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head); vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS; ubuf->ctx = nvq->ubufs; ubuf->desc = nvq->upend_idx; ubuf->ubuf.ops = &vhost_ubuf_ops; ubuf->ubuf.flags = SKBFL_ZEROCOPY_FRAG; refcount_set(&ubuf->ubuf.refcnt, 1); msg.msg_control = &ctl; ctl.type = TUN_MSG_UBUF; ctl.ptr = &ubuf->ubuf; msg.msg_controllen = sizeof(ctl); ubufs = nvq->ubufs; atomic_inc(&ubufs->refcount); nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV; } else { msg.msg_control = NULL; ubufs = NULL; } total_len += len; if (tx_can_batch(vq, total_len) && likely(!vhost_exceeds_maxpend(net))) { msg.msg_flags |= MSG_MORE; } else { msg.msg_flags &= ~MSG_MORE; } err = sock->ops->sendmsg(sock, &msg, len); if (unlikely(err < 0)) { bool retry = err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS; if (zcopy_used) { if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS) vhost_net_ubuf_put(ubufs); if (retry) nvq->upend_idx = ((unsigned)nvq->upend_idx - 1) % UIO_MAXIOV; else vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN; } if (retry) { vhost_discard_vq_desc(vq, 1); vhost_net_enable_vq(net, vq); break; } pr_debug("Fail to send packet: err %d", err); } else if (unlikely(err != len)) pr_debug("Truncated TX packet: " " len %d != %zd\n", err, len); if (!zcopy_used) vhost_add_used_and_signal(&net->dev, vq, head, 0); else vhost_zerocopy_signal_used(net, vq); vhost_net_tx_packet(net); } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len))); } /* Expects to be always run from workqueue - which acts as * read-size critical 
section for our kind of RCU. */ static void handle_tx(struct vhost_net *net) { struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; struct vhost_virtqueue *vq = &nvq->vq; struct socket *sock; mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX); sock = vhost_vq_get_backend(vq); if (!sock) goto out; if (!vq_meta_prefetch(vq)) goto out; vhost_disable_notify(&net->dev, vq); vhost_net_disable_vq(net, vq); if (vhost_sock_zcopy(sock)) handle_tx_zerocopy(net, sock); else handle_tx_copy(net, sock); out: mutex_unlock(&vq->mutex); } static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk) { struct sk_buff *head; int len = 0; unsigned long flags; if (rvq->rx_ring) return vhost_net_buf_peek(rvq); spin_lock_irqsave(&sk->sk_receive_queue.lock, flags); head = skb_peek(&sk->sk_receive_queue); if (likely(head)) { len = head->len; if (skb_vlan_tag_present(head)) len += VLAN_HLEN; } spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags); return len; } static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk, bool *busyloop_intr) { struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX]; struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX]; struct vhost_virtqueue *rvq = &rnvq->vq; struct vhost_virtqueue *tvq = &tnvq->vq; int len = peek_head_len(rnvq, sk); if (!len && rvq->busyloop_timeout) { /* Flush batched heads first */ vhost_net_signal_used(rnvq); /* Both tx vq and rx socket were polled here */ vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true); len = peek_head_len(rnvq, sk); } return len; } /* This is a multi-buffer version of vhost_get_desc, that works if * vq has read descriptors only. * @vq - the relevant virtqueue * @datalen - data length we'll be reading * @iovcount - returned count of io vectors we fill * @log - vhost log * @log_num - log offset * @quota - headcount quota, 1 for big buffer * returns number of buffer heads allocated, negative on error */ static int get_rx_bufs(struct vhost_virtqueue *vq, struct vring_used_elem *heads, int datalen, unsigned *iovcount, struct vhost_log *log, unsigned *log_num, unsigned int quota) { unsigned int out, in; int seg = 0; int headcount = 0; unsigned d; int r, nlogs = 0; /* len is always initialized before use since we are always called with * datalen > 0. */ u32 len; while (datalen > 0 && headcount < quota) { if (unlikely(seg >= UIO_MAXIOV)) { r = -ENOBUFS; goto err; } r = vhost_get_vq_desc(vq, vq->iov + seg, ARRAY_SIZE(vq->iov) - seg, &out, &in, log, log_num); if (unlikely(r < 0)) goto err; d = r; if (d == vq->num) { r = 0; goto err; } if (unlikely(out || in <= 0)) { vq_err(vq, "unexpected descriptor format for RX: " "out %d, in %d\n", out, in); r = -EINVAL; goto err; } if (unlikely(log)) { nlogs += *log_num; log += *log_num; } heads[headcount].id = cpu_to_vhost32(vq, d); len = iov_length(vq->iov + seg, in); heads[headcount].len = cpu_to_vhost32(vq, len); datalen -= len; ++headcount; seg += in; } heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen); *iovcount = seg; if (unlikely(log)) *log_num = nlogs; /* Detect overrun */ if (unlikely(datalen > 0)) { r = UIO_MAXIOV + 1; goto err; } return headcount; err: vhost_discard_vq_desc(vq, headcount); return r; } /* Expects to be always run from workqueue - which acts as * read-size critical section for our kind of RCU. 
*/ static void handle_rx(struct vhost_net *net) { struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX]; struct vhost_virtqueue *vq = &nvq->vq; unsigned in, log; struct vhost_log *vq_log; struct msghdr msg = { .msg_name = NULL, .msg_namelen = 0, .msg_control = NULL, /* FIXME: get and handle RX aux data. */ .msg_controllen = 0, .msg_flags = MSG_DONTWAIT, }; struct virtio_net_hdr hdr = { .flags = 0, .gso_type = VIRTIO_NET_HDR_GSO_NONE }; size_t total_len = 0; int err, mergeable; s16 headcount; size_t vhost_hlen, sock_hlen; size_t vhost_len, sock_len; bool busyloop_intr = false; bool set_num_buffers; struct socket *sock; struct iov_iter fixup; __virtio16 num_buffers; int recv_pkts = 0; mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX); sock = vhost_vq_get_backend(vq); if (!sock) goto out; if (!vq_meta_prefetch(vq)) goto out; vhost_disable_notify(&net->dev, vq); vhost_net_disable_vq(net, vq); vhost_hlen = nvq->vhost_hlen; sock_hlen = nvq->sock_hlen; vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ? vq->log : NULL; mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF); set_num_buffers = mergeable || vhost_has_feature(vq, VIRTIO_F_VERSION_1); do { sock_len = vhost_net_rx_peek_head_len(net, sock->sk, &busyloop_intr); if (!sock_len) break; sock_len += sock_hlen; vhost_len = sock_len + vhost_hlen; headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx, vhost_len, &in, vq_log, &log, likely(mergeable) ? UIO_MAXIOV : 1); /* On error, stop handling until the next kick. */ if (unlikely(headcount < 0)) goto out; /* OK, now we need to know about added descriptors. */ if (!headcount) { if (unlikely(busyloop_intr)) { vhost_poll_queue(&vq->poll); } else if (unlikely(vhost_enable_notify(&net->dev, vq))) { /* They have slipped one in as we were * doing that: check again. */ vhost_disable_notify(&net->dev, vq); continue; } /* Nothing new? Wait for eventfd to tell us * they refilled. */ goto out; } busyloop_intr = false; if (nvq->rx_ring) msg.msg_control = vhost_net_buf_consume(&nvq->rxq); /* On overrun, truncate and discard */ if (unlikely(headcount > UIO_MAXIOV)) { iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, 1, 1); err = sock->ops->recvmsg(sock, &msg, 1, MSG_DONTWAIT | MSG_TRUNC); pr_debug("Discarded rx packet: len %zd\n", sock_len); continue; } /* We don't need to be notified again. */ iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, in, vhost_len); fixup = msg.msg_iter; if (unlikely((vhost_hlen))) { /* We will supply the header ourselves * TODO: support TSO. */ iov_iter_advance(&msg.msg_iter, vhost_hlen); } err = sock->ops->recvmsg(sock, &msg, sock_len, MSG_DONTWAIT | MSG_TRUNC); /* Userspace might have consumed the packet meanwhile: * it's not supposed to do this usually, but might be hard * to prevent. Discard data we got (if any) and keep going. */ if (unlikely(err != sock_len)) { pr_debug("Discarded rx packet: " " len %d, expected %zd\n", err, sock_len); vhost_discard_vq_desc(vq, headcount); continue; } /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */ if (unlikely(vhost_hlen)) { if (copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr)) { vq_err(vq, "Unable to write vnet_hdr " "at addr %p\n", vq->iov->iov_base); goto out; } } else { /* Header came from socket; we'll need to patch * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF */ iov_iter_advance(&fixup, sizeof(hdr)); } /* TODO: Should check and handle checksum. 
*/ num_buffers = cpu_to_vhost16(vq, headcount); if (likely(set_num_buffers) && copy_to_iter(&num_buffers, sizeof num_buffers, &fixup) != sizeof num_buffers) { vq_err(vq, "Failed num_buffers write"); vhost_discard_vq_desc(vq, headcount); goto out; } nvq->done_idx += headcount; if (nvq->done_idx > VHOST_NET_BATCH) vhost_net_signal_used(nvq); if (unlikely(vq_log)) vhost_log_write(vq, vq_log, log, vhost_len, vq->iov, in); total_len += vhost_len; } while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len))); if (unlikely(busyloop_intr)) vhost_poll_queue(&vq->poll); else if (!sock_len) vhost_net_enable_vq(net, vq); out: vhost_net_signal_used(nvq); mutex_unlock(&vq->mutex); } static void handle_tx_kick(struct vhost_work *work) { struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, poll.work); struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev); handle_tx(net); } static void handle_rx_kick(struct vhost_work *work) { struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, poll.work); struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev); handle_rx(net); } static void handle_tx_net(struct vhost_work *work) { struct vhost_net *net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_TX].work); handle_tx(net); } static void handle_rx_net(struct vhost_work *work) { struct vhost_net *net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_RX].work); handle_rx(net); } static int vhost_net_open(struct inode *inode, struct file *f) { struct vhost_net *n; struct vhost_dev *dev; struct vhost_virtqueue **vqs; void **queue; struct xdp_buff *xdp; int i; n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL); if (!n) return -ENOMEM; vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL); if (!vqs) { kvfree(n); return -ENOMEM; } queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *), GFP_KERNEL); if (!queue) { kfree(vqs); kvfree(n); return -ENOMEM; } n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue; xdp = kmalloc_array(VHOST_NET_BATCH, sizeof(*xdp), GFP_KERNEL); if (!xdp) { kfree(vqs); kvfree(n); kfree(queue); return -ENOMEM; } n->vqs[VHOST_NET_VQ_TX].xdp = xdp; dev = &n->dev; vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq; vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq; n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick; n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick; for (i = 0; i < VHOST_NET_VQ_MAX; i++) { n->vqs[i].ubufs = NULL; n->vqs[i].ubuf_info = NULL; n->vqs[i].upend_idx = 0; n->vqs[i].done_idx = 0; n->vqs[i].batched_xdp = 0; n->vqs[i].vhost_hlen = 0; n->vqs[i].sock_hlen = 0; n->vqs[i].rx_ring = NULL; vhost_net_buf_init(&n->vqs[i].rxq); } vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX, UIO_MAXIOV + VHOST_NET_BATCH, VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT, true, NULL); vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev, vqs[VHOST_NET_VQ_TX]); vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev, vqs[VHOST_NET_VQ_RX]); f->private_data = n; page_frag_cache_init(&n->pf_cache); return 0; } static struct socket *vhost_net_stop_vq(struct vhost_net *n, struct vhost_virtqueue *vq) { struct socket *sock; struct vhost_net_virtqueue *nvq = container_of(vq, struct vhost_net_virtqueue, vq); mutex_lock(&vq->mutex); sock = vhost_vq_get_backend(vq); vhost_net_disable_vq(n, vq); vhost_vq_set_backend(vq, NULL); vhost_net_buf_unproduce(nvq); nvq->rx_ring = NULL; mutex_unlock(&vq->mutex); return sock; } static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock, struct 
socket **rx_sock) { *tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq); *rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq); } static void vhost_net_flush(struct vhost_net *n) { vhost_dev_flush(&n->dev); if (n->vqs[VHOST_NET_VQ_TX].ubufs) { mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); n->tx_flush = true; mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); /* Wait for all lower device DMAs done. */ vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs); mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); n->tx_flush = false; atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1); mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex); } } static int vhost_net_release(struct inode *inode, struct file *f) { struct vhost_net *n = f->private_data; struct socket *tx_sock; struct socket *rx_sock; vhost_net_stop(n, &tx_sock, &rx_sock); vhost_net_flush(n); vhost_dev_stop(&n->dev); vhost_dev_cleanup(&n->dev); vhost_net_vq_reset(n); if (tx_sock) sockfd_put(tx_sock); if (rx_sock) sockfd_put(rx_sock); /* Make sure no callbacks are outstanding */ synchronize_rcu(); /* We do an extra flush before freeing memory, * since jobs can re-queue themselves. */ vhost_net_flush(n); kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue); kfree(n->vqs[VHOST_NET_VQ_TX].xdp); kfree(n->dev.vqs); page_frag_cache_drain(&n->pf_cache); kvfree(n); return 0; } static struct socket *get_raw_socket(int fd) { int r; struct socket *sock = sockfd_lookup(fd, &r); if (!sock) return ERR_PTR(-ENOTSOCK); /* Parameter checking */ if (sock->sk->sk_type != SOCK_RAW) { r = -ESOCKTNOSUPPORT; goto err; } if (sock->sk->sk_family != AF_PACKET) { r = -EPFNOSUPPORT; goto err; } return sock; err: sockfd_put(sock); return ERR_PTR(r); } static struct ptr_ring *get_tap_ptr_ring(struct file *file) { struct ptr_ring *ring; ring = tun_get_tx_ring(file); if (!IS_ERR(ring)) goto out; ring = tap_get_ptr_ring(file); if (!IS_ERR(ring)) goto out; ring = NULL; out: return ring; } static struct socket *get_tap_socket(int fd) { struct file *file = fget(fd); struct socket *sock; if (!file) return ERR_PTR(-EBADF); sock = tun_get_socket(file); if (!IS_ERR(sock)) return sock; sock = tap_get_socket(file); if (IS_ERR(sock)) fput(file); return sock; } static struct socket *get_socket(int fd) { struct socket *sock; /* special case to disable backend */ if (fd == -1) return NULL; sock = get_raw_socket(fd); if (!IS_ERR(sock)) return sock; sock = get_tap_socket(fd); if (!IS_ERR(sock)) return sock; return ERR_PTR(-ENOTSOCK); } static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) { struct socket *sock, *oldsock; struct vhost_virtqueue *vq; struct vhost_net_virtqueue *nvq; struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL; int r; mutex_lock(&n->dev.mutex); r = vhost_dev_check_owner(&n->dev); if (r) goto err; if (index >= VHOST_NET_VQ_MAX) { r = -ENOBUFS; goto err; } vq = &n->vqs[index].vq; nvq = &n->vqs[index]; mutex_lock(&vq->mutex); if (fd == -1) vhost_clear_msg(&n->dev); /* Verify that ring has been setup correctly. 
*/ if (!vhost_vq_access_ok(vq)) { r = -EFAULT; goto err_vq; } sock = get_socket(fd); if (IS_ERR(sock)) { r = PTR_ERR(sock); goto err_vq; } /* start polling new socket */ oldsock = vhost_vq_get_backend(vq); if (sock != oldsock) { ubufs = vhost_net_ubuf_alloc(vq, sock && vhost_sock_zcopy(sock)); if (IS_ERR(ubufs)) { r = PTR_ERR(ubufs); goto err_ubufs; } vhost_net_disable_vq(n, vq); vhost_vq_set_backend(vq, sock); vhost_net_buf_unproduce(nvq); r = vhost_vq_init_access(vq); if (r) goto err_used; r = vhost_net_enable_vq(n, vq); if (r) goto err_used; if (index == VHOST_NET_VQ_RX) { if (sock) nvq->rx_ring = get_tap_ptr_ring(sock->file); else nvq->rx_ring = NULL; } oldubufs = nvq->ubufs; nvq->ubufs = ubufs; n->tx_packets = 0; n->tx_zcopy_err = 0; n->tx_flush = false; } mutex_unlock(&vq->mutex); if (oldubufs) { vhost_net_ubuf_put_wait_and_free(oldubufs); mutex_lock(&vq->mutex); vhost_zerocopy_signal_used(n, vq); mutex_unlock(&vq->mutex); } if (oldsock) { vhost_dev_flush(&n->dev); sockfd_put(oldsock); } mutex_unlock(&n->dev.mutex); return 0; err_used: vhost_vq_set_backend(vq, oldsock); vhost_net_enable_vq(n, vq); if (ubufs) vhost_net_ubuf_put_wait_and_free(ubufs); err_ubufs: if (sock) sockfd_put(sock); err_vq: mutex_unlock(&vq->mutex); err: mutex_unlock(&n->dev.mutex); return r; } static long vhost_net_reset_owner(struct vhost_net *n) { struct socket *tx_sock = NULL; struct socket *rx_sock = NULL; long err; struct vhost_iotlb *umem; mutex_lock(&n->dev.mutex); err = vhost_dev_check_owner(&n->dev); if (err) goto done; umem = vhost_dev_reset_owner_prepare(); if (!umem) { err = -ENOMEM; goto done; } vhost_net_stop(n, &tx_sock, &rx_sock); vhost_net_flush(n); vhost_dev_stop(&n->dev); vhost_dev_reset_owner(&n->dev, umem); vhost_net_vq_reset(n); done: mutex_unlock(&n->dev.mutex); if (tx_sock) sockfd_put(tx_sock); if (rx_sock) sockfd_put(rx_sock); return err; } static int vhost_net_set_features(struct vhost_net *n, u64 features) { size_t vhost_hlen, sock_hlen, hdr_len; int i; hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) ? 
sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr); if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) { /* vhost provides vnet_hdr */ vhost_hlen = hdr_len; sock_hlen = 0; } else { /* socket provides vnet_hdr */ vhost_hlen = 0; sock_hlen = hdr_len; } mutex_lock(&n->dev.mutex); if ((features & (1 << VHOST_F_LOG_ALL)) && !vhost_log_access_ok(&n->dev)) goto out_unlock; if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) { if (vhost_init_device_iotlb(&n->dev)) goto out_unlock; } for (i = 0; i < VHOST_NET_VQ_MAX; ++i) { mutex_lock(&n->vqs[i].vq.mutex); n->vqs[i].vq.acked_features = features; n->vqs[i].vhost_hlen = vhost_hlen; n->vqs[i].sock_hlen = sock_hlen; mutex_unlock(&n->vqs[i].vq.mutex); } mutex_unlock(&n->dev.mutex); return 0; out_unlock: mutex_unlock(&n->dev.mutex); return -EFAULT; } static long vhost_net_set_owner(struct vhost_net *n) { int r; mutex_lock(&n->dev.mutex); if (vhost_dev_has_owner(&n->dev)) { r = -EBUSY; goto out; } r = vhost_net_set_ubuf_info(n); if (r) goto out; r = vhost_dev_set_owner(&n->dev); if (r) vhost_net_clear_ubuf_info(n); vhost_net_flush(n); out: mutex_unlock(&n->dev.mutex); return r; } static long vhost_net_ioctl(struct file *f, unsigned int ioctl, unsigned long arg) { struct vhost_net *n = f->private_data; void __user *argp = (void __user *)arg; u64 __user *featurep = argp; struct vhost_vring_file backend; u64 features; int r; switch (ioctl) { case VHOST_NET_SET_BACKEND: if (copy_from_user(&backend, argp, sizeof backend)) return -EFAULT; return vhost_net_set_backend(n, backend.index, backend.fd); case VHOST_GET_FEATURES: features = VHOST_NET_FEATURES; if (copy_to_user(featurep, &features, sizeof features)) return -EFAULT; return 0; case VHOST_SET_FEATURES: if (copy_from_user(&features, featurep, sizeof features)) return -EFAULT; if (features & ~VHOST_NET_FEATURES) return -EOPNOTSUPP; return vhost_net_set_features(n, features); case VHOST_GET_BACKEND_FEATURES: features = VHOST_NET_BACKEND_FEATURES; if (copy_to_user(featurep, &features, sizeof(features))) return -EFAULT; return 0; case VHOST_SET_BACKEND_FEATURES: if (copy_from_user(&features, featurep, sizeof(features))) return -EFAULT; if (features & ~VHOST_NET_BACKEND_FEATURES) return -EOPNOTSUPP; vhost_set_backend_features(&n->dev, features); return 0; case VHOST_RESET_OWNER: return vhost_net_reset_owner(n); case VHOST_SET_OWNER: return vhost_net_set_owner(n); default: mutex_lock(&n->dev.mutex); r = vhost_dev_ioctl(&n->dev, ioctl, argp); if (r == -ENOIOCTLCMD) r = vhost_vring_ioctl(&n->dev, ioctl, argp); else vhost_net_flush(n); mutex_unlock(&n->dev.mutex); return r; } } static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *file = iocb->ki_filp; struct vhost_net *n = file->private_data; struct vhost_dev *dev = &n->dev; int noblock = file->f_flags & O_NONBLOCK; return vhost_chr_read_iter(dev, to, noblock); } static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct vhost_net *n = file->private_data; struct vhost_dev *dev = &n->dev; return vhost_chr_write_iter(dev, from); } static __poll_t vhost_net_chr_poll(struct file *file, poll_table *wait) { struct vhost_net *n = file->private_data; struct vhost_dev *dev = &n->dev; return vhost_chr_poll(file, dev, wait); } static const struct file_operations vhost_net_fops = { .owner = THIS_MODULE, .release = vhost_net_release, .read_iter = vhost_net_chr_read_iter, .write_iter = vhost_net_chr_write_iter, .poll = vhost_net_chr_poll, 
.unlocked_ioctl = vhost_net_ioctl, .compat_ioctl = compat_ptr_ioctl, .open = vhost_net_open, .llseek = noop_llseek, }; static struct miscdevice vhost_net_misc = { .minor = VHOST_NET_MINOR, .name = "vhost-net", .fops = &vhost_net_fops, }; static int __init vhost_net_init(void) { if (experimental_zcopytx) vhost_net_enable_zcopy(VHOST_NET_VQ_TX); return misc_register(&vhost_net_misc); } module_init(vhost_net_init); static void __exit vhost_net_exit(void) { misc_deregister(&vhost_net_misc); } module_exit(vhost_net_exit); MODULE_VERSION("0.0.1"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Michael S. Tsirkin"); MODULE_DESCRIPTION("Host kernel accelerator for virtio net"); MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR); MODULE_ALIAS("devname:vhost-net"); |
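/*
 * Illustrative userspace sketch (not part of the driver above): the basic
 * ioctl sequence a VMM might use against /dev/vhost-net, matching the
 * commands dispatched by vhost_net_ioctl().  The helper name and the
 * tap_fd parameter are hypothetical; tap_fd is assumed to be an
 * already-configured tun/tap file descriptor.  The VHOST_SET_MEM_TABLE /
 * VHOST_SET_VRING_* setup that a real VMM performs before attaching a
 * backend is omitted for brevity; without it the VHOST_NET_SET_BACKEND
 * call is expected to fail the vhost_vq_access_ok() check with -EFAULT.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int vhost_net_attach_sketch(int tap_fd)
{
	struct vhost_vring_file backend;
	uint64_t features;
	unsigned int i;
	int vhost_fd;

	vhost_fd = open("/dev/vhost-net", O_RDWR);
	if (vhost_fd < 0)
		return -1;

	/* Become the owner of this vhost device. */
	if (ioctl(vhost_fd, VHOST_SET_OWNER, NULL) < 0)
		goto err;

	/* Feature negotiation: acknowledge what the host offers. */
	if (ioctl(vhost_fd, VHOST_GET_FEATURES, &features) < 0 ||
	    ioctl(vhost_fd, VHOST_SET_FEATURES, &features) < 0)
		goto err;

	/* Attach the tap backend to both virtqueues (0 = RX, 1 = TX). */
	for (i = 0; i < 2; i++) {
		backend.index = i;
		backend.fd = tap_fd;
		if (ioctl(vhost_fd, VHOST_NET_SET_BACKEND, &backend) < 0)
			perror("VHOST_NET_SET_BACKEND");
	}

	return vhost_fd;
err:
	close(vhost_fd);
	return -1;
}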
// SPDX-License-Identifier: GPL-2.0-only /* * netfilter module to enforce network quotas * * Sam Johnston <samj@samj.net> */ #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_quota.h> #include <linux/module.h> struct xt_quota_priv { spinlock_t lock; uint64_t quota; }; MODULE_LICENSE("GPL"); MODULE_AUTHOR("Sam Johnston <samj@samj.net>"); MODULE_DESCRIPTION("Xtables: countdown quota match"); MODULE_ALIAS("ipt_quota"); MODULE_ALIAS("ip6t_quota"); static bool quota_mt(const struct sk_buff *skb, struct xt_action_param *par) { struct xt_quota_info *q = (void *)par->matchinfo; struct xt_quota_priv *priv = q->master; bool ret = q->flags & XT_QUOTA_INVERT; spin_lock_bh(&priv->lock); if (priv->quota >= skb->len) { priv->quota -= skb->len; ret = !ret; } else { /* we do not allow even small packets from now on */ priv->quota = 0; } spin_unlock_bh(&priv->lock); return ret; } static int quota_mt_check(const struct xt_mtchk_param *par) { struct xt_quota_info *q = par->matchinfo; if (q->flags & ~XT_QUOTA_MASK) return -EINVAL; q->master = kmalloc(sizeof(*q->master), GFP_KERNEL); if (q->master == NULL) return -ENOMEM; spin_lock_init(&q->master->lock); q->master->quota = q->quota; return 0; } static void quota_mt_destroy(const struct xt_mtdtor_param *par) { const struct xt_quota_info *q = par->matchinfo; kfree(q->master); } static struct xt_match quota_mt_reg __read_mostly = { .name = "quota", .revision = 0, .family = NFPROTO_UNSPEC, .match = quota_mt, .checkentry = quota_mt_check, .destroy = quota_mt_destroy, .matchsize = sizeof(struct xt_quota_info), .usersize = offsetof(struct xt_quota_info, master), .me = THIS_MODULE, }; static int __init quota_mt_init(void) { return xt_register_match(&quota_mt_reg); } static void __exit quota_mt_exit(void) { xt_unregister_match(&quota_mt_reg); } module_init(quota_mt_init); module_exit(quota_mt_exit);
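/*
 * Usage note (illustrative, not part of the module above): this match is
 * normally driven by the iptables/ip6tables "quota" userspace match; a
 * rule along the lines of
 *
 *	iptables -A OUTPUT -m quota --quota 1048576 -j ACCEPT
 *
 * accepts roughly the first 1 MiB of traffic and then stops matching,
 * while XT_QUOTA_INVERT (the negated form of the option) makes the rule
 * match only once the byte budget is used up.  The countdown state lives
 * in the xt_quota_priv allocated in quota_mt_check(); the
 * .usersize = offsetof(struct xt_quota_info, master) initialiser keeps
 * that kernel pointer from being copied back to userspace when the
 * ruleset is read.
 */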
// SPDX-License-Identifier: GPL-2.0-only /* * linux/net/sunrpc/svc.c * * High-level RPC service routines * * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> * * Multiple threads pools and NUMAisation * Copyright (c) 2006 Silicon Graphics, Inc. * by Greg Banks <gnb@melbourne.sgi.com> */ #include <linux/linkage.h> #include <linux/sched/signal.h> #include <linux/errno.h> #include <linux/net.h> #include <linux/in.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/kthread.h> #include <linux/slab.h> #include <linux/sunrpc/types.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/stats.h> #include <linux/sunrpc/svcsock.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/bc_xprt.h> #include <trace/events/sunrpc.h> #include "fail.h" #include "sunrpc.h" #define RPCDBG_FACILITY RPCDBG_SVCDSP static void svc_unregister(const struct svc_serv *serv, struct net *net); #define SVC_POOL_DEFAULT SVC_POOL_GLOBAL /* * Mode for mapping cpus to pools. */ enum { SVC_POOL_AUTO = -1, /* choose one of the others */ SVC_POOL_GLOBAL, /* no mapping, just a single global pool * (legacy & UP mode) */ SVC_POOL_PERCPU, /* one pool per cpu */ SVC_POOL_PERNODE /* one pool per numa node */ }; /* * Structure for mapping cpus to pools and vice versa. * Setup once during sunrpc initialisation.
*/ struct svc_pool_map { int count; /* How many svc_servs use us */ int mode; /* Note: int not enum to avoid * warnings about "enumeration value * not handled in switch" */ unsigned int npools; unsigned int *pool_to; /* maps pool id to cpu or node */ unsigned int *to_pool; /* maps cpu or node to pool id */ }; static struct svc_pool_map svc_pool_map = { .mode = SVC_POOL_DEFAULT }; static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */ static int __param_set_pool_mode(const char *val, struct svc_pool_map *m) { int err, mode; mutex_lock(&svc_pool_map_mutex); err = 0; if (!strncmp(val, "auto", 4)) mode = SVC_POOL_AUTO; else if (!strncmp(val, "global", 6)) mode = SVC_POOL_GLOBAL; else if (!strncmp(val, "percpu", 6)) mode = SVC_POOL_PERCPU; else if (!strncmp(val, "pernode", 7)) mode = SVC_POOL_PERNODE; else err = -EINVAL; if (err) goto out; if (m->count == 0) m->mode = mode; else if (mode != m->mode) err = -EBUSY; out: mutex_unlock(&svc_pool_map_mutex); return err; } static int param_set_pool_mode(const char *val, const struct kernel_param *kp) { struct svc_pool_map *m = kp->arg; return __param_set_pool_mode(val, m); } int sunrpc_set_pool_mode(const char *val) { return __param_set_pool_mode(val, &svc_pool_map); } EXPORT_SYMBOL(sunrpc_set_pool_mode); /** * sunrpc_get_pool_mode - get the current pool_mode for the host * @buf: where to write the current pool_mode * @size: size of @buf * * Grab the current pool_mode from the svc_pool_map and write * the resulting string to @buf. Returns the number of characters * written to @buf (a'la snprintf()). */ int sunrpc_get_pool_mode(char *buf, size_t size) { struct svc_pool_map *m = &svc_pool_map; switch (m->mode) { case SVC_POOL_AUTO: return snprintf(buf, size, "auto"); case SVC_POOL_GLOBAL: return snprintf(buf, size, "global"); case SVC_POOL_PERCPU: return snprintf(buf, size, "percpu"); case SVC_POOL_PERNODE: return snprintf(buf, size, "pernode"); default: return snprintf(buf, size, "%d", m->mode); } } EXPORT_SYMBOL(sunrpc_get_pool_mode); static int param_get_pool_mode(char *buf, const struct kernel_param *kp) { char str[16]; int len; len = sunrpc_get_pool_mode(str, ARRAY_SIZE(str)); /* Ensure we have room for newline and NUL */ len = min_t(int, len, ARRAY_SIZE(str) - 2); /* tack on the newline */ str[len] = '\n'; str[len + 1] = '\0'; return sysfs_emit(buf, "%s", str); } module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode, &svc_pool_map, 0644); /* * Detect best pool mapping mode heuristically, * according to the machine's topology. */ static int svc_pool_map_choose_mode(void) { unsigned int node; if (nr_online_nodes > 1) { /* * Actually have multiple NUMA nodes, * so split pools on NUMA node boundaries */ return SVC_POOL_PERNODE; } node = first_online_node; if (nr_cpus_node(node) > 2) { /* * Non-trivial SMP, or CONFIG_NUMA on * non-NUMA hardware, e.g. with a generic * x86_64 kernel on Xeons. In this case we * want to divide the pools on cpu boundaries. */ return SVC_POOL_PERCPU; } /* default: one global pool */ return SVC_POOL_GLOBAL; } /* * Allocate the to_pool[] and pool_to[] arrays. * Returns 0 on success or an errno. 
*/ static int svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools) { m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL); if (!m->to_pool) goto fail; m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL); if (!m->pool_to) goto fail_free; return 0; fail_free: kfree(m->to_pool); m->to_pool = NULL; fail: return -ENOMEM; } /* * Initialise the pool map for SVC_POOL_PERCPU mode. * Returns number of pools or <0 on error. */ static int svc_pool_map_init_percpu(struct svc_pool_map *m) { unsigned int maxpools = nr_cpu_ids; unsigned int pidx = 0; unsigned int cpu; int err; err = svc_pool_map_alloc_arrays(m, maxpools); if (err) return err; for_each_online_cpu(cpu) { BUG_ON(pidx >= maxpools); m->to_pool[cpu] = pidx; m->pool_to[pidx] = cpu; pidx++; } /* cpus brought online later all get mapped to pool0, sorry */ return pidx; }; /* * Initialise the pool map for SVC_POOL_PERNODE mode. * Returns number of pools or <0 on error. */ static int svc_pool_map_init_pernode(struct svc_pool_map *m) { unsigned int maxpools = nr_node_ids; unsigned int pidx = 0; unsigned int node; int err; err = svc_pool_map_alloc_arrays(m, maxpools); if (err) return err; for_each_node_with_cpus(node) { /* some architectures (e.g. SN2) have cpuless nodes */ BUG_ON(pidx > maxpools); m->to_pool[node] = pidx; m->pool_to[pidx] = node; pidx++; } /* nodes brought online later all get mapped to pool0, sorry */ return pidx; } /* * Add a reference to the global map of cpus to pools (and * vice versa) if pools are in use. * Initialise the map if we're the first user. * Returns the number of pools. If this is '1', no reference * was taken. */ static unsigned int svc_pool_map_get(void) { struct svc_pool_map *m = &svc_pool_map; int npools = -1; mutex_lock(&svc_pool_map_mutex); if (m->count++) { mutex_unlock(&svc_pool_map_mutex); return m->npools; } if (m->mode == SVC_POOL_AUTO) m->mode = svc_pool_map_choose_mode(); switch (m->mode) { case SVC_POOL_PERCPU: npools = svc_pool_map_init_percpu(m); break; case SVC_POOL_PERNODE: npools = svc_pool_map_init_pernode(m); break; } if (npools <= 0) { /* default, or memory allocation failure */ npools = 1; m->mode = SVC_POOL_GLOBAL; } m->npools = npools; mutex_unlock(&svc_pool_map_mutex); return npools; } /* * Drop a reference to the global map of cpus to pools. * When the last reference is dropped, the map data is * freed; this allows the sysadmin to change the pool. */ static void svc_pool_map_put(void) { struct svc_pool_map *m = &svc_pool_map; mutex_lock(&svc_pool_map_mutex); if (!--m->count) { kfree(m->to_pool); m->to_pool = NULL; kfree(m->pool_to); m->pool_to = NULL; m->npools = 0; } mutex_unlock(&svc_pool_map_mutex); } static int svc_pool_map_get_node(unsigned int pidx) { const struct svc_pool_map *m = &svc_pool_map; if (m->count) { if (m->mode == SVC_POOL_PERCPU) return cpu_to_node(m->pool_to[pidx]); if (m->mode == SVC_POOL_PERNODE) return m->pool_to[pidx]; } return NUMA_NO_NODE; } /* * Set the given thread's cpus_allowed mask so that it * will only run on cpus in the given pool. */ static inline void svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx) { struct svc_pool_map *m = &svc_pool_map; unsigned int node = m->pool_to[pidx]; /* * The caller checks for sv_nrpools > 1, which * implies that we've been initialized. 
*/ WARN_ON_ONCE(m->count == 0); if (m->count == 0) return; switch (m->mode) { case SVC_POOL_PERCPU: { set_cpus_allowed_ptr(task, cpumask_of(node)); break; } case SVC_POOL_PERNODE: { set_cpus_allowed_ptr(task, cpumask_of_node(node)); break; } } } /** * svc_pool_for_cpu - Select pool to run a thread on this cpu * @serv: An RPC service * * Use the active CPU and the svc_pool_map's mode setting to * select the svc thread pool to use. Once initialized, the * svc_pool_map does not change. * * Return value: * A pointer to an svc_pool */ struct svc_pool *svc_pool_for_cpu(struct svc_serv *serv) { struct svc_pool_map *m = &svc_pool_map; int cpu = raw_smp_processor_id(); unsigned int pidx = 0; if (serv->sv_nrpools <= 1) return serv->sv_pools; switch (m->mode) { case SVC_POOL_PERCPU: pidx = m->to_pool[cpu]; break; case SVC_POOL_PERNODE: pidx = m->to_pool[cpu_to_node(cpu)]; break; } return &serv->sv_pools[pidx % serv->sv_nrpools]; } static int svc_rpcb_setup(struct svc_serv *serv, struct net *net) { int err; err = rpcb_create_local(net); if (err) return err; /* Remove any stale portmap registrations */ svc_unregister(serv, net); return 0; } void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net) { svc_unregister(serv, net); rpcb_put_local(net); } EXPORT_SYMBOL_GPL(svc_rpcb_cleanup); static int svc_uses_rpcbind(struct svc_serv *serv) { unsigned int p, i; for (p = 0; p < serv->sv_nprogs; p++) { struct svc_program *progp = &serv->sv_programs[p]; for (i = 0; i < progp->pg_nvers; i++) { if (progp->pg_vers[i] == NULL) continue; if (!progp->pg_vers[i]->vs_hidden) return 1; } } return 0; } int svc_bind(struct svc_serv *serv, struct net *net) { if (!svc_uses_rpcbind(serv)) return 0; return svc_rpcb_setup(serv, net); } EXPORT_SYMBOL_GPL(svc_bind); #if defined(CONFIG_SUNRPC_BACKCHANNEL) static void __svc_init_bc(struct svc_serv *serv) { lwq_init(&serv->sv_cb_list); } #else static void __svc_init_bc(struct svc_serv *serv) { } #endif /* * Create an RPC service */ static struct svc_serv * __svc_create(struct svc_program *prog, int nprogs, struct svc_stat *stats, unsigned int bufsize, int npools, int (*threadfn)(void *data)) { struct svc_serv *serv; unsigned int vers; unsigned int xdrsize; unsigned int i; if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL))) return NULL; serv->sv_name = prog->pg_name; serv->sv_programs = prog; serv->sv_nprogs = nprogs; serv->sv_stats = stats; if (bufsize > RPCSVC_MAXPAYLOAD) bufsize = RPCSVC_MAXPAYLOAD; serv->sv_max_payload = bufsize? 
bufsize : 4096; serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE); serv->sv_threadfn = threadfn; xdrsize = 0; for (i = 0; i < nprogs; i++) { struct svc_program *progp = &prog[i]; progp->pg_lovers = progp->pg_nvers-1; for (vers = 0; vers < progp->pg_nvers ; vers++) if (progp->pg_vers[vers]) { progp->pg_hivers = vers; if (progp->pg_lovers > vers) progp->pg_lovers = vers; if (progp->pg_vers[vers]->vs_xdrsize > xdrsize) xdrsize = progp->pg_vers[vers]->vs_xdrsize; } } serv->sv_xdrsize = xdrsize; INIT_LIST_HEAD(&serv->sv_tempsocks); INIT_LIST_HEAD(&serv->sv_permsocks); timer_setup(&serv->sv_temptimer, NULL, 0); spin_lock_init(&serv->sv_lock); __svc_init_bc(serv); serv->sv_nrpools = npools; serv->sv_pools = kcalloc(serv->sv_nrpools, sizeof(struct svc_pool), GFP_KERNEL); if (!serv->sv_pools) { kfree(serv); return NULL; } for (i = 0; i < serv->sv_nrpools; i++) { struct svc_pool *pool = &serv->sv_pools[i]; dprintk("svc: initialising pool %u for %s\n", i, serv->sv_name); pool->sp_id = i; lwq_init(&pool->sp_xprts); INIT_LIST_HEAD(&pool->sp_all_threads); init_llist_head(&pool->sp_idle_threads); percpu_counter_init(&pool->sp_messages_arrived, 0, GFP_KERNEL); percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL); percpu_counter_init(&pool->sp_threads_woken, 0, GFP_KERNEL); } return serv; } /** * svc_create - Create an RPC service * @prog: the RPC program the new service will handle * @bufsize: maximum message size for @prog * @threadfn: a function to service RPC requests for @prog * * Returns an instantiated struct svc_serv object or NULL. */ struct svc_serv *svc_create(struct svc_program *prog, unsigned int bufsize, int (*threadfn)(void *data)) { return __svc_create(prog, 1, NULL, bufsize, 1, threadfn); } EXPORT_SYMBOL_GPL(svc_create); /** * svc_create_pooled - Create an RPC service with pooled threads * @prog: Array of RPC programs the new service will handle * @nprogs: Number of programs in the array * @stats: the stats struct if desired * @bufsize: maximum message size for @prog * @threadfn: a function to service RPC requests for @prog * * Returns an instantiated struct svc_serv object or NULL. */ struct svc_serv *svc_create_pooled(struct svc_program *prog, unsigned int nprogs, struct svc_stat *stats, unsigned int bufsize, int (*threadfn)(void *data)) { struct svc_serv *serv; unsigned int npools = svc_pool_map_get(); serv = __svc_create(prog, nprogs, stats, bufsize, npools, threadfn); if (!serv) goto out_err; serv->sv_is_pooled = true; return serv; out_err: svc_pool_map_put(); return NULL; } EXPORT_SYMBOL_GPL(svc_create_pooled); /* * Destroy an RPC service. Should be called with appropriate locking to * protect sv_permsocks and sv_tempsocks. */ void svc_destroy(struct svc_serv **servp) { struct svc_serv *serv = *servp; unsigned int i; *servp = NULL; dprintk("svc: svc_destroy(%s)\n", serv->sv_programs->pg_name); timer_shutdown_sync(&serv->sv_temptimer); /* * Remaining transports at this point are not expected. 
*/ WARN_ONCE(!list_empty(&serv->sv_permsocks), "SVC: permsocks remain for %s\n", serv->sv_programs->pg_name); WARN_ONCE(!list_empty(&serv->sv_tempsocks), "SVC: tempsocks remain for %s\n", serv->sv_programs->pg_name); cache_clean_deferred(serv); if (serv->sv_is_pooled) svc_pool_map_put(); for (i = 0; i < serv->sv_nrpools; i++) { struct svc_pool *pool = &serv->sv_pools[i]; percpu_counter_destroy(&pool->sp_messages_arrived); percpu_counter_destroy(&pool->sp_sockets_queued); percpu_counter_destroy(&pool->sp_threads_woken); } kfree(serv->sv_pools); kfree(serv); } EXPORT_SYMBOL_GPL(svc_destroy); static bool svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node) { unsigned long pages, ret; /* bc_xprt uses fore channel allocated buffers */ if (svc_is_backchannel(rqstp)) return true; pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply. * We assume one is at most one page */ WARN_ON_ONCE(pages > RPCSVC_MAXPAGES); if (pages > RPCSVC_MAXPAGES) pages = RPCSVC_MAXPAGES; ret = alloc_pages_bulk_node(GFP_KERNEL, node, pages, rqstp->rq_pages); return ret == pages; } /* * Release an RPC server buffer */ static void svc_release_buffer(struct svc_rqst *rqstp) { unsigned int i; for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++) if (rqstp->rq_pages[i]) put_page(rqstp->rq_pages[i]); } static void svc_rqst_free(struct svc_rqst *rqstp) { folio_batch_release(&rqstp->rq_fbatch); svc_release_buffer(rqstp); if (rqstp->rq_scratch_page) put_page(rqstp->rq_scratch_page); kfree(rqstp->rq_resp); kfree(rqstp->rq_argp); kfree(rqstp->rq_auth_data); kfree_rcu(rqstp, rq_rcu_head); } static struct svc_rqst * svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node) { struct svc_rqst *rqstp; rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node); if (!rqstp) return rqstp; folio_batch_init(&rqstp->rq_fbatch); rqstp->rq_server = serv; rqstp->rq_pool = pool; rqstp->rq_scratch_page = alloc_pages_node(node, GFP_KERNEL, 0); if (!rqstp->rq_scratch_page) goto out_enomem; rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node); if (!rqstp->rq_argp) goto out_enomem; rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node); if (!rqstp->rq_resp) goto out_enomem; if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node)) goto out_enomem; rqstp->rq_err = -EAGAIN; /* No error yet */ serv->sv_nrthreads += 1; pool->sp_nrthreads += 1; /* Protected by whatever lock the service uses when calling * svc_set_num_threads() */ list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads); return rqstp; out_enomem: svc_rqst_free(rqstp); return NULL; } /** * svc_pool_wake_idle_thread - Awaken an idle thread in @pool * @pool: service thread pool * * Can be called from soft IRQ or process context. Finding an idle * service thread and marking it BUSY is atomic with respect to * other calls to svc_pool_wake_idle_thread(). * */ void svc_pool_wake_idle_thread(struct svc_pool *pool) { struct svc_rqst *rqstp; struct llist_node *ln; rcu_read_lock(); ln = READ_ONCE(pool->sp_idle_threads.first); if (ln) { rqstp = llist_entry(ln, struct svc_rqst, rq_idle); WRITE_ONCE(rqstp->rq_qtime, ktime_get()); if (!task_is_running(rqstp->rq_task)) { wake_up_process(rqstp->rq_task); trace_svc_wake_up(rqstp->rq_task->pid); percpu_counter_inc(&pool->sp_threads_woken); } rcu_read_unlock(); return; } rcu_read_unlock(); } EXPORT_SYMBOL_GPL(svc_pool_wake_idle_thread); static struct svc_pool * svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state) { return pool ? 
pool : &serv->sv_pools[(*state)++ % serv->sv_nrpools]; } static struct svc_pool * svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool, unsigned int *state) { struct svc_pool *pool; unsigned int i; pool = target_pool; if (!pool) { for (i = 0; i < serv->sv_nrpools; i++) { pool = &serv->sv_pools[--(*state) % serv->sv_nrpools]; if (pool->sp_nrthreads) break; } } if (pool && pool->sp_nrthreads) { set_bit(SP_VICTIM_REMAINS, &pool->sp_flags); set_bit(SP_NEED_VICTIM, &pool->sp_flags); return pool; } return NULL; } static int svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) { struct svc_rqst *rqstp; struct task_struct *task; struct svc_pool *chosen_pool; unsigned int state = serv->sv_nrthreads-1; int node; int err; do { nrservs--; chosen_pool = svc_pool_next(serv, pool, &state); node = svc_pool_map_get_node(chosen_pool->sp_id); rqstp = svc_prepare_thread(serv, chosen_pool, node); if (!rqstp) return -ENOMEM; task = kthread_create_on_node(serv->sv_threadfn, rqstp, node, "%s", serv->sv_name); if (IS_ERR(task)) { svc_exit_thread(rqstp); return PTR_ERR(task); } rqstp->rq_task = task; if (serv->sv_nrpools > 1) svc_pool_map_set_cpumask(task, chosen_pool->sp_id); svc_sock_update_bufs(serv); wake_up_process(task); wait_var_event(&rqstp->rq_err, rqstp->rq_err != -EAGAIN); err = rqstp->rq_err; if (err) { svc_exit_thread(rqstp); return err; } } while (nrservs > 0); return 0; } static int svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) { unsigned int state = serv->sv_nrthreads-1; struct svc_pool *victim; do { victim = svc_pool_victim(serv, pool, &state); if (!victim) break; svc_pool_wake_idle_thread(victim); wait_on_bit(&victim->sp_flags, SP_VICTIM_REMAINS, TASK_IDLE); nrservs++; } while (nrservs < 0); return 0; } /** * svc_set_num_threads - adjust number of threads per RPC service * @serv: RPC service to adjust * @pool: Specific pool from which to choose threads, or NULL * @nrservs: New number of threads for @serv (0 or less means kill all threads) * * Create or destroy threads to make the number of threads for @serv the * given number. If @pool is non-NULL, change only threads in that pool; * otherwise, round-robin between all pools for @serv. @serv's * sv_nrthreads is adjusted for each thread created or destroyed. * * Caller must ensure mutual exclusion between this and server startup or * shutdown. * * Returns zero on success or a negative errno if an error occurred while * starting a thread. */ int svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) { if (!pool) nrservs -= serv->sv_nrthreads; else nrservs -= pool->sp_nrthreads; if (nrservs > 0) return svc_start_kthreads(serv, pool, nrservs); if (nrservs < 0) return svc_stop_kthreads(serv, pool, nrservs); return 0; } EXPORT_SYMBOL_GPL(svc_set_num_threads); /** * svc_rqst_replace_page - Replace one page in rq_pages[] * @rqstp: svc_rqst with pages to replace * @page: replacement page * * When replacing a page in rq_pages, batch the release of the * replaced pages to avoid hammering the page allocator. 
* * Return values: * %true: page replaced * %false: array bounds checking failed */ bool svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page) { struct page **begin = rqstp->rq_pages; struct page **end = &rqstp->rq_pages[RPCSVC_MAXPAGES]; if (unlikely(rqstp->rq_next_page < begin || rqstp->rq_next_page > end)) { trace_svc_replace_page_err(rqstp); return false; } if (*rqstp->rq_next_page) { if (!folio_batch_add(&rqstp->rq_fbatch, page_folio(*rqstp->rq_next_page))) __folio_batch_release(&rqstp->rq_fbatch); } get_page(page); *(rqstp->rq_next_page++) = page; return true; } EXPORT_SYMBOL_GPL(svc_rqst_replace_page); /** * svc_rqst_release_pages - Release Reply buffer pages * @rqstp: RPC transaction context * * Release response pages that might still be in flight after * svc_send, and any spliced filesystem-owned pages. */ void svc_rqst_release_pages(struct svc_rqst *rqstp) { int i, count = rqstp->rq_next_page - rqstp->rq_respages; if (count) { release_pages(rqstp->rq_respages, count); for (i = 0; i < count; i++) rqstp->rq_respages[i] = NULL; } } /** * svc_exit_thread - finalise the termination of a sunrpc server thread * @rqstp: the svc_rqst which represents the thread. * * When a thread started with svc_new_thread() exits it must call * svc_exit_thread() as its last act. This must be done with the * service mutex held. Normally this is held by a DIFFERENT thread, the * one that is calling svc_set_num_threads() and which will wait for * SP_VICTIM_REMAINS to be cleared before dropping the mutex. If the * thread exits for any reason other than svc_thread_should_stop() * returning %true (which indicated that svc_set_num_threads() is * waiting for it to exit), then it must take the service mutex itself, * which can only safely be done using mutex_try_lock(). */ void svc_exit_thread(struct svc_rqst *rqstp) { struct svc_serv *serv = rqstp->rq_server; struct svc_pool *pool = rqstp->rq_pool; list_del_rcu(&rqstp->rq_all); pool->sp_nrthreads -= 1; serv->sv_nrthreads -= 1; svc_sock_update_bufs(serv); svc_rqst_free(rqstp); clear_and_wake_up_bit(SP_VICTIM_REMAINS, &pool->sp_flags); } EXPORT_SYMBOL_GPL(svc_exit_thread); /* * Register an "inet" protocol family netid with the local * rpcbind daemon via an rpcbind v4 SET request. * * No netconfig infrastructure is available in the kernel, so * we map IP_ protocol numbers to netids by hand. * * Returns zero on success; a negative errno value is returned * if any error occurs. */ static int __svc_rpcb_register4(struct net *net, const u32 program, const u32 version, const unsigned short protocol, const unsigned short port) { const struct sockaddr_in sin = { .sin_family = AF_INET, .sin_addr.s_addr = htonl(INADDR_ANY), .sin_port = htons(port), }; const char *netid; int error; switch (protocol) { case IPPROTO_UDP: netid = RPCBIND_NETID_UDP; break; case IPPROTO_TCP: netid = RPCBIND_NETID_TCP; break; default: return -ENOPROTOOPT; } error = rpcb_v4_register(net, program, version, (const struct sockaddr *)&sin, netid); /* * User space didn't support rpcbind v4, so retry this * registration request with the legacy rpcbind v2 protocol. */ if (error == -EPROTONOSUPPORT) error = rpcb_register(net, program, version, protocol, port); return error; } #if IS_ENABLED(CONFIG_IPV6) /* * Register an "inet6" protocol family netid with the local * rpcbind daemon via an rpcbind v4 SET request. * * No netconfig infrastructure is available in the kernel, so * we map IP_ protocol numbers to netids by hand. 
* * Returns zero on success; a negative errno value is returned * if any error occurs. */ static int __svc_rpcb_register6(struct net *net, const u32 program, const u32 version, const unsigned short protocol, const unsigned short port) { const struct sockaddr_in6 sin6 = { .sin6_family = AF_INET6, .sin6_addr = IN6ADDR_ANY_INIT, .sin6_port = htons(port), }; const char *netid; int error; switch (protocol) { case IPPROTO_UDP: netid = RPCBIND_NETID_UDP6; break; case IPPROTO_TCP: netid = RPCBIND_NETID_TCP6; break; default: return -ENOPROTOOPT; } error = rpcb_v4_register(net, program, version, (const struct sockaddr *)&sin6, netid); /* * User space didn't support rpcbind version 4, so we won't * use a PF_INET6 listener. */ if (error == -EPROTONOSUPPORT) error = -EAFNOSUPPORT; return error; } #endif /* IS_ENABLED(CONFIG_IPV6) */ /* * Register a kernel RPC service via rpcbind version 4. * * Returns zero on success; a negative errno value is returned * if any error occurs. */ static int __svc_register(struct net *net, const char *progname, const u32 program, const u32 version, const int family, const unsigned short protocol, const unsigned short port) { int error = -EAFNOSUPPORT; switch (family) { case PF_INET: error = __svc_rpcb_register4(net, program, version, protocol, port); break; #if IS_ENABLED(CONFIG_IPV6) case PF_INET6: error = __svc_rpcb_register6(net, program, version, protocol, port); #endif } trace_svc_register(progname, version, family, protocol, port, error); return error; } static int svc_rpcbind_set_version(struct net *net, const struct svc_program *progp, u32 version, int family, unsigned short proto, unsigned short port) { return __svc_register(net, progp->pg_name, progp->pg_prog, version, family, proto, port); } int svc_generic_rpcbind_set(struct net *net, const struct svc_program *progp, u32 version, int family, unsigned short proto, unsigned short port) { const struct svc_version *vers = progp->pg_vers[version]; int error; if (vers == NULL) return 0; if (vers->vs_hidden) { trace_svc_noregister(progp->pg_name, version, proto, port, family, 0); return 0; } /* * Don't register a UDP port if we need congestion * control. */ if (vers->vs_need_cong_ctrl && proto == IPPROTO_UDP) return 0; error = svc_rpcbind_set_version(net, progp, version, family, proto, port); return (vers->vs_rpcb_optnl) ? 0 : error; } EXPORT_SYMBOL_GPL(svc_generic_rpcbind_set); /** * svc_register - register an RPC service with the local portmapper * @serv: svc_serv struct for the service to register * @net: net namespace for the service to register * @family: protocol family of service's listener socket * @proto: transport protocol number to advertise * @port: port to advertise * * Service is registered for any address in the passed-in protocol family */ int svc_register(const struct svc_serv *serv, struct net *net, const int family, const unsigned short proto, const unsigned short port) { unsigned int p, i; int error = 0; WARN_ON_ONCE(proto == 0 && port == 0); if (proto == 0 && port == 0) return -EINVAL; for (p = 0; p < serv->sv_nprogs; p++) { struct svc_program *progp = &serv->sv_programs[p]; for (i = 0; i < progp->pg_nvers; i++) { error = progp->pg_rpcbind_set(net, progp, i, family, proto, port); if (error < 0) { printk(KERN_WARNING "svc: failed to register " "%sv%u RPC service (errno %d).\n", progp->pg_name, i, -error); break; } } } return error; } /* * If user space is running rpcbind, it should take the v4 UNSET * and clear everything for this [program, version]. 
If user space * is running portmap, it will reject the v4 UNSET, but won't have * any "inet6" entries anyway. So a PMAP_UNSET should be sufficient * in this case to clear all existing entries for [program, version]. */ static void __svc_unregister(struct net *net, const u32 program, const u32 version, const char *progname) { int error; error = rpcb_v4_register(net, program, version, NULL, ""); /* * User space didn't support rpcbind v4, so retry this * request with the legacy rpcbind v2 protocol. */ if (error == -EPROTONOSUPPORT) error = rpcb_register(net, program, version, 0, 0); trace_svc_unregister(progname, version, error); } /* * All netids, bind addresses and ports registered for [program, version] * are removed from the local rpcbind database (if the service is not * hidden) to make way for a new instance of the service. * * The result of unregistration is reported via dprintk for those who want * verification of the result, but is otherwise not important. */ static void svc_unregister(const struct svc_serv *serv, struct net *net) { struct sighand_struct *sighand; unsigned long flags; unsigned int p, i; clear_thread_flag(TIF_SIGPENDING); for (p = 0; p < serv->sv_nprogs; p++) { struct svc_program *progp = &serv->sv_programs[p]; for (i = 0; i < progp->pg_nvers; i++) { if (progp->pg_vers[i] == NULL) continue; if (progp->pg_vers[i]->vs_hidden) continue; __svc_unregister(net, progp->pg_prog, i, progp->pg_name); } } rcu_read_lock(); sighand = rcu_dereference(current->sighand); spin_lock_irqsave(&sighand->siglock, flags); recalc_sigpending(); spin_unlock_irqrestore(&sighand->siglock, flags); rcu_read_unlock(); } /* * dprintk the given error with the address of the client that caused it. */ #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) static __printf(2, 3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) { struct va_format vaf; va_list args; char buf[RPC_MAX_ADDRBUFLEN]; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf); va_end(args); } #else static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {} #endif __be32 svc_generic_init_request(struct svc_rqst *rqstp, const struct svc_program *progp, struct svc_process_info *ret) { const struct svc_version *versp = NULL; /* compiler food */ const struct svc_procedure *procp = NULL; if (rqstp->rq_vers >= progp->pg_nvers ) goto err_bad_vers; versp = progp->pg_vers[rqstp->rq_vers]; if (!versp) goto err_bad_vers; /* * Some protocol versions (namely NFSv4) require some form of * congestion control. (See RFC 7530 section 3.1 paragraph 2) * In other words, UDP is not allowed. We mark those when setting * up the svc_xprt, and verify that here. * * The spec is not very clear about what error should be returned * when someone tries to access a server that is listening on UDP * for lower versions. RPC_PROG_MISMATCH seems to be the closest * fit. 
*/ if (versp->vs_need_cong_ctrl && rqstp->rq_xprt && !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags)) goto err_bad_vers; if (rqstp->rq_proc >= versp->vs_nproc) goto err_bad_proc; rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc]; /* Initialize storage for argp and resp */ memset(rqstp->rq_argp, 0, procp->pc_argzero); memset(rqstp->rq_resp, 0, procp->pc_ressize); /* Bump per-procedure stats counter */ this_cpu_inc(versp->vs_count[rqstp->rq_proc]); ret->dispatch = versp->vs_dispatch; return rpc_success; err_bad_vers: ret->mismatch.lovers = progp->pg_lovers; ret->mismatch.hivers = progp->pg_hivers; return rpc_prog_mismatch; err_bad_proc: return rpc_proc_unavail; } EXPORT_SYMBOL_GPL(svc_generic_init_request); /* * Common routine for processing the RPC request. */ static int svc_process_common(struct svc_rqst *rqstp) { struct xdr_stream *xdr = &rqstp->rq_res_stream; struct svc_program *progp = NULL; const struct svc_procedure *procp = NULL; struct svc_serv *serv = rqstp->rq_server; struct svc_process_info process; enum svc_auth_status auth_res; unsigned int aoffset; int pr, rc; __be32 *p; /* Will be turned off only when NFSv4 Sessions are used */ set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags); clear_bit(RQ_DROPME, &rqstp->rq_flags); /* Construct the first words of the reply: */ svcxdr_init_encode(rqstp); xdr_stream_encode_be32(xdr, rqstp->rq_xid); xdr_stream_encode_be32(xdr, rpc_reply); p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 4); if (unlikely(!p)) goto err_short_len; if (*p++ != cpu_to_be32(RPC_VERSION)) goto err_bad_rpc; xdr_stream_encode_be32(xdr, rpc_msg_accepted); rqstp->rq_prog = be32_to_cpup(p++); rqstp->rq_vers = be32_to_cpup(p++); rqstp->rq_proc = be32_to_cpup(p); for (pr = 0; pr < serv->sv_nprogs; pr++) if (rqstp->rq_prog == serv->sv_programs[pr].pg_prog) progp = &serv->sv_programs[pr]; /* * Decode auth data, and add verifier to reply buffer. * We do this before anything else in order to get a decent * auth verifier. */ auth_res = svc_authenticate(rqstp); /* Also give the program a chance to reject this call: */ if (auth_res == SVC_OK && progp) auth_res = progp->pg_authenticate(rqstp); trace_svc_authenticate(rqstp, auth_res); switch (auth_res) { case SVC_OK: break; case SVC_GARBAGE: goto err_garbage_args; case SVC_SYSERR: goto err_system_err; case SVC_DENIED: goto err_bad_auth; case SVC_CLOSE: goto close; case SVC_DROP: goto dropit; case SVC_COMPLETE: goto sendit; default: pr_warn_once("Unexpected svc_auth_status (%d)\n", auth_res); goto err_system_err; } if (progp == NULL) goto err_bad_prog; switch (progp->pg_init_request(rqstp, progp, &process)) { case rpc_success: break; case rpc_prog_unavail: goto err_bad_prog; case rpc_prog_mismatch: goto err_bad_vers; case rpc_proc_unavail: goto err_bad_proc; } procp = rqstp->rq_procinfo; /* Should this check go into the dispatcher? */ if (!procp || !procp->pc_func) goto err_bad_proc; /* Syntactic check complete */ if (serv->sv_stats) serv->sv_stats->rpccnt++; trace_svc_process(rqstp, progp->pg_name); aoffset = xdr_stream_pos(xdr); /* un-reserve some of the out-queue now that we have a * better idea of reply size */ if (procp->pc_xdrressize) svc_reserve_auth(rqstp, procp->pc_xdrressize<<2); /* Call the function that processes the request. 
*/ rc = process.dispatch(rqstp); if (procp->pc_release) procp->pc_release(rqstp); xdr_finish_decode(xdr); if (!rc) goto dropit; if (rqstp->rq_auth_stat != rpc_auth_ok) goto err_bad_auth; if (*rqstp->rq_accept_statp != rpc_success) xdr_truncate_encode(xdr, aoffset); if (procp->pc_encode == NULL) goto dropit; sendit: if (svc_authorise(rqstp)) goto close_xprt; return 1; /* Caller can now send it */ dropit: svc_authorise(rqstp); /* doesn't hurt to call this twice */ dprintk("svc: svc_process dropit\n"); return 0; close: svc_authorise(rqstp); close_xprt: if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags)) svc_xprt_close(rqstp->rq_xprt); dprintk("svc: svc_process close\n"); return 0; err_short_len: svc_printk(rqstp, "short len %u, dropping request\n", rqstp->rq_arg.len); goto close_xprt; err_bad_rpc: if (serv->sv_stats) serv->sv_stats->rpcbadfmt++; xdr_stream_encode_u32(xdr, RPC_MSG_DENIED); xdr_stream_encode_u32(xdr, RPC_MISMATCH); /* Only RPCv2 supported */ xdr_stream_encode_u32(xdr, RPC_VERSION); xdr_stream_encode_u32(xdr, RPC_VERSION); return 1; /* don't wrap */ err_bad_auth: dprintk("svc: authentication failed (%d)\n", be32_to_cpu(rqstp->rq_auth_stat)); if (serv->sv_stats) serv->sv_stats->rpcbadauth++; /* Restore write pointer to location of reply status: */ xdr_truncate_encode(xdr, XDR_UNIT * 2); xdr_stream_encode_u32(xdr, RPC_MSG_DENIED); xdr_stream_encode_u32(xdr, RPC_AUTH_ERROR); xdr_stream_encode_be32(xdr, rqstp->rq_auth_stat); goto sendit; err_bad_prog: dprintk("svc: unknown program %d\n", rqstp->rq_prog); if (serv->sv_stats) serv->sv_stats->rpcbadfmt++; *rqstp->rq_accept_statp = rpc_prog_unavail; goto sendit; err_bad_vers: svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n", rqstp->rq_vers, rqstp->rq_prog, progp->pg_name); if (serv->sv_stats) serv->sv_stats->rpcbadfmt++; *rqstp->rq_accept_statp = rpc_prog_mismatch; /* * svc_authenticate() has already added the verifier and * advanced the stream just past rq_accept_statp. */ xdr_stream_encode_u32(xdr, process.mismatch.lovers); xdr_stream_encode_u32(xdr, process.mismatch.hivers); goto sendit; err_bad_proc: svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc); if (serv->sv_stats) serv->sv_stats->rpcbadfmt++; *rqstp->rq_accept_statp = rpc_proc_unavail; goto sendit; err_garbage_args: svc_printk(rqstp, "failed to decode RPC header\n"); if (serv->sv_stats) serv->sv_stats->rpcbadfmt++; *rqstp->rq_accept_statp = rpc_garbage_args; goto sendit; err_system_err: if (serv->sv_stats) serv->sv_stats->rpcbadfmt++; *rqstp->rq_accept_statp = rpc_system_err; goto sendit; } /* * Drop request */ static void svc_drop(struct svc_rqst *rqstp) { trace_svc_drop(rqstp); } /** * svc_process - Execute one RPC transaction * @rqstp: RPC transaction context * */ void svc_process(struct svc_rqst *rqstp) { struct kvec *resv = &rqstp->rq_res.head[0]; __be32 *p; #if IS_ENABLED(CONFIG_FAIL_SUNRPC) if (!fail_sunrpc.ignore_server_disconnect && should_fail(&fail_sunrpc.attr, 1)) svc_xprt_deferred_close(rqstp->rq_xprt); #endif /* * Setup response xdr_buf. 
* Initially it has just one page */ rqstp->rq_next_page = &rqstp->rq_respages[1]; resv->iov_base = page_address(rqstp->rq_respages[0]); resv->iov_len = 0; rqstp->rq_res.pages = rqstp->rq_next_page; rqstp->rq_res.len = 0; rqstp->rq_res.page_base = 0; rqstp->rq_res.page_len = 0; rqstp->rq_res.buflen = PAGE_SIZE; rqstp->rq_res.tail[0].iov_base = NULL; rqstp->rq_res.tail[0].iov_len = 0; svcxdr_init_decode(rqstp); p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2); if (unlikely(!p)) goto out_drop; rqstp->rq_xid = *p++; if (unlikely(*p != rpc_call)) goto out_baddir; if (!svc_process_common(rqstp)) goto out_drop; svc_send(rqstp); return; out_baddir: svc_printk(rqstp, "bad direction 0x%08x, dropping request\n", be32_to_cpu(*p)); if (rqstp->rq_server->sv_stats) rqstp->rq_server->sv_stats->rpcbadfmt++; out_drop: svc_drop(rqstp); } #if defined(CONFIG_SUNRPC_BACKCHANNEL) /** * svc_process_bc - process a reverse-direction RPC request * @req: RPC request to be used for client-side processing * @rqstp: server-side execution context * */ void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp) { struct rpc_timeout timeout = { .to_increment = 0, }; struct rpc_task *task; int proc_error; /* Build the svc_rqst used by the common processing routine */ rqstp->rq_xid = req->rq_xid; rqstp->rq_prot = req->rq_xprt->prot; rqstp->rq_bc_net = req->rq_xprt->xprt_net; rqstp->rq_addrlen = sizeof(req->rq_xprt->addr); memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen); memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg)); memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res)); /* Adjust the argument buffer length */ rqstp->rq_arg.len = req->rq_private_buf.len; if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) { rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len; rqstp->rq_arg.page_len = 0; } else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len) rqstp->rq_arg.page_len = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len; else rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len; /* Reset the response buffer */ rqstp->rq_res.head[0].iov_len = 0; /* * Skip the XID and calldir fields because they've already * been processed by the caller. */ svcxdr_init_decode(rqstp); if (!xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2)) return; /* Parse and execute the bc call */ proc_error = svc_process_common(rqstp); atomic_dec(&req->rq_xprt->bc_slot_count); if (!proc_error) { /* Processing error: drop the request */ xprt_free_bc_request(req); return; } /* Finally, send the reply synchronously */ if (rqstp->bc_to_initval > 0) { timeout.to_initval = rqstp->bc_to_initval; timeout.to_retries = rqstp->bc_to_retries; } else { timeout.to_initval = req->rq_xprt->timeout->to_initval; timeout.to_retries = req->rq_xprt->timeout->to_retries; } timeout.to_maxval = timeout.to_initval; memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf)); task = rpc_run_bc_task(req, &timeout); if (IS_ERR(task)) return; WARN_ON_ONCE(atomic_read(&task->tk_count) != 1); rpc_put_task(task); } #endif /* CONFIG_SUNRPC_BACKCHANNEL */ /** * svc_max_payload - Return transport-specific limit on the RPC payload * @rqstp: RPC transaction context * * Returns the maximum number of payload bytes the current transport * allows. 
*/ u32 svc_max_payload(const struct svc_rqst *rqstp) { u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload; if (rqstp->rq_server->sv_max_payload < max) max = rqstp->rq_server->sv_max_payload; return max; } EXPORT_SYMBOL_GPL(svc_max_payload); /** * svc_proc_name - Return RPC procedure name in string form * @rqstp: svc_rqst to operate on * * Return value: * Pointer to a NUL-terminated string */ const char *svc_proc_name(const struct svc_rqst *rqstp) { if (rqstp && rqstp->rq_procinfo) return rqstp->rq_procinfo->pc_name; return "unknown"; } /** * svc_encode_result_payload - mark a range of bytes as a result payload * @rqstp: svc_rqst to operate on * @offset: payload's byte offset in rqstp->rq_res * @length: size of payload, in bytes * * Returns zero on success, or a negative errno if a permanent * error occurred. */ int svc_encode_result_payload(struct svc_rqst *rqstp, unsigned int offset, unsigned int length) { return rqstp->rq_xprt->xpt_ops->xpo_result_payload(rqstp, offset, length); } EXPORT_SYMBOL_GPL(svc_encode_result_payload); /** * svc_fill_write_vector - Construct data argument for VFS write call * @rqstp: svc_rqst to operate on * @payload: xdr_buf containing only the write data payload * * Fills in rqstp::rq_vec, and returns the number of elements. */ unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, struct xdr_buf *payload) { struct page **pages = payload->pages; struct kvec *first = payload->head; struct kvec *vec = rqstp->rq_vec; size_t total = payload->len; unsigned int i; /* Some types of transport can present the write payload * entirely in rq_arg.pages. In this case, @first is empty. */ i = 0; if (first->iov_len) { vec[i].iov_base = first->iov_base; vec[i].iov_len = min_t(size_t, total, first->iov_len); total -= vec[i].iov_len; ++i; } while (total) { vec[i].iov_base = page_address(*pages); vec[i].iov_len = min_t(size_t, total, PAGE_SIZE); total -= vec[i].iov_len; ++i; ++pages; } WARN_ON_ONCE(i > ARRAY_SIZE(rqstp->rq_vec)); return i; } EXPORT_SYMBOL_GPL(svc_fill_write_vector); /** * svc_fill_symlink_pathname - Construct pathname argument for VFS symlink call * @rqstp: svc_rqst to operate on * @first: buffer containing first section of pathname * @p: buffer containing remaining section of pathname * @total: total length of the pathname argument * * The VFS symlink API demands a NUL-terminated pathname in mapped memory. * Returns pointer to a NUL-terminated string, or an ERR_PTR. Caller must free * the returned string. */ char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first, void *p, size_t total) { size_t len, remaining; char *result, *dst; result = kmalloc(total + 1, GFP_KERNEL); if (!result) return ERR_PTR(-ESERVERFAULT); dst = result; remaining = total; len = min_t(size_t, total, first->iov_len); if (len) { memcpy(dst, first->iov_base, len); dst += len; remaining -= len; } if (remaining) { len = min_t(size_t, remaining, PAGE_SIZE); memcpy(dst, p, len); dst += len; } *dst = '\0'; /* Sanity check: Linux doesn't allow the pathname argument to * contain a NUL byte. */ if (strlen(result) != total) { kfree(result); return ERR_PTR(-EINVAL); } return result; } EXPORT_SYMBOL_GPL(svc_fill_symlink_pathname); |
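
/*
 * Illustrative sketch (not part of svc.c): how a server module might
 * drive the thread-pool API above -- create a pooled service, size it
 * with svc_set_num_threads(), and tear it down. All example_* names
 * are hypothetical, and the void svc_recv(rqstp) loop and the header
 * providing it are assumptions based on the current sunrpc
 * service-thread convention; treat this as a hedged usage sketch,
 * not a drop-in implementation.
 */
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcsock.h>	/* assumed to declare svc_recv() */

static int example_threadfn(void *data)
{
	struct svc_rqst *rqstp = data;

	/* Loop until svc_set_num_threads() selects this thread as a victim. */
	while (!svc_thread_should_stop(rqstp))
		svc_recv(rqstp);

	/* Must be the thread's last act (see svc_exit_thread() above). */
	svc_exit_thread(rqstp);
	return 0;
}

/* Caller supplies a filled-in svc_program and serializes against shutdown. */
static struct svc_serv *example_start(struct svc_program *example_prog)
{
	struct svc_serv *serv;
	int err;

	serv = svc_create_pooled(example_prog, 1, NULL, 64 * 1024,
				 example_threadfn);
	if (!serv)
		return NULL;

	err = svc_set_num_threads(serv, NULL, 4);	/* four threads, all pools */
	if (err) {
		svc_destroy(&serv);
		return NULL;
	}
	return serv;
}

static void example_stop(struct svc_serv *serv)
{
	svc_set_num_threads(serv, NULL, 0);	/* reap every thread */
	svc_destroy(&serv);
}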
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell NFC-over-USB driver: USB interface related functions
 *
 * Copyright (C) 2014, Marvell International Ltd.
*/ #include <linux/module.h> #include <linux/usb.h> #include <linux/nfc.h> #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> #include "nfcmrvl.h" static struct usb_device_id nfcmrvl_table[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x1286, 0x2046, USB_CLASS_VENDOR_SPEC, 4, 1) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, nfcmrvl_table); #define NFCMRVL_USB_BULK_RUNNING 1 #define NFCMRVL_USB_SUSPENDING 2 struct nfcmrvl_usb_drv_data { struct usb_device *udev; struct usb_interface *intf; unsigned long flags; struct work_struct waker; struct usb_anchor tx_anchor; struct usb_anchor bulk_anchor; struct usb_anchor deferred; int tx_in_flight; /* protects tx_in_flight */ spinlock_t txlock; struct usb_endpoint_descriptor *bulk_tx_ep; struct usb_endpoint_descriptor *bulk_rx_ep; int suspend_count; struct nfcmrvl_private *priv; }; static int nfcmrvl_inc_tx(struct nfcmrvl_usb_drv_data *drv_data) { unsigned long flags; int rv; spin_lock_irqsave(&drv_data->txlock, flags); rv = test_bit(NFCMRVL_USB_SUSPENDING, &drv_data->flags); if (!rv) drv_data->tx_in_flight++; spin_unlock_irqrestore(&drv_data->txlock, flags); return rv; } static void nfcmrvl_bulk_complete(struct urb *urb) { struct nfcmrvl_usb_drv_data *drv_data = urb->context; int err; dev_dbg(&drv_data->udev->dev, "urb %p status %d count %d\n", urb, urb->status, urb->actual_length); if (!test_bit(NFCMRVL_NCI_RUNNING, &drv_data->flags)) return; if (!urb->status) { struct sk_buff *skb; skb = nci_skb_alloc(drv_data->priv->ndev, urb->actual_length, GFP_ATOMIC); if (!skb) { nfc_err(&drv_data->udev->dev, "failed to alloc mem\n"); } else { skb_put_data(skb, urb->transfer_buffer, urb->actual_length); if (nfcmrvl_nci_recv_frame(drv_data->priv, skb) < 0) nfc_err(&drv_data->udev->dev, "corrupted Rx packet\n"); } } if (!test_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags)) return; usb_anchor_urb(urb, &drv_data->bulk_anchor); usb_mark_last_busy(drv_data->udev); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { /* -EPERM: urb is being killed; * -ENODEV: device got disconnected */ if (err != -EPERM && err != -ENODEV) nfc_err(&drv_data->udev->dev, "urb %p failed to resubmit (%d)\n", urb, -err); usb_unanchor_urb(urb); } } static int nfcmrvl_submit_bulk_urb(struct nfcmrvl_usb_drv_data *drv_data, gfp_t mem_flags) { struct urb *urb; unsigned char *buf; unsigned int pipe; int err, size = NFCMRVL_NCI_MAX_EVENT_SIZE; if (!drv_data->bulk_rx_ep) return -ENODEV; urb = usb_alloc_urb(0, mem_flags); if (!urb) return -ENOMEM; buf = kmalloc(size, mem_flags); if (!buf) { usb_free_urb(urb); return -ENOMEM; } pipe = usb_rcvbulkpipe(drv_data->udev, drv_data->bulk_rx_ep->bEndpointAddress); usb_fill_bulk_urb(urb, drv_data->udev, pipe, buf, size, nfcmrvl_bulk_complete, drv_data); urb->transfer_flags |= URB_FREE_BUFFER; usb_mark_last_busy(drv_data->udev); usb_anchor_urb(urb, &drv_data->bulk_anchor); err = usb_submit_urb(urb, mem_flags); if (err) { if (err != -EPERM && err != -ENODEV) nfc_err(&drv_data->udev->dev, "urb %p submission failed (%d)\n", urb, -err); usb_unanchor_urb(urb); } usb_free_urb(urb); return err; } static void nfcmrvl_tx_complete(struct urb *urb) { struct sk_buff *skb = urb->context; struct nci_dev *ndev = (struct nci_dev *)skb->dev; struct nfcmrvl_private *priv = nci_get_drvdata(ndev); struct nfcmrvl_usb_drv_data *drv_data = priv->drv_data; unsigned long flags; nfc_info(priv->dev, "urb %p status %d count %d\n", urb, urb->status, urb->actual_length); spin_lock_irqsave(&drv_data->txlock, flags); drv_data->tx_in_flight--; spin_unlock_irqrestore(&drv_data->txlock, 
flags); kfree(urb->setup_packet); kfree_skb(skb); } static int nfcmrvl_usb_nci_open(struct nfcmrvl_private *priv) { struct nfcmrvl_usb_drv_data *drv_data = priv->drv_data; int err; err = usb_autopm_get_interface(drv_data->intf); if (err) return err; drv_data->intf->needs_remote_wakeup = 1; err = nfcmrvl_submit_bulk_urb(drv_data, GFP_KERNEL); if (err) goto failed; set_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags); nfcmrvl_submit_bulk_urb(drv_data, GFP_KERNEL); usb_autopm_put_interface(drv_data->intf); return 0; failed: usb_autopm_put_interface(drv_data->intf); return err; } static void nfcmrvl_usb_stop_traffic(struct nfcmrvl_usb_drv_data *drv_data) { usb_kill_anchored_urbs(&drv_data->bulk_anchor); } static int nfcmrvl_usb_nci_close(struct nfcmrvl_private *priv) { struct nfcmrvl_usb_drv_data *drv_data = priv->drv_data; int err; cancel_work_sync(&drv_data->waker); clear_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags); nfcmrvl_usb_stop_traffic(drv_data); usb_kill_anchored_urbs(&drv_data->tx_anchor); err = usb_autopm_get_interface(drv_data->intf); if (err) goto failed; drv_data->intf->needs_remote_wakeup = 0; usb_autopm_put_interface(drv_data->intf); failed: usb_scuttle_anchored_urbs(&drv_data->deferred); return 0; } static int nfcmrvl_usb_nci_send(struct nfcmrvl_private *priv, struct sk_buff *skb) { struct nfcmrvl_usb_drv_data *drv_data = priv->drv_data; struct urb *urb; unsigned int pipe; int err; if (!drv_data->bulk_tx_ep) return -ENODEV; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) return -ENOMEM; pipe = usb_sndbulkpipe(drv_data->udev, drv_data->bulk_tx_ep->bEndpointAddress); usb_fill_bulk_urb(urb, drv_data->udev, pipe, skb->data, skb->len, nfcmrvl_tx_complete, skb); err = nfcmrvl_inc_tx(drv_data); if (err) { usb_anchor_urb(urb, &drv_data->deferred); schedule_work(&drv_data->waker); err = 0; goto done; } usb_anchor_urb(urb, &drv_data->tx_anchor); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { if (err != -EPERM && err != -ENODEV) nfc_err(&drv_data->udev->dev, "urb %p submission failed (%d)\n", urb, -err); kfree(urb->setup_packet); usb_unanchor_urb(urb); } else { usb_mark_last_busy(drv_data->udev); } done: usb_free_urb(urb); return err; } static const struct nfcmrvl_if_ops usb_ops = { .nci_open = nfcmrvl_usb_nci_open, .nci_close = nfcmrvl_usb_nci_close, .nci_send = nfcmrvl_usb_nci_send, }; static void nfcmrvl_waker(struct work_struct *work) { struct nfcmrvl_usb_drv_data *drv_data = container_of(work, struct nfcmrvl_usb_drv_data, waker); int err; err = usb_autopm_get_interface(drv_data->intf); if (err) return; usb_autopm_put_interface(drv_data->intf); } static int nfcmrvl_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct nfcmrvl_usb_drv_data *drv_data; struct nfcmrvl_private *priv; int i; struct usb_device *udev = interface_to_usbdev(intf); struct nfcmrvl_platform_data config; /* No configuration for USB */ memset(&config, 0, sizeof(config)); config.reset_n_io = -EINVAL; nfc_info(&udev->dev, "intf %p id %p\n", intf, id); drv_data = devm_kzalloc(&intf->dev, sizeof(*drv_data), GFP_KERNEL); if (!drv_data) return -ENOMEM; for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { struct usb_endpoint_descriptor *ep_desc; ep_desc = &intf->cur_altsetting->endpoint[i].desc; if (!drv_data->bulk_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) { drv_data->bulk_tx_ep = ep_desc; } else if (!drv_data->bulk_rx_ep && usb_endpoint_is_bulk_in(ep_desc)) { drv_data->bulk_rx_ep = ep_desc; } } if (!drv_data->bulk_tx_ep || !drv_data->bulk_rx_ep) return -ENODEV; drv_data->udev = udev; 
drv_data->intf = intf; INIT_WORK(&drv_data->waker, nfcmrvl_waker); spin_lock_init(&drv_data->txlock); init_usb_anchor(&drv_data->tx_anchor); init_usb_anchor(&drv_data->bulk_anchor); init_usb_anchor(&drv_data->deferred); priv = nfcmrvl_nci_register_dev(NFCMRVL_PHY_USB, drv_data, &usb_ops, &intf->dev, &config); if (IS_ERR(priv)) return PTR_ERR(priv); drv_data->priv = priv; drv_data->priv->support_fw_dnld = false; usb_set_intfdata(intf, drv_data); return 0; } static void nfcmrvl_disconnect(struct usb_interface *intf) { struct nfcmrvl_usb_drv_data *drv_data = usb_get_intfdata(intf); if (!drv_data) return; nfc_info(&drv_data->udev->dev, "intf %p\n", intf); nfcmrvl_nci_unregister_dev(drv_data->priv); usb_set_intfdata(drv_data->intf, NULL); } #ifdef CONFIG_PM static int nfcmrvl_suspend(struct usb_interface *intf, pm_message_t message) { struct nfcmrvl_usb_drv_data *drv_data = usb_get_intfdata(intf); nfc_info(&drv_data->udev->dev, "intf %p\n", intf); if (drv_data->suspend_count++) return 0; spin_lock_irq(&drv_data->txlock); if (!(PMSG_IS_AUTO(message) && drv_data->tx_in_flight)) { set_bit(NFCMRVL_USB_SUSPENDING, &drv_data->flags); spin_unlock_irq(&drv_data->txlock); } else { spin_unlock_irq(&drv_data->txlock); drv_data->suspend_count--; return -EBUSY; } nfcmrvl_usb_stop_traffic(drv_data); usb_kill_anchored_urbs(&drv_data->tx_anchor); return 0; } static void nfcmrvl_play_deferred(struct nfcmrvl_usb_drv_data *drv_data) { struct urb *urb; int err; while ((urb = usb_get_from_anchor(&drv_data->deferred))) { usb_anchor_urb(urb, &drv_data->tx_anchor); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { kfree(urb->setup_packet); usb_unanchor_urb(urb); usb_free_urb(urb); break; } drv_data->tx_in_flight++; usb_free_urb(urb); } /* Cleanup the rest deferred urbs. */ while ((urb = usb_get_from_anchor(&drv_data->deferred))) { kfree(urb->setup_packet); usb_free_urb(urb); } } static int nfcmrvl_resume(struct usb_interface *intf) { struct nfcmrvl_usb_drv_data *drv_data = usb_get_intfdata(intf); int err = 0; nfc_info(&drv_data->udev->dev, "intf %p\n", intf); if (--drv_data->suspend_count) return 0; if (!test_bit(NFCMRVL_NCI_RUNNING, &drv_data->flags)) goto done; if (test_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags)) { err = nfcmrvl_submit_bulk_urb(drv_data, GFP_NOIO); if (err) { clear_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags); goto failed; } nfcmrvl_submit_bulk_urb(drv_data, GFP_NOIO); } spin_lock_irq(&drv_data->txlock); nfcmrvl_play_deferred(drv_data); clear_bit(NFCMRVL_USB_SUSPENDING, &drv_data->flags); spin_unlock_irq(&drv_data->txlock); return 0; failed: usb_scuttle_anchored_urbs(&drv_data->deferred); done: spin_lock_irq(&drv_data->txlock); clear_bit(NFCMRVL_USB_SUSPENDING, &drv_data->flags); spin_unlock_irq(&drv_data->txlock); return err; } #endif static struct usb_driver nfcmrvl_usb_driver = { .name = "nfcmrvl", .probe = nfcmrvl_probe, .disconnect = nfcmrvl_disconnect, #ifdef CONFIG_PM .suspend = nfcmrvl_suspend, .resume = nfcmrvl_resume, .reset_resume = nfcmrvl_resume, #endif .id_table = nfcmrvl_table, .supports_autosuspend = 1, .disable_hub_initiated_lpm = 1, .soft_unbind = 1, }; module_usb_driver(nfcmrvl_usb_driver); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell NFC-over-USB driver"); MODULE_LICENSE("GPL v2"); |
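
/*
 * Illustrative sketch (not part of the driver above): the suspend
 * gate-and-defer pattern implemented by nfcmrvl_inc_tx(),
 * nfcmrvl_usb_nci_send(), nfcmrvl_play_deferred() and nfcmrvl_resume().
 * A TX URB is submitted only while no suspend is in progress; otherwise
 * it is parked on a "deferred" anchor and replayed on resume. The
 * example_* names are hypothetical and the handling is abbreviated:
 * the completion-side in_flight decrement, the autopm waker, and the
 * scuttling of leftover deferred URBs are omitted.
 */
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/usb.h>

#define EXAMPLE_SUSPENDING	0

struct example_tx_gate {
	spinlock_t lock;		/* protects in_flight */
	unsigned long flags;		/* EXAMPLE_SUSPENDING */
	int in_flight;
	struct usb_anchor tx_anchor;
	struct usb_anchor deferred;
};

/* Submit @urb now, or park it if a suspend transition is in flight. */
static int example_tx_or_defer(struct example_tx_gate *gate, struct urb *urb)
{
	unsigned long irqflags;
	bool suspending;
	int err;

	spin_lock_irqsave(&gate->lock, irqflags);
	suspending = test_bit(EXAMPLE_SUSPENDING, &gate->flags);
	if (!suspending)
		gate->in_flight++;
	spin_unlock_irqrestore(&gate->lock, irqflags);

	if (suspending) {
		usb_anchor_urb(urb, &gate->deferred);
		return 0;	/* replayed later by example_replay_deferred() */
	}

	usb_anchor_urb(urb, &gate->tx_anchor);
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err)
		usb_unanchor_urb(urb);
	return err;
}

/* Resume path: caller holds gate->lock (as nfcmrvl_resume() holds txlock). */
static void example_replay_deferred(struct example_tx_gate *gate)
{
	struct urb *urb;

	while ((urb = usb_get_from_anchor(&gate->deferred))) {
		usb_anchor_urb(urb, &gate->tx_anchor);
		if (usb_submit_urb(urb, GFP_ATOMIC)) {
			usb_unanchor_urb(urb);
			usb_free_urb(urb);
			break;
		}
		gate->in_flight++;
		usb_free_urb(urb);	/* drop the ref from usb_get_from_anchor() */
	}
	clear_bit(EXAMPLE_SUSPENDING, &gate->flags);
}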
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 Facebook */

#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/rcupdate_trace.h>

struct bpf_iter_target_info {
	struct list_head list;
	const struct
bpf_iter_reg *reg_info; u32 btf_id; /* cached value */ }; struct bpf_iter_link { struct bpf_link link; struct bpf_iter_aux_info aux; struct bpf_iter_target_info *tinfo; }; struct bpf_iter_priv_data { struct bpf_iter_target_info *tinfo; const struct bpf_iter_seq_info *seq_info; struct bpf_prog *prog; u64 session_id; u64 seq_num; bool done_stop; u8 target_private[] __aligned(8); }; static struct list_head targets = LIST_HEAD_INIT(targets); static DEFINE_MUTEX(targets_mutex); /* protect bpf_iter_link changes */ static DEFINE_MUTEX(link_mutex); /* incremented on every opened seq_file */ static atomic64_t session_id; static int prepare_seq_file(struct file *file, struct bpf_iter_link *link, const struct bpf_iter_seq_info *seq_info); static void bpf_iter_inc_seq_num(struct seq_file *seq) { struct bpf_iter_priv_data *iter_priv; iter_priv = container_of(seq->private, struct bpf_iter_priv_data, target_private); iter_priv->seq_num++; } static void bpf_iter_dec_seq_num(struct seq_file *seq) { struct bpf_iter_priv_data *iter_priv; iter_priv = container_of(seq->private, struct bpf_iter_priv_data, target_private); iter_priv->seq_num--; } static void bpf_iter_done_stop(struct seq_file *seq) { struct bpf_iter_priv_data *iter_priv; iter_priv = container_of(seq->private, struct bpf_iter_priv_data, target_private); iter_priv->done_stop = true; } static inline bool bpf_iter_target_support_resched(const struct bpf_iter_target_info *tinfo) { return tinfo->reg_info->feature & BPF_ITER_RESCHED; } static bool bpf_iter_support_resched(struct seq_file *seq) { struct bpf_iter_priv_data *iter_priv; iter_priv = container_of(seq->private, struct bpf_iter_priv_data, target_private); return bpf_iter_target_support_resched(iter_priv->tinfo); } /* maximum visited objects before bailing out */ #define MAX_ITER_OBJECTS 1000000 /* bpf_seq_read, a customized and simpler version for bpf iterator. * The following are differences from seq_read(): * . fixed buffer size (PAGE_SIZE) * . assuming NULL ->llseek() * . stop() may call bpf program, handling potential overflow there */ static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) { struct seq_file *seq = file->private_data; size_t n, offs, copied = 0; int err = 0, num_objs = 0; bool can_resched; void *p; mutex_lock(&seq->lock); if (!seq->buf) { seq->size = PAGE_SIZE << 3; seq->buf = kvmalloc(seq->size, GFP_KERNEL); if (!seq->buf) { err = -ENOMEM; goto done; } } if (seq->count) { n = min(seq->count, size); err = copy_to_user(buf, seq->buf + seq->from, n); if (err) { err = -EFAULT; goto done; } seq->count -= n; seq->from += n; copied = n; goto done; } seq->from = 0; p = seq->op->start(seq, &seq->index); if (!p) goto stop; if (IS_ERR(p)) { err = PTR_ERR(p); seq->op->stop(seq, p); seq->count = 0; goto done; } err = seq->op->show(seq, p); if (err > 0) { /* object is skipped, decrease seq_num, so next * valid object can reuse the same seq_num. 
*/ bpf_iter_dec_seq_num(seq); seq->count = 0; } else if (err < 0 || seq_has_overflowed(seq)) { if (!err) err = -E2BIG; seq->op->stop(seq, p); seq->count = 0; goto done; } can_resched = bpf_iter_support_resched(seq); while (1) { loff_t pos = seq->index; num_objs++; offs = seq->count; p = seq->op->next(seq, p, &seq->index); if (pos == seq->index) { pr_info_ratelimited("buggy seq_file .next function %ps " "did not updated position index\n", seq->op->next); seq->index++; } if (IS_ERR_OR_NULL(p)) break; /* got a valid next object, increase seq_num */ bpf_iter_inc_seq_num(seq); if (seq->count >= size) break; if (num_objs >= MAX_ITER_OBJECTS) { if (offs == 0) { err = -EAGAIN; seq->op->stop(seq, p); goto done; } break; } err = seq->op->show(seq, p); if (err > 0) { bpf_iter_dec_seq_num(seq); seq->count = offs; } else if (err < 0 || seq_has_overflowed(seq)) { seq->count = offs; if (offs == 0) { if (!err) err = -E2BIG; seq->op->stop(seq, p); goto done; } break; } if (can_resched) cond_resched(); } stop: offs = seq->count; if (IS_ERR(p)) { seq->op->stop(seq, NULL); err = PTR_ERR(p); goto done; } /* bpf program called if !p */ seq->op->stop(seq, p); if (!p) { if (!seq_has_overflowed(seq)) { bpf_iter_done_stop(seq); } else { seq->count = offs; if (offs == 0) { err = -E2BIG; goto done; } } } n = min(seq->count, size); err = copy_to_user(buf, seq->buf, n); if (err) { err = -EFAULT; goto done; } copied = n; seq->count -= n; seq->from = n; done: if (!copied) copied = err; else *ppos += copied; mutex_unlock(&seq->lock); return copied; } static const struct bpf_iter_seq_info * __get_seq_info(struct bpf_iter_link *link) { const struct bpf_iter_seq_info *seq_info; if (link->aux.map) { seq_info = link->aux.map->ops->iter_seq_info; if (seq_info) return seq_info; } return link->tinfo->reg_info->seq_info; } static int iter_open(struct inode *inode, struct file *file) { struct bpf_iter_link *link = inode->i_private; return prepare_seq_file(file, link, __get_seq_info(link)); } static int iter_release(struct inode *inode, struct file *file) { struct bpf_iter_priv_data *iter_priv; struct seq_file *seq; seq = file->private_data; if (!seq) return 0; iter_priv = container_of(seq->private, struct bpf_iter_priv_data, target_private); if (iter_priv->seq_info->fini_seq_private) iter_priv->seq_info->fini_seq_private(seq->private); bpf_prog_put(iter_priv->prog); seq->private = iter_priv; return seq_release_private(inode, file); } const struct file_operations bpf_iter_fops = { .open = iter_open, .read = bpf_seq_read, .release = iter_release, }; /* The argument reg_info will be cached in bpf_iter_target_info. * The common practice is to declare target reg_info as * a const static variable and passed as an argument to * bpf_iter_reg_target(). 
*/ int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info) { struct bpf_iter_target_info *tinfo; tinfo = kzalloc(sizeof(*tinfo), GFP_KERNEL); if (!tinfo) return -ENOMEM; tinfo->reg_info = reg_info; INIT_LIST_HEAD(&tinfo->list); mutex_lock(&targets_mutex); list_add(&tinfo->list, &targets); mutex_unlock(&targets_mutex); return 0; } void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info) { struct bpf_iter_target_info *tinfo; bool found = false; mutex_lock(&targets_mutex); list_for_each_entry(tinfo, &targets, list) { if (reg_info == tinfo->reg_info) { list_del(&tinfo->list); kfree(tinfo); found = true; break; } } mutex_unlock(&targets_mutex); WARN_ON(found == false); } static void cache_btf_id(struct bpf_iter_target_info *tinfo, struct bpf_prog *prog) { tinfo->btf_id = prog->aux->attach_btf_id; } int bpf_iter_prog_supported(struct bpf_prog *prog) { const char *attach_fname = prog->aux->attach_func_name; struct bpf_iter_target_info *tinfo = NULL, *iter; u32 prog_btf_id = prog->aux->attach_btf_id; const char *prefix = BPF_ITER_FUNC_PREFIX; int prefix_len = strlen(prefix); if (strncmp(attach_fname, prefix, prefix_len)) return -EINVAL; mutex_lock(&targets_mutex); list_for_each_entry(iter, &targets, list) { if (iter->btf_id && iter->btf_id == prog_btf_id) { tinfo = iter; break; } if (!strcmp(attach_fname + prefix_len, iter->reg_info->target)) { cache_btf_id(iter, prog); tinfo = iter; break; } } mutex_unlock(&targets_mutex); if (!tinfo) return -EINVAL; return bpf_prog_ctx_arg_info_init(prog, tinfo->reg_info->ctx_arg_info, tinfo->reg_info->ctx_arg_info_size); } const struct bpf_func_proto * bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { const struct bpf_iter_target_info *tinfo; const struct bpf_func_proto *fn = NULL; mutex_lock(&targets_mutex); list_for_each_entry(tinfo, &targets, list) { if (tinfo->btf_id == prog->aux->attach_btf_id) { const struct bpf_iter_reg *reg_info; reg_info = tinfo->reg_info; if (reg_info->get_func_proto) fn = reg_info->get_func_proto(func_id, prog); break; } } mutex_unlock(&targets_mutex); return fn; } static void bpf_iter_link_release(struct bpf_link *link) { struct bpf_iter_link *iter_link = container_of(link, struct bpf_iter_link, link); if (iter_link->tinfo->reg_info->detach_target) iter_link->tinfo->reg_info->detach_target(&iter_link->aux); } static void bpf_iter_link_dealloc(struct bpf_link *link) { struct bpf_iter_link *iter_link = container_of(link, struct bpf_iter_link, link); kfree(iter_link); } static int bpf_iter_link_replace(struct bpf_link *link, struct bpf_prog *new_prog, struct bpf_prog *old_prog) { int ret = 0; mutex_lock(&link_mutex); if (old_prog && link->prog != old_prog) { ret = -EPERM; goto out_unlock; } if (link->prog->type != new_prog->type || link->prog->expected_attach_type != new_prog->expected_attach_type || link->prog->aux->attach_btf_id != new_prog->aux->attach_btf_id) { ret = -EINVAL; goto out_unlock; } old_prog = xchg(&link->prog, new_prog); bpf_prog_put(old_prog); out_unlock: mutex_unlock(&link_mutex); return ret; } static void bpf_iter_link_show_fdinfo(const struct bpf_link *link, struct seq_file *seq) { struct bpf_iter_link *iter_link = container_of(link, struct bpf_iter_link, link); bpf_iter_show_fdinfo_t show_fdinfo; seq_printf(seq, "target_name:\t%s\n", iter_link->tinfo->reg_info->target); show_fdinfo = iter_link->tinfo->reg_info->show_fdinfo; if (show_fdinfo) show_fdinfo(&iter_link->aux, seq); } static int bpf_iter_link_fill_link_info(const struct bpf_link *link, struct bpf_link_info *info) 
{ struct bpf_iter_link *iter_link = container_of(link, struct bpf_iter_link, link); char __user *ubuf = u64_to_user_ptr(info->iter.target_name); bpf_iter_fill_link_info_t fill_link_info; u32 ulen = info->iter.target_name_len; const char *target_name; u32 target_len; if (!ulen ^ !ubuf) return -EINVAL; target_name = iter_link->tinfo->reg_info->target; target_len = strlen(target_name); info->iter.target_name_len = target_len + 1; if (ubuf) { if (ulen >= target_len + 1) { if (copy_to_user(ubuf, target_name, target_len + 1)) return -EFAULT; } else { char zero = '\0'; if (copy_to_user(ubuf, target_name, ulen - 1)) return -EFAULT; if (put_user(zero, ubuf + ulen - 1)) return -EFAULT; return -ENOSPC; } } fill_link_info = iter_link->tinfo->reg_info->fill_link_info; if (fill_link_info) return fill_link_info(&iter_link->aux, info); return 0; } static const struct bpf_link_ops bpf_iter_link_lops = { .release = bpf_iter_link_release, .dealloc = bpf_iter_link_dealloc, .update_prog = bpf_iter_link_replace, .show_fdinfo = bpf_iter_link_show_fdinfo, .fill_link_info = bpf_iter_link_fill_link_info, }; bool bpf_link_is_iter(struct bpf_link *link) { return link->ops == &bpf_iter_link_lops; } int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog) { struct bpf_iter_target_info *tinfo = NULL, *iter; struct bpf_link_primer link_primer; union bpf_iter_link_info linfo; struct bpf_iter_link *link; u32 prog_btf_id, linfo_len; bpfptr_t ulinfo; int err; if (attr->link_create.target_fd || attr->link_create.flags) return -EINVAL; memset(&linfo, 0, sizeof(union bpf_iter_link_info)); ulinfo = make_bpfptr(attr->link_create.iter_info, uattr.is_kernel); linfo_len = attr->link_create.iter_info_len; if (bpfptr_is_null(ulinfo) ^ !linfo_len) return -EINVAL; if (!bpfptr_is_null(ulinfo)) { err = bpf_check_uarg_tail_zero(ulinfo, sizeof(linfo), linfo_len); if (err) return err; linfo_len = min_t(u32, linfo_len, sizeof(linfo)); if (copy_from_bpfptr(&linfo, ulinfo, linfo_len)) return -EFAULT; } prog_btf_id = prog->aux->attach_btf_id; mutex_lock(&targets_mutex); list_for_each_entry(iter, &targets, list) { if (iter->btf_id == prog_btf_id) { tinfo = iter; break; } } mutex_unlock(&targets_mutex); if (!tinfo) return -ENOENT; /* Only allow sleepable program for resched-able iterator */ if (prog->sleepable && !bpf_iter_target_support_resched(tinfo)) return -EINVAL; link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN); if (!link) return -ENOMEM; bpf_link_init(&link->link, BPF_LINK_TYPE_ITER, &bpf_iter_link_lops, prog); link->tinfo = tinfo; err = bpf_link_prime(&link->link, &link_primer); if (err) { kfree(link); return err; } if (tinfo->reg_info->attach_target) { err = tinfo->reg_info->attach_target(prog, &linfo, &link->aux); if (err) { bpf_link_cleanup(&link_primer); return err; } } return bpf_link_settle(&link_primer); } static void init_seq_meta(struct bpf_iter_priv_data *priv_data, struct bpf_iter_target_info *tinfo, const struct bpf_iter_seq_info *seq_info, struct bpf_prog *prog) { priv_data->tinfo = tinfo; priv_data->seq_info = seq_info; priv_data->prog = prog; priv_data->session_id = atomic64_inc_return(&session_id); priv_data->seq_num = 0; priv_data->done_stop = false; } static int prepare_seq_file(struct file *file, struct bpf_iter_link *link, const struct bpf_iter_seq_info *seq_info) { struct bpf_iter_priv_data *priv_data; struct bpf_iter_target_info *tinfo; struct bpf_prog *prog; u32 total_priv_dsize; struct seq_file *seq; int err = 0; mutex_lock(&link_mutex); prog = link->link.prog; 
bpf_prog_inc(prog); mutex_unlock(&link_mutex); tinfo = link->tinfo; total_priv_dsize = offsetof(struct bpf_iter_priv_data, target_private) + seq_info->seq_priv_size; priv_data = __seq_open_private(file, seq_info->seq_ops, total_priv_dsize); if (!priv_data) { err = -ENOMEM; goto release_prog; } if (seq_info->init_seq_private) { err = seq_info->init_seq_private(priv_data->target_private, &link->aux); if (err) goto release_seq_file; } init_seq_meta(priv_data, tinfo, seq_info, prog); seq = file->private_data; seq->private = priv_data->target_private; return 0; release_seq_file: seq_release_private(file->f_inode, file); file->private_data = NULL; release_prog: bpf_prog_put(prog); return err; } int bpf_iter_new_fd(struct bpf_link *link) { struct bpf_iter_link *iter_link; struct file *file; unsigned int flags; int err, fd; if (link->ops != &bpf_iter_link_lops) return -EINVAL; flags = O_RDONLY | O_CLOEXEC; fd = get_unused_fd_flags(flags); if (fd < 0) return fd; file = anon_inode_getfile("bpf_iter", &bpf_iter_fops, NULL, flags); if (IS_ERR(file)) { err = PTR_ERR(file); goto free_fd; } iter_link = container_of(link, struct bpf_iter_link, link); err = prepare_seq_file(file, iter_link, __get_seq_info(iter_link)); if (err) goto free_file; fd_install(fd, file); return fd; free_file: fput(file); free_fd: put_unused_fd(fd); return err; } struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop) { struct bpf_iter_priv_data *iter_priv; struct seq_file *seq; void *seq_priv; seq = meta->seq; if (seq->file->f_op != &bpf_iter_fops) return NULL; seq_priv = seq->private; iter_priv = container_of(seq_priv, struct bpf_iter_priv_data, target_private); if (in_stop && iter_priv->done_stop) return NULL; meta->session_id = iter_priv->session_id; meta->seq_num = iter_priv->seq_num; return iter_priv->prog; } int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx) { struct bpf_run_ctx run_ctx, *old_run_ctx; int ret; if (prog->sleepable) { rcu_read_lock_trace(); migrate_disable(); might_fault(); old_run_ctx = bpf_set_run_ctx(&run_ctx); ret = bpf_prog_run(prog, ctx); bpf_reset_run_ctx(old_run_ctx); migrate_enable(); rcu_read_unlock_trace(); } else { rcu_read_lock(); migrate_disable(); old_run_ctx = bpf_set_run_ctx(&run_ctx); ret = bpf_prog_run(prog, ctx); bpf_reset_run_ctx(old_run_ctx); migrate_enable(); rcu_read_unlock(); } /* bpf program can only return 0 or 1: * 0 : okay * 1 : retry the same object * The bpf_iter_run_prog() return value * will be seq_ops->show() return value. */ return ret == 0 ? 0 : -EAGAIN; } BPF_CALL_4(bpf_for_each_map_elem, struct bpf_map *, map, void *, callback_fn, void *, callback_ctx, u64, flags) { return map->ops->map_for_each_callback(map, callback_fn, callback_ctx, flags); } const struct bpf_func_proto bpf_for_each_map_elem_proto = { .func = bpf_for_each_map_elem, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_PTR_TO_FUNC, .arg3_type = ARG_PTR_TO_STACK_OR_NULL, .arg4_type = ARG_ANYTHING, }; BPF_CALL_4(bpf_loop, u32, nr_loops, void *, callback_fn, void *, callback_ctx, u64, flags) { bpf_callback_t callback = (bpf_callback_t)callback_fn; u64 ret; u32 i; /* Note: these safety checks are also verified when bpf_loop * is inlined, be careful to modify this code in sync. See * function verifier.c:inline_bpf_loop. 
*/ if (flags) return -EINVAL; if (nr_loops > BPF_MAX_LOOPS) return -E2BIG; for (i = 0; i < nr_loops; i++) { ret = callback((u64)i, (u64)(long)callback_ctx, 0, 0, 0); /* return value: 0 - continue, 1 - stop and return */ if (ret) return i + 1; } return i; } const struct bpf_func_proto bpf_loop_proto = { .func = bpf_loop, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_ANYTHING, .arg2_type = ARG_PTR_TO_FUNC, .arg3_type = ARG_PTR_TO_STACK_OR_NULL, .arg4_type = ARG_ANYTHING, }; struct bpf_iter_num_kern { int cur; /* current value, inclusive */ int end; /* final value, exclusive */ } __aligned(8); __bpf_kfunc_start_defs(); __bpf_kfunc int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) { struct bpf_iter_num_kern *s = (void *)it; BUILD_BUG_ON(sizeof(struct bpf_iter_num_kern) != sizeof(struct bpf_iter_num)); BUILD_BUG_ON(__alignof__(struct bpf_iter_num_kern) != __alignof__(struct bpf_iter_num)); /* start == end is legit, it's an empty range and we'll just get NULL * on first (and any subsequent) bpf_iter_num_next() call */ if (start > end) { s->cur = s->end = 0; return -EINVAL; } /* avoid overflows, e.g., if start == INT_MIN and end == INT_MAX */ if ((s64)end - (s64)start > BPF_MAX_LOOPS) { s->cur = s->end = 0; return -E2BIG; } /* user will call bpf_iter_num_next() first, * which will set s->cur to exactly start value; * underflow shouldn't matter */ s->cur = start - 1; s->end = end; return 0; } __bpf_kfunc int *bpf_iter_num_next(struct bpf_iter_num* it) { struct bpf_iter_num_kern *s = (void *)it; /* check failed initialization or if we are done (same behavior); * need to be careful about overflow, so convert to s64 for checks, * e.g., if s->cur == s->end == INT_MAX, we can't just do * s->cur + 1 >= s->end */ if ((s64)(s->cur + 1) >= s->end) { s->cur = s->end = 0; return NULL; } s->cur++; return &s->cur; } __bpf_kfunc void bpf_iter_num_destroy(struct bpf_iter_num *it) { struct bpf_iter_num_kern *s = (void *)it; s->cur = s->end = 0; } __bpf_kfunc_end_defs(); |
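/*
 * Illustrative sketch only (not part of bpf_iter.c): how the numbers
 * iterator kfuncs defined above are typically driven from the BPF side.
 * The program below is hypothetical -- the section name, the
 * sum_first_ten() function and its attach point are made up -- but the
 * kfunc signatures match bpf_iter_num_new/next/destroy() above, and the
 * loop shows the intended contract: new() sets up the half-open range
 * [start, end), next() returns a pointer to the current value or NULL
 * once the range is exhausted (or was rejected as invalid/too large),
 * and destroy() must always be called to close the iterator.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __ksym;
extern int *bpf_iter_num_next(struct bpf_iter_num *it) __ksym;
extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym;

SEC("raw_tp/sched_switch")
int sum_first_ten(void *ctx)
{
	struct bpf_iter_num it;
	int *v, sum = 0;

	bpf_iter_num_new(&it, 0, 10);		/* range [0, 10) */
	while ((v = bpf_iter_num_next(&it)))
		sum += *v;			/* *v walks 0, 1, ..., 9 */
	bpf_iter_num_destroy(&it);

	bpf_printk("sum=%d", sum);		/* prints sum=45 */
	return 0;
}

char _license[] SEC("license") = "GPL";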
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2007, 2008, 2009 Siemens AG */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <net/cfg802154.h> #include <net/rtnetlink.h> #include "ieee802154.h" #include "nl802154.h" #include "sysfs.h" #include "core.h" /* name for sysfs, %d is appended */ #define PHY_NAME "phy" /* RCU-protected (and RTNL for writers) */ LIST_HEAD(cfg802154_rdev_list); int cfg802154_rdev_list_generation; struct wpan_phy *wpan_phy_find(const char *str) { struct device *dev; if (WARN_ON(!str)) return NULL; dev = class_find_device_by_name(&wpan_phy_class, str); if (!dev) return NULL; return container_of(dev, struct wpan_phy, dev); } EXPORT_SYMBOL(wpan_phy_find); struct wpan_phy_iter_data { int (*fn)(struct wpan_phy *phy, void *data); void *data; }; static int wpan_phy_iter(struct device *dev, void *_data) { struct wpan_phy_iter_data *wpid = _data; struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev); return wpid->fn(phy, wpid->data); } int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data), void *data) { struct wpan_phy_iter_data wpid = { .fn = fn, .data = data, }; return class_for_each_device(&wpan_phy_class, NULL, &wpid, wpan_phy_iter); } EXPORT_SYMBOL(wpan_phy_for_each); struct cfg802154_registered_device * cfg802154_rdev_by_wpan_phy_idx(int wpan_phy_idx) { struct cfg802154_registered_device *result = NULL, *rdev; ASSERT_RTNL(); list_for_each_entry(rdev, &cfg802154_rdev_list, list) { if (rdev->wpan_phy_idx == wpan_phy_idx) { result = rdev; break; } } return result; } struct wpan_phy *wpan_phy_idx_to_wpan_phy(int wpan_phy_idx) { struct cfg802154_registered_device *rdev; ASSERT_RTNL(); rdev = cfg802154_rdev_by_wpan_phy_idx(wpan_phy_idx); if (!rdev) return NULL; return &rdev->wpan_phy; } struct wpan_phy * wpan_phy_new(const struct cfg802154_ops *ops, size_t
priv_size) { static atomic_t wpan_phy_counter = ATOMIC_INIT(0); struct cfg802154_registered_device *rdev; size_t alloc_size; alloc_size = sizeof(*rdev) + priv_size; rdev = kzalloc(alloc_size, GFP_KERNEL); if (!rdev) return NULL; rdev->ops = ops; rdev->wpan_phy_idx = atomic_inc_return(&wpan_phy_counter); if (unlikely(rdev->wpan_phy_idx < 0)) { /* ugh, wrapped! */ atomic_dec(&wpan_phy_counter); kfree(rdev); return NULL; } /* atomic_inc_return makes it start at 1, make it start at 0 */ rdev->wpan_phy_idx--; INIT_LIST_HEAD(&rdev->wpan_dev_list); device_initialize(&rdev->wpan_phy.dev); dev_set_name(&rdev->wpan_phy.dev, PHY_NAME "%d", rdev->wpan_phy_idx); rdev->wpan_phy.dev.class = &wpan_phy_class; rdev->wpan_phy.dev.platform_data = rdev; wpan_phy_net_set(&rdev->wpan_phy, &init_net); init_waitqueue_head(&rdev->dev_wait); init_waitqueue_head(&rdev->wpan_phy.sync_txq); spin_lock_init(&rdev->wpan_phy.queue_lock); return &rdev->wpan_phy; } EXPORT_SYMBOL(wpan_phy_new); int wpan_phy_register(struct wpan_phy *phy) { struct cfg802154_registered_device *rdev = wpan_phy_to_rdev(phy); int ret; rtnl_lock(); ret = device_add(&phy->dev); if (ret) { rtnl_unlock(); return ret; } list_add_rcu(&rdev->list, &cfg802154_rdev_list); cfg802154_rdev_list_generation++; /* TODO phy registered lock */ rtnl_unlock(); /* TODO nl802154 phy notify */ return 0; } EXPORT_SYMBOL(wpan_phy_register); void wpan_phy_unregister(struct wpan_phy *phy) { struct cfg802154_registered_device *rdev = wpan_phy_to_rdev(phy); wait_event(rdev->dev_wait, ({ int __count; rtnl_lock(); __count = rdev->opencount; rtnl_unlock(); __count == 0; })); rtnl_lock(); /* TODO nl802154 phy notify */ /* TODO phy registered lock */ WARN_ON(!list_empty(&rdev->wpan_dev_list)); /* First remove the hardware from everywhere, this makes * it impossible to find from userspace. 
*/ list_del_rcu(&rdev->list); synchronize_rcu(); cfg802154_rdev_list_generation++; device_del(&phy->dev); rtnl_unlock(); } EXPORT_SYMBOL(wpan_phy_unregister); void wpan_phy_free(struct wpan_phy *phy) { put_device(&phy->dev); } EXPORT_SYMBOL(wpan_phy_free); static void cfg802154_free_peer_structures(struct wpan_dev *wpan_dev) { struct ieee802154_pan_device *child, *tmp; mutex_lock(&wpan_dev->association_lock); kfree(wpan_dev->parent); wpan_dev->parent = NULL; list_for_each_entry_safe(child, tmp, &wpan_dev->children, node) { list_del(&child->node); kfree(child); } wpan_dev->nchildren = 0; mutex_unlock(&wpan_dev->association_lock); } int cfg802154_switch_netns(struct cfg802154_registered_device *rdev, struct net *net) { struct wpan_dev *wpan_dev; int err = 0; list_for_each_entry(wpan_dev, &rdev->wpan_dev_list, list) { if (!wpan_dev->netdev) continue; wpan_dev->netdev->netns_immutable = false; err = dev_change_net_namespace(wpan_dev->netdev, net, "wpan%d"); if (err) break; wpan_dev->netdev->netns_immutable = true; } if (err) { /* failed -- clean up to old netns */ net = wpan_phy_net(&rdev->wpan_phy); list_for_each_entry_continue_reverse(wpan_dev, &rdev->wpan_dev_list, list) { if (!wpan_dev->netdev) continue; wpan_dev->netdev->netns_immutable = false; err = dev_change_net_namespace(wpan_dev->netdev, net, "wpan%d"); WARN_ON(err); wpan_dev->netdev->netns_immutable = true; } return err; } wpan_phy_net_set(&rdev->wpan_phy, net); err = device_rename(&rdev->wpan_phy.dev, dev_name(&rdev->wpan_phy.dev)); WARN_ON(err); return 0; } void cfg802154_dev_free(struct cfg802154_registered_device *rdev) { kfree(rdev); } static void cfg802154_update_iface_num(struct cfg802154_registered_device *rdev, int iftype, int num) { ASSERT_RTNL(); rdev->num_running_ifaces += num; } static int cfg802154_netdev_notifier_call(struct notifier_block *nb, unsigned long state, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct wpan_dev *wpan_dev = dev->ieee802154_ptr; struct cfg802154_registered_device *rdev; if (!wpan_dev) return NOTIFY_DONE; rdev = wpan_phy_to_rdev(wpan_dev->wpan_phy); /* TODO WARN_ON unspec type */ switch (state) { /* TODO NETDEV_DEVTYPE */ case NETDEV_REGISTER: dev->netns_immutable = true; wpan_dev->identifier = ++rdev->wpan_dev_id; list_add_rcu(&wpan_dev->list, &rdev->wpan_dev_list); rdev->devlist_generation++; mutex_init(&wpan_dev->association_lock); INIT_LIST_HEAD(&wpan_dev->children); wpan_dev->max_associations = SZ_16K; wpan_dev->netdev = dev; break; case NETDEV_DOWN: cfg802154_update_iface_num(rdev, wpan_dev->iftype, -1); rdev->opencount--; wake_up(&rdev->dev_wait); break; case NETDEV_UP: cfg802154_update_iface_num(rdev, wpan_dev->iftype, 1); rdev->opencount++; break; case NETDEV_UNREGISTER: cfg802154_free_peer_structures(wpan_dev); /* It is possible to get NETDEV_UNREGISTER * multiple times. To detect that, check * that the interface is still on the list * of registered interfaces, and only then * remove and clean it up. */ if (!list_empty(&wpan_dev->list)) { list_del_rcu(&wpan_dev->list); rdev->devlist_generation++; } /* synchronize (so that we won't find this netdev * from other code any more) and then clear the list * head so that the above code can safely check for * !list_empty() to avoid double-cleanup. 
*/ synchronize_rcu(); INIT_LIST_HEAD(&wpan_dev->list); break; default: return NOTIFY_DONE; } return NOTIFY_OK; } static struct notifier_block cfg802154_netdev_notifier = { .notifier_call = cfg802154_netdev_notifier_call, }; static void __net_exit cfg802154_pernet_exit(struct net *net) { struct cfg802154_registered_device *rdev; rtnl_lock(); list_for_each_entry(rdev, &cfg802154_rdev_list, list) { if (net_eq(wpan_phy_net(&rdev->wpan_phy), net)) WARN_ON(cfg802154_switch_netns(rdev, &init_net)); } rtnl_unlock(); } static struct pernet_operations cfg802154_pernet_ops = { .exit = cfg802154_pernet_exit, }; static int __init wpan_phy_class_init(void) { int rc; rc = register_pernet_device(&cfg802154_pernet_ops); if (rc) goto err; rc = wpan_phy_sysfs_init(); if (rc) goto err_sysfs; rc = register_netdevice_notifier(&cfg802154_netdev_notifier); if (rc) goto err_nl; rc = ieee802154_nl_init(); if (rc) goto err_notifier; rc = nl802154_init(); if (rc) goto err_ieee802154_nl; return 0; err_ieee802154_nl: ieee802154_nl_exit(); err_notifier: unregister_netdevice_notifier(&cfg802154_netdev_notifier); err_nl: wpan_phy_sysfs_exit(); err_sysfs: unregister_pernet_device(&cfg802154_pernet_ops); err: return rc; } subsys_initcall(wpan_phy_class_init); static void __exit wpan_phy_class_exit(void) { nl802154_exit(); ieee802154_nl_exit(); unregister_netdevice_notifier(&cfg802154_netdev_notifier); wpan_phy_sysfs_exit(); unregister_pernet_device(&cfg802154_pernet_ops); } module_exit(wpan_phy_class_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("IEEE 802.15.4 configuration interface"); MODULE_AUTHOR("Dmitry Eremin-Solenikov"); |
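/*
 * Illustrative sketch only (not part of core.c): the allocation and
 * registration lifecycle a caller of the API above would follow. The
 * cfg802154_ops instance and the private structure are placeholders
 * ("my_ops", "struct my_phy_priv"); real drivers normally go through
 * ieee802154_alloc_hw()/ieee802154_register_hw(), which wrap these calls.
 * Assumes the includes at the top of this file (<net/cfg802154.h> etc.).
 */
static const struct cfg802154_ops my_ops;	/* placeholder, normally populated */

struct my_phy_priv {
	int irq;				/* example driver state */
};

static struct wpan_phy *my_phy_create(void)
{
	struct wpan_phy *phy;

	/* Allocates the rdev plus sizeof(struct my_phy_priv) of private data. */
	phy = wpan_phy_new(&my_ops, sizeof(struct my_phy_priv));
	if (!phy)
		return NULL;

	if (wpan_phy_register(phy)) {
		/* Never registered, so only the allocation must be undone. */
		wpan_phy_free(phy);
		return NULL;
	}
	return phy;
}

static void my_phy_destroy(struct wpan_phy *phy)
{
	wpan_phy_unregister(phy);	/* waits until all interfaces are down */
	wpan_phy_free(phy);		/* drops the final device reference */
}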
/* SPDX-License-Identifier: GPL-2.0+ */ /* * Read-Copy Update mechanism for mutual exclusion * * Copyright IBM Corporation, 2001 * * Author: Dipankar Sarma <dipankar@in.ibm.com> * * Based on the original work by Paul McKenney <paulmck@vnet.ibm.com> * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * Papers: * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) * * For detailed explanation of Read-Copy Update mechanism see - * http://lse.sourceforge.net/locking/rcupdate.html * */ #ifndef __LINUX_RCUPDATE_H #define __LINUX_RCUPDATE_H #include <linux/types.h> #include <linux/compiler.h> #include <linux/atomic.h> #include <linux/irqflags.h> #include <linux/preempt.h> #include <linux/bottom_half.h> #include <linux/lockdep.h> #include <linux/cleanup.h> #include <asm/processor.h> #include <linux/context_tracking_irq.h> #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) #define RCU_SEQ_CTR_SHIFT 2 #define RCU_SEQ_STATE_MASK ((1 << RCU_SEQ_CTR_SHIFT) - 1) /* Exported common interfaces */ void call_rcu(struct rcu_head *head, rcu_callback_t func); void rcu_barrier_tasks(void); void synchronize_rcu(void); struct rcu_gp_oldstate; unsigned long get_completed_synchronize_rcu(void); void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp); // Maximum number of unsigned long values corresponding to // not-yet-completed RCU grace periods. #define NUM_ACTIVE_RCU_POLL_OLDSTATE 2 /** * same_state_synchronize_rcu - Are two old-state values identical? * @oldstate1: First old-state value. * @oldstate2: Second old-state value. * * The two old-state values must have been obtained from either * get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or * get_completed_synchronize_rcu(). Returns @true if the two values are * identical and @false otherwise. This allows structures whose lifetimes * are tracked by old-state values to push these values to a list header, * allowing those structures to be slightly smaller.
*/ static inline bool same_state_synchronize_rcu(unsigned long oldstate1, unsigned long oldstate2) { return oldstate1 == oldstate2; } #ifdef CONFIG_PREEMPT_RCU void __rcu_read_lock(void); void __rcu_read_unlock(void); /* * Defined as a macro as it is a very low level header included from * areas that don't even know about current. This gives the rcu_read_lock() * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. */ #define rcu_preempt_depth() READ_ONCE(current->rcu_read_lock_nesting) #else /* #ifdef CONFIG_PREEMPT_RCU */ #ifdef CONFIG_TINY_RCU #define rcu_read_unlock_strict() do { } while (0) #else void rcu_read_unlock_strict(void); #endif static inline void __rcu_read_lock(void) { preempt_disable(); } static inline void __rcu_read_unlock(void) { if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) rcu_read_unlock_strict(); preempt_enable(); } static inline int rcu_preempt_depth(void) { return 0; } #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ #ifdef CONFIG_RCU_LAZY void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func); #else static inline void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func) { call_rcu(head, func); } #endif /* Internal to kernel */ void rcu_init(void); extern int rcu_scheduler_active; void rcu_sched_clock_irq(int user); #ifdef CONFIG_RCU_STALL_COMMON void rcu_sysrq_start(void); void rcu_sysrq_end(void); #else /* #ifdef CONFIG_RCU_STALL_COMMON */ static inline void rcu_sysrq_start(void) { } static inline void rcu_sysrq_end(void) { } #endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */ #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) void rcu_irq_work_resched(void); #else static __always_inline void rcu_irq_work_resched(void) { } #endif #ifdef CONFIG_RCU_NOCB_CPU void rcu_init_nohz(void); int rcu_nocb_cpu_offload(int cpu); int rcu_nocb_cpu_deoffload(int cpu); void rcu_nocb_flush_deferred_wakeup(void); #define RCU_NOCB_LOCKDEP_WARN(c, s) RCU_LOCKDEP_WARN(c, s) #else /* #ifdef CONFIG_RCU_NOCB_CPU */ static inline void rcu_init_nohz(void) { } static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; } static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; } static inline void rcu_nocb_flush_deferred_wakeup(void) { } #define RCU_NOCB_LOCKDEP_WARN(c, s) #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ /* * Note a quasi-voluntary context switch for RCU-tasks's benefit. * This is a macro rather than an inline function to avoid #include hell. */ #ifdef CONFIG_TASKS_RCU_GENERIC # ifdef CONFIG_TASKS_RCU # define rcu_tasks_classic_qs(t, preempt) \ do { \ if (!(preempt) && READ_ONCE((t)->rcu_tasks_holdout)) \ WRITE_ONCE((t)->rcu_tasks_holdout, false); \ } while (0) void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); void synchronize_rcu_tasks(void); void rcu_tasks_torture_stats_print(char *tt, char *tf); # else # define rcu_tasks_classic_qs(t, preempt) do { } while (0) # define call_rcu_tasks call_rcu # define synchronize_rcu_tasks synchronize_rcu # endif # ifdef CONFIG_TASKS_TRACE_RCU // Bits for ->trc_reader_special.b.need_qs field. #define TRC_NEED_QS 0x1 // Task needs a quiescent state. #define TRC_NEED_QS_CHECKED 0x2 // Task has been checked for needing quiescent state. 
u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new); void rcu_tasks_trace_qs_blkd(struct task_struct *t); # define rcu_tasks_trace_qs(t) \ do { \ int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); \ \ if (unlikely(READ_ONCE((t)->trc_reader_special.b.need_qs) == TRC_NEED_QS) && \ likely(!___rttq_nesting)) { \ rcu_trc_cmpxchg_need_qs((t), TRC_NEED_QS, TRC_NEED_QS_CHECKED); \ } else if (___rttq_nesting && ___rttq_nesting != INT_MIN && \ !READ_ONCE((t)->trc_reader_special.b.blocked)) { \ rcu_tasks_trace_qs_blkd(t); \ } \ } while (0) void rcu_tasks_trace_torture_stats_print(char *tt, char *tf); # else # define rcu_tasks_trace_qs(t) do { } while (0) # endif #define rcu_tasks_qs(t, preempt) \ do { \ rcu_tasks_classic_qs((t), (preempt)); \ rcu_tasks_trace_qs(t); \ } while (0) # ifdef CONFIG_TASKS_RUDE_RCU void synchronize_rcu_tasks_rude(void); void rcu_tasks_rude_torture_stats_print(char *tt, char *tf); # endif #define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false) void exit_tasks_rcu_start(void); void exit_tasks_rcu_finish(void); #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */ #define rcu_tasks_classic_qs(t, preempt) do { } while (0) #define rcu_tasks_qs(t, preempt) do { } while (0) #define rcu_note_voluntary_context_switch(t) do { } while (0) #define call_rcu_tasks call_rcu #define synchronize_rcu_tasks synchronize_rcu static inline void exit_tasks_rcu_start(void) { } static inline void exit_tasks_rcu_finish(void) { } #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */ /** * rcu_trace_implies_rcu_gp - does an RCU Tasks Trace grace period imply an RCU grace period? * * As an accident of implementation, an RCU Tasks Trace grace period also * acts as an RCU grace period. However, this could change at any time. * Code relying on this accident must call this function to verify that * this accident is still happening. * * You have been warned! */ static inline bool rcu_trace_implies_rcu_gp(void) { return true; } /** * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU * * This macro resembles cond_resched(), except that it is defined to * report potential quiescent states to RCU-tasks even if the cond_resched() * machinery were to be shut off, as some advocate for PREEMPTION kernels. */ #define cond_resched_tasks_rcu_qs() \ do { \ rcu_tasks_qs(current, false); \ cond_resched(); \ } while (0) /** * rcu_softirq_qs_periodic - Report RCU and RCU-Tasks quiescent states * @old_ts: jiffies at start of processing. * * This helper is for long-running softirq handlers, such as NAPI threads in * networking. The caller should initialize the variable passed in as @old_ts * at the beginning of the softirq handler. When invoked frequently, this macro * will invoke rcu_softirq_qs() every 100 milliseconds thereafter, which will * provide both RCU and RCU-Tasks quiescent states. Note that this macro * modifies its old_ts argument. * * Because regions of code that have disabled softirq act as RCU read-side * critical sections, this macro should be invoked with softirq (and * preemption) enabled. * * The macro is not needed when CONFIG_PREEMPT_RT is defined. RT kernels would * have more chance to invoke schedule() calls and provide necessary quiescent * states. As a contrast, calling cond_resched() only won't achieve the same * effect because cond_resched() does not provide RCU-Tasks quiescent states. 
*/ #define rcu_softirq_qs_periodic(old_ts) \ do { \ if (!IS_ENABLED(CONFIG_PREEMPT_RT) && \ time_after(jiffies, (old_ts) + HZ / 10)) { \ preempt_disable(); \ rcu_softirq_qs(); \ preempt_enable(); \ (old_ts) = jiffies; \ } \ } while (0) /* * Infrastructure to implement the synchronize_() primitives in * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. */ #if defined(CONFIG_TREE_RCU) #include <linux/rcutree.h> #elif defined(CONFIG_TINY_RCU) #include <linux/rcutiny.h> #else #error "Unknown RCU implementation specified to kernel configuration" #endif /* * The init_rcu_head_on_stack() and destroy_rcu_head_on_stack() calls * are needed for dynamic initialization and destruction of rcu_head * on the stack, and init_rcu_head()/destroy_rcu_head() are needed for * dynamic initialization and destruction of statically allocated rcu_head * structures. However, rcu_head structures allocated dynamically in the * heap don't need any initialization. */ #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD void init_rcu_head(struct rcu_head *head); void destroy_rcu_head(struct rcu_head *head); void init_rcu_head_on_stack(struct rcu_head *head); void destroy_rcu_head_on_stack(struct rcu_head *head); #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ static inline void init_rcu_head(struct rcu_head *head) { } static inline void destroy_rcu_head(struct rcu_head *head) { } static inline void init_rcu_head_on_stack(struct rcu_head *head) { } static inline void destroy_rcu_head_on_stack(struct rcu_head *head) { } #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) bool rcu_lockdep_current_cpu_online(void); #else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ static inline bool rcu_lockdep_current_cpu_online(void) { return true; } #endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ extern struct lockdep_map rcu_lock_map; extern struct lockdep_map rcu_bh_lock_map; extern struct lockdep_map rcu_sched_lock_map; extern struct lockdep_map rcu_callback_map; #ifdef CONFIG_DEBUG_LOCK_ALLOC static inline void rcu_lock_acquire(struct lockdep_map *map) { lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); } static inline void rcu_try_lock_acquire(struct lockdep_map *map) { lock_acquire(map, 0, 1, 2, 0, NULL, _THIS_IP_); } static inline void rcu_lock_release(struct lockdep_map *map) { lock_release(map, _THIS_IP_); } int debug_lockdep_rcu_enabled(void); int rcu_read_lock_held(void); int rcu_read_lock_bh_held(void); int rcu_read_lock_sched_held(void); int rcu_read_lock_any_held(void); #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ # define rcu_lock_acquire(a) do { } while (0) # define rcu_try_lock_acquire(a) do { } while (0) # define rcu_lock_release(a) do { } while (0) static inline int rcu_read_lock_held(void) { return 1; } static inline int rcu_read_lock_bh_held(void) { return 1; } static inline int rcu_read_lock_sched_held(void) { return !preemptible(); } static inline int rcu_read_lock_any_held(void) { return !preemptible(); } static inline int debug_lockdep_rcu_enabled(void) { return 0; } #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ #ifdef CONFIG_PROVE_RCU /** * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met * @c: condition to check * @s: informative message * * This checks debug_lockdep_rcu_enabled() before checking (c) to * prevent early boot splats due to lockdep not yet being initialized, * and rechecks it after checking (c) to prevent false-positive splats * due to races with lockdep being disabled. 
See commit 3066820034b5dd * ("rcu: Reject RCU_LOCKDEP_WARN() false positives") for more detail. */ #define RCU_LOCKDEP_WARN(c, s) \ do { \ static bool __section(".data..unlikely") __warned; \ if (debug_lockdep_rcu_enabled() && (c) && \ debug_lockdep_rcu_enabled() && !__warned) { \ __warned = true; \ lockdep_rcu_suspicious(__FILE__, __LINE__, s); \ } \ } while (0) #ifndef CONFIG_PREEMPT_RCU static inline void rcu_preempt_sleep_check(void) { RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map), "Illegal context switch in RCU read-side critical section"); } #else // #ifndef CONFIG_PREEMPT_RCU static inline void rcu_preempt_sleep_check(void) { } #endif // #else // #ifndef CONFIG_PREEMPT_RCU #define rcu_sleep_check() \ do { \ rcu_preempt_sleep_check(); \ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \ RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \ "Illegal context switch in RCU-bh read-side critical section"); \ RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \ "Illegal context switch in RCU-sched read-side critical section"); \ } while (0) // See RCU_LOCKDEP_WARN() for an explanation of the double call to // debug_lockdep_rcu_enabled(). static inline bool lockdep_assert_rcu_helper(bool c) { return debug_lockdep_rcu_enabled() && (c || !rcu_is_watching() || !rcu_lockdep_current_cpu_online()) && debug_lockdep_rcu_enabled(); } /** * lockdep_assert_in_rcu_read_lock - WARN if not protected by rcu_read_lock() * * Splats if lockdep is enabled and there is no rcu_read_lock() in effect. */ #define lockdep_assert_in_rcu_read_lock() \ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map))) /** * lockdep_assert_in_rcu_read_lock_bh - WARN if not protected by rcu_read_lock_bh() * * Splats if lockdep is enabled and there is no rcu_read_lock_bh() in effect. * Note that local_bh_disable() and friends do not suffice here, instead an * actual rcu_read_lock_bh() is required. */ #define lockdep_assert_in_rcu_read_lock_bh() \ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_bh_lock_map))) /** * lockdep_assert_in_rcu_read_lock_sched - WARN if not protected by rcu_read_lock_sched() * * Splats if lockdep is enabled and there is no rcu_read_lock_sched() * in effect. Note that preempt_disable() and friends do not suffice here, * instead an actual rcu_read_lock_sched() is required. */ #define lockdep_assert_in_rcu_read_lock_sched() \ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_sched_lock_map))) /** * lockdep_assert_in_rcu_reader - WARN if not within some type of RCU reader * * Splats if lockdep is enabled and there is no RCU reader of any * type in effect. Note that regions of code protected by things like * preempt_disable, local_bh_disable(), and local_irq_disable() all qualify * as RCU readers. * * Note that this will never trigger in PREEMPT_NONE or PREEMPT_VOLUNTARY * kernels that are not also built with PREEMPT_COUNT. But if you have * lockdep enabled, you might as well also enable PREEMPT_COUNT. 
*/ #define lockdep_assert_in_rcu_reader() \ WARN_ON_ONCE(lockdep_assert_rcu_helper(!lock_is_held(&rcu_lock_map) && \ !lock_is_held(&rcu_bh_lock_map) && \ !lock_is_held(&rcu_sched_lock_map) && \ preemptible())) #else /* #ifdef CONFIG_PROVE_RCU */ #define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c)) #define rcu_sleep_check() do { } while (0) #define lockdep_assert_in_rcu_read_lock() do { } while (0) #define lockdep_assert_in_rcu_read_lock_bh() do { } while (0) #define lockdep_assert_in_rcu_read_lock_sched() do { } while (0) #define lockdep_assert_in_rcu_reader() do { } while (0) #endif /* #else #ifdef CONFIG_PROVE_RCU */ /* * Helper functions for rcu_dereference_check(), rcu_dereference_protected() * and rcu_assign_pointer(). Some of these could be folded into their * callers, but they are left separate in order to ease introduction of * multiple pointers markings to match different RCU implementations * (e.g., __srcu), should this make sense in the future. */ #ifdef __CHECKER__ #define rcu_check_sparse(p, space) \ ((void)(((typeof(*p) space *)p) == p)) #else /* #ifdef __CHECKER__ */ #define rcu_check_sparse(p, space) #endif /* #else #ifdef __CHECKER__ */ #define __unrcu_pointer(p, local) \ ({ \ typeof(*p) *local = (typeof(*p) *__force)(p); \ rcu_check_sparse(p, __rcu); \ ((typeof(*p) __force __kernel *)(local)); \ }) /** * unrcu_pointer - mark a pointer as not being RCU protected * @p: pointer needing to lose its __rcu property * * Converts @p from an __rcu pointer to a __kernel pointer. * This allows an __rcu pointer to be used with xchg() and friends. */ #define unrcu_pointer(p) __unrcu_pointer(p, __UNIQUE_ID(rcu)) #define __rcu_access_pointer(p, local, space) \ ({ \ typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \ rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(local)); \ }) #define __rcu_dereference_check(p, local, c, space) \ ({ \ /* Dependency order vs. p above. */ \ typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \ rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(local)); \ }) #define __rcu_dereference_protected(p, local, c, space) \ ({ \ RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \ rcu_check_sparse(p, space); \ ((typeof(*p) __force __kernel *)(p)); \ }) #define __rcu_dereference_raw(p, local) \ ({ \ /* Dependency order vs. p above. */ \ typeof(p) local = READ_ONCE(p); \ ((typeof(*p) __force __kernel *)(local)); \ }) #define rcu_dereference_raw(p) __rcu_dereference_raw(p, __UNIQUE_ID(rcu)) /** * RCU_INITIALIZER() - statically initialize an RCU-protected global variable * @v: The value to statically initialize with. */ #define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) /** * rcu_assign_pointer() - assign to RCU-protected pointer * @p: pointer to assign to * @v: value to assign (publish) * * Assigns the specified value to the specified RCU-protected * pointer, ensuring that any concurrent RCU readers will see * any prior initialization. * * Inserts memory barriers on architectures that require them * (which is most of them), and also prevents the compiler from * reordering the code that initializes the structure after the pointer * assignment. More importantly, this call documents which pointers * will be dereferenced by RCU read-side code. * * In some special cases, you may use RCU_INIT_POINTER() instead * of rcu_assign_pointer(). 
RCU_INIT_POINTER() is a bit faster due * to the fact that it does not constrain either the CPU or the compiler. * That said, using RCU_INIT_POINTER() when you should have used * rcu_assign_pointer() is a very bad thing that results in * impossible-to-diagnose memory corruption. So please be careful. * See the RCU_INIT_POINTER() comment header for details. * * Note that rcu_assign_pointer() evaluates each of its arguments only * once, appearances notwithstanding. One of the "extra" evaluations * is in typeof() and the other visible only to sparse (__CHECKER__), * neither of which actually execute the argument. As with most cpp * macros, this execute-arguments-only-once property is important, so * please be careful when making changes to rcu_assign_pointer() and the * other macros that it invokes. */ #define rcu_assign_pointer(p, v) \ do { \ uintptr_t _r_a_p__v = (uintptr_t)(v); \ rcu_check_sparse(p, __rcu); \ \ if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ else \ smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ } while (0) /** * rcu_replace_pointer() - replace an RCU pointer, returning its old value * @rcu_ptr: RCU pointer, whose old value is returned * @ptr: regular pointer * @c: the lockdep conditions under which the dereference will take place * * Perform a replacement, where @rcu_ptr is an RCU-annotated * pointer and @c is the lockdep argument that is passed to the * rcu_dereference_protected() call used to read that pointer. The old * value of @rcu_ptr is returned, and @rcu_ptr is set to @ptr. */ #define rcu_replace_pointer(rcu_ptr, ptr, c) \ ({ \ typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \ rcu_assign_pointer((rcu_ptr), (ptr)); \ __tmp; \ }) /** * rcu_access_pointer() - fetch RCU pointer with no dereferencing * @p: The pointer to read * * Return the value of the specified RCU-protected pointer, but omit the * lockdep checks for being in an RCU read-side critical section. This is * useful when the value of this pointer is accessed, but the pointer is * not dereferenced, for example, when testing an RCU-protected pointer * against NULL. Although rcu_access_pointer() may also be used in cases * where update-side locks prevent the value of the pointer from changing, * you should instead use rcu_dereference_protected() for this use case. * Within an RCU read-side critical section, there is little reason to * use rcu_access_pointer(). * * It is usually best to test the rcu_access_pointer() return value * directly in order to avoid accidental dereferences being introduced * by later inattentive changes. In other words, assigning the * rcu_access_pointer() return value to a local variable results in an * accident waiting to happen. * * It is also permissible to use rcu_access_pointer() when read-side * access to the pointer was removed at least one grace period ago, as is * the case in the context of the RCU callback that is freeing up the data, * or after a synchronize_rcu() returns. This can be useful when tearing * down multi-linked structures after a grace period has elapsed. However, * rcu_dereference_protected() is normally preferred for this use case. 
*/ #define rcu_access_pointer(p) __rcu_access_pointer((p), __UNIQUE_ID(rcu), __rcu) /** * rcu_dereference_check() - rcu_dereference with debug checking * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * * Do an rcu_dereference(), but check that the conditions under which the * dereference will take place are correct. Typically the conditions * indicate the various locking conditions that should be held at that * point. The check should return true if the conditions are satisfied. * An implicit check for being in an RCU read-side critical section * (rcu_read_lock()) is included. * * For example: * * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock)); * * could be used to indicate to lockdep that foo->bar may only be dereferenced * if either rcu_read_lock() is held, or that the lock required to replace * the bar struct at foo->bar is held. * * Note that the list of conditions may also include indications of when a lock * need not be held, for example during initialisation or destruction of the * target struct: * * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) || * atomic_read(&foo->usage) == 0); * * Inserts memory barriers on architectures that require them * (currently only the Alpha), prevents the compiler from refetching * (and from merging fetches), and, more importantly, documents exactly * which pointers are protected by RCU and checks that the pointer is * annotated as __rcu. */ #define rcu_dereference_check(p, c) \ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \ (c) || rcu_read_lock_held(), __rcu) /** * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * * This is the RCU-bh counterpart to rcu_dereference_check(). However, * please note that starting in v5.0 kernels, vanilla RCU grace periods * wait for local_bh_disable() regions of code in addition to regions of * code demarked by rcu_read_lock() and rcu_read_unlock(). This means * that synchronize_rcu(), call_rcu, and friends all take not only * rcu_read_lock() but also rcu_read_lock_bh() into account. */ #define rcu_dereference_bh_check(p, c) \ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \ (c) || rcu_read_lock_bh_held(), __rcu) /** * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * * This is the RCU-sched counterpart to rcu_dereference_check(). * However, please note that starting in v5.0 kernels, vanilla RCU grace * periods wait for preempt_disable() regions of code in addition to * regions of code demarked by rcu_read_lock() and rcu_read_unlock(). * This means that synchronize_rcu(), call_rcu, and friends all take not * only rcu_read_lock() but also rcu_read_lock_sched() into account. */ #define rcu_dereference_sched_check(p, c) \ __rcu_dereference_check((p), __UNIQUE_ID(rcu), \ (c) || rcu_read_lock_sched_held(), \ __rcu) /* * The tracing infrastructure traces RCU (we want that), but unfortunately * some of the RCU checks causes tracing to lock up the system. * * The no-tracing version of rcu_dereference_raw() must not call * rcu_read_lock_held(). 
*/ #define rcu_dereference_raw_check(p) \ __rcu_dereference_check((p), __UNIQUE_ID(rcu), 1, __rcu) /** * rcu_dereference_protected() - fetch RCU pointer when updates prevented * @p: The pointer to read, prior to dereferencing * @c: The conditions under which the dereference will take place * * Return the value of the specified RCU-protected pointer, but omit * the READ_ONCE(). This is useful in cases where update-side locks * prevent the value of the pointer from changing. Please note that this * primitive does *not* prevent the compiler from repeating this reference * or combining it with other references, so it should not be used without * protection of appropriate locks. * * This function is only for update-side use. Using this function * when protected only by rcu_read_lock() will result in infrequent * but very ugly failures. */ #define rcu_dereference_protected(p, c) \ __rcu_dereference_protected((p), __UNIQUE_ID(rcu), (c), __rcu) /** * rcu_dereference() - fetch RCU-protected pointer for dereferencing * @p: The pointer to read, prior to dereferencing * * This is a simple wrapper around rcu_dereference_check(). */ #define rcu_dereference(p) rcu_dereference_check(p, 0) /** * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing * @p: The pointer to read, prior to dereferencing * * Makes rcu_dereference_check() do the dirty work. */ #define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0) /** * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing * @p: The pointer to read, prior to dereferencing * * Makes rcu_dereference_check() do the dirty work. */ #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0) /** * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism * @p: The pointer to hand off * * This is simply an identity function, but it documents where a pointer * is handed off from RCU to some other synchronization mechanism, for * example, reference counting or locking. In C11, it would map to * kill_dependency(). It could be used as follows:: * * rcu_read_lock(); * p = rcu_dereference(gp); * long_lived = is_long_lived(p); * if (long_lived) { * if (!atomic_inc_not_zero(p->refcnt)) * long_lived = false; * else * p = rcu_pointer_handoff(p); * } * rcu_read_unlock(); */ #define rcu_pointer_handoff(p) (p) /** * rcu_read_lock() - mark the beginning of an RCU read-side critical section * * When synchronize_rcu() is invoked on one CPU while other CPUs * are within RCU read-side critical sections, then the * synchronize_rcu() is guaranteed to block until after all the other * CPUs exit their critical sections. Similarly, if call_rcu() is invoked * on one CPU while other CPUs are within RCU read-side critical * sections, invocation of the corresponding RCU callback is deferred * until after the all the other CPUs exit their critical sections. * * Both synchronize_rcu() and call_rcu() also wait for regions of code * with preemption disabled, including regions of code with interrupts or * softirqs disabled. * * Note, however, that RCU callbacks are permitted to run concurrently * with new RCU read-side critical sections. One way that this can happen * is via the following sequence of events: (1) CPU 0 enters an RCU * read-side critical section, (2) CPU 1 invokes call_rcu() to register * an RCU callback, (3) CPU 0 exits the RCU read-side critical section, * (4) CPU 2 enters a RCU read-side critical section, (5) the RCU * callback is invoked. 
This is legal, because the RCU read-side critical * section that was running concurrently with the call_rcu() (and which * therefore might be referencing something that the corresponding RCU * callback would free up) has completed before the corresponding * RCU callback is invoked. * * RCU read-side critical sections may be nested. Any deferred actions * will be deferred until the outermost RCU read-side critical section * completes. * * You can avoid reading and understanding the next paragraph by * following this rule: don't put anything in an rcu_read_lock() RCU * read-side critical section that would block in a !PREEMPTION kernel. * But if you want the full story, read on! * * In non-preemptible RCU implementations (pure TREE_RCU and TINY_RCU), * it is illegal to block while in an RCU read-side critical section. * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPTION * kernel builds, RCU read-side critical sections may be preempted, * but explicit blocking is illegal. Finally, in preemptible RCU * implementations in real-time (with -rt patchset) kernel builds, RCU * read-side critical sections may be preempted and they may also block, but * only when acquiring spinlocks that are subject to priority inheritance. */ static __always_inline void rcu_read_lock(void) { __rcu_read_lock(); __acquire(RCU); rcu_lock_acquire(&rcu_lock_map); RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_lock() used illegally while idle"); } /* * So where is rcu_write_lock()? It does not exist, as there is no * way for writers to lock out RCU readers. This is a feature, not * a bug -- this property is what provides RCU's performance benefits. * Of course, writers must coordinate with each other. The normal * spinlock primitives work well for this, but any other technique may be * used as well. RCU does not care how the writers keep out of each * others' way, as long as they do so. */ /** * rcu_read_unlock() - marks the end of an RCU read-side critical section. * * In almost all situations, rcu_read_unlock() is immune from deadlock. * This deadlock immunity also extends to the scheduler's runqueue * and priority-inheritance spinlocks, courtesy of the quiescent-state * deferral that is carried out when rcu_read_unlock() is invoked with * interrupts disabled. * * See rcu_read_lock() for more information. */ static inline void rcu_read_unlock(void) { RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_unlock() used illegally while idle"); rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */ __release(RCU); __rcu_read_unlock(); } /** * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section * * This is equivalent to rcu_read_lock(), but also disables softirqs. * Note that anything else that disables softirqs can also serve as an RCU * read-side critical section. However, please note that this equivalence * applies only to v5.0 and later. Before v5.0, rcu_read_lock() and * rcu_read_lock_bh() were unrelated. * * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh() * must occur in the same context, for example, it is illegal to invoke * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh() * was invoked from some other task. 
*/ static inline void rcu_read_lock_bh(void) { local_bh_disable(); __acquire(RCU_BH); rcu_lock_acquire(&rcu_bh_lock_map); RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_lock_bh() used illegally while idle"); } /** * rcu_read_unlock_bh() - marks the end of a softirq-only RCU critical section * * See rcu_read_lock_bh() for more information. */ static inline void rcu_read_unlock_bh(void) { RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_unlock_bh() used illegally while idle"); rcu_lock_release(&rcu_bh_lock_map); __release(RCU_BH); local_bh_enable(); } /** * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section * * This is equivalent to rcu_read_lock(), but also disables preemption. * Read-side critical sections can also be introduced by anything else that * disables preemption, including local_irq_disable() and friends. However, * please note that the equivalence to rcu_read_lock() applies only to * v5.0 and later. Before v5.0, rcu_read_lock() and rcu_read_lock_sched() * were unrelated. * * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched() * must occur in the same context, for example, it is illegal to invoke * rcu_read_unlock_sched() from process context if the matching * rcu_read_lock_sched() was invoked from an NMI handler. */ static inline void rcu_read_lock_sched(void) { preempt_disable(); __acquire(RCU_SCHED); rcu_lock_acquire(&rcu_sched_lock_map); RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_lock_sched() used illegally while idle"); } /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ static inline notrace void rcu_read_lock_sched_notrace(void) { preempt_disable_notrace(); __acquire(RCU_SCHED); } /** * rcu_read_unlock_sched() - marks the end of a RCU-classic critical section * * See rcu_read_lock_sched() for more information. */ static inline void rcu_read_unlock_sched(void) { RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_unlock_sched() used illegally while idle"); rcu_lock_release(&rcu_sched_lock_map); __release(RCU_SCHED); preempt_enable(); } /* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */ static inline notrace void rcu_read_unlock_sched_notrace(void) { __release(RCU_SCHED); preempt_enable_notrace(); } /** * RCU_INIT_POINTER() - initialize an RCU protected pointer * @p: The pointer to be initialized. * @v: The value to initialized the pointer to. * * Initialize an RCU-protected pointer in special cases where readers * do not need ordering constraints on the CPU or the compiler. These * special cases are: * * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer *or* * 2. The caller has taken whatever steps are required to prevent * RCU readers from concurrently accessing this pointer *or* * 3. The referenced data structure has already been exposed to * readers either at compile time or via rcu_assign_pointer() *and* * * a. You have not made *any* reader-visible changes to * this structure since then *or* * b. It is OK for readers accessing this structure from its * new location to see the old state of the structure. (For * example, the changes were to statistical counters or to * other state where exact synchronization is not required.) * * Failure to follow these rules governing use of RCU_INIT_POINTER() will * result in impossible-to-diagnose memory corruption. As in the structures * will look OK in crash dumps, but any concurrent RCU readers might * see pre-initialized values of the referenced data structure. So * please be very careful how you use RCU_INIT_POINTER()!!! 
* * If you are creating an RCU-protected linked structure that is accessed * by a single external-to-structure RCU-protected pointer, then you may * use RCU_INIT_POINTER() to initialize the internal RCU-protected * pointers, but you must use rcu_assign_pointer() to initialize the * external-to-structure pointer *after* you have completely initialized * the reader-accessible portions of the linked structure. * * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no * ordering guarantees for either the CPU or the compiler. */ #define RCU_INIT_POINTER(p, v) \ do { \ rcu_check_sparse(p, __rcu); \ WRITE_ONCE(p, RCU_INITIALIZER(v)); \ } while (0) /** * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer * @p: The pointer to be initialized. * @v: The value to initialized the pointer to. * * GCC-style initialization for an RCU-protected pointer in a structure field. */ #define RCU_POINTER_INITIALIZER(p, v) \ .p = RCU_INITIALIZER(v) /** * kfree_rcu() - kfree an object after a grace period. * @ptr: pointer to kfree for double-argument invocations. * @rhf: the name of the struct rcu_head within the type of @ptr. * * Many rcu callbacks functions just call kfree() on the base structure. * These functions are trivial, but their size adds up, and furthermore * when they are used in a kernel module, that module must invoke the * high-latency rcu_barrier() function at module-unload time. * * The kfree_rcu() function handles this issue. In order to have a universal * callback function handling different offsets of rcu_head, the callback needs * to determine the starting address of the freed object, which can be a large * kmalloc or vmalloc allocation. To allow simply aligning the pointer down to * page boundary for those, only offsets up to 4095 bytes can be accommodated. * If the offset is larger than 4095 bytes, a compile-time error will * be generated in kvfree_rcu_arg_2(). If this error is triggered, you can * either fall back to use of call_rcu() or rearrange the structure to * position the rcu_head structure into the first 4096 bytes. * * The object to be freed can be allocated either by kmalloc() or * kmem_cache_alloc(). * * Note that the allowable offset might decrease in the future. * * The BUILD_BUG_ON check must not involve any function calls, hence the * checks are done in macros here. */ #define kfree_rcu(ptr, rhf) kvfree_rcu_arg_2(ptr, rhf) #define kvfree_rcu(ptr, rhf) kvfree_rcu_arg_2(ptr, rhf) /** * kfree_rcu_mightsleep() - kfree an object after a grace period. * @ptr: pointer to kfree for single-argument invocations. * * When it comes to head-less variant, only one argument * is passed and that is just a pointer which has to be * freed after a grace period. Therefore the semantic is * * kfree_rcu_mightsleep(ptr); * * where @ptr is the pointer to be freed by kvfree(). * * Please note, head-less way of freeing is permitted to * use from a context that has to follow might_sleep() * annotation. Otherwise, please switch and embed the * rcu_head structure within the type of @ptr. */ #define kfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr) #define kvfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr) /* * In mm/slab_common.c, no suitable header to include here. */ void kvfree_call_rcu(struct rcu_head *head, void *ptr); /* * The BUILD_BUG_ON() makes sure the rcu_head offset can be handled. See the * comment of kfree_rcu() for details. 
*/ #define kvfree_rcu_arg_2(ptr, rhf) \ do { \ typeof (ptr) ___p = (ptr); \ \ if (___p) { \ BUILD_BUG_ON(offsetof(typeof(*(ptr)), rhf) >= 4096); \ kvfree_call_rcu(&((___p)->rhf), (void *) (___p)); \ } \ } while (0) #define kvfree_rcu_arg_1(ptr) \ do { \ typeof(ptr) ___p = (ptr); \ \ if (___p) \ kvfree_call_rcu(NULL, (void *) (___p)); \ } while (0) /* * Place this after a lock-acquisition primitive to guarantee that * an UNLOCK+LOCK pair acts as a full barrier. This guarantee applies * if the UNLOCK and LOCK are executed by the same CPU or if the * UNLOCK and LOCK operate on the same lock variable. */ #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE #define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */ #else /* #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */ #define smp_mb__after_unlock_lock() do { } while (0) #endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */ /* Has the specified rcu_head structure been handed to call_rcu()? */ /** * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu() * @rhp: The rcu_head structure to initialize. * * If you intend to invoke rcu_head_after_call_rcu() to test whether a * given rcu_head structure has already been passed to call_rcu(), then * you must also invoke this rcu_head_init() function on it just after * allocating that structure. Calls to this function must not race with * calls to call_rcu(), rcu_head_after_call_rcu(), or callback invocation. */ static inline void rcu_head_init(struct rcu_head *rhp) { rhp->func = (rcu_callback_t)~0L; } /** * rcu_head_after_call_rcu() - Has this rcu_head been passed to call_rcu()? * @rhp: The rcu_head structure to test. * @f: The function passed to call_rcu() along with @rhp. * * Returns @true if the @rhp has been passed to call_rcu() with @func, * and @false otherwise. Emits a warning in any other case, including * the case where @rhp has already been invoked after a grace period. * Calls to this function must not race with callback invocation. One way * to avoid such races is to enclose the call to rcu_head_after_call_rcu() * in an RCU read-side critical section that includes a read-side fetch * of the pointer to the structure containing @rhp. */ static inline bool rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f) { rcu_callback_t func = READ_ONCE(rhp->func); if (func == f) return true; WARN_ON_ONCE(func != (rcu_callback_t)~0L); return false; } /* kernel/ksysfs.c definitions */ extern int rcu_expedited; extern int rcu_normal; DEFINE_LOCK_GUARD_0(rcu, do { rcu_read_lock(); /* * sparse doesn't call the cleanup function, * so just release immediately and don't track * the context. We don't need to anyway, since * the whole point of the guard is to not need * the explicit unlock. */ __release(RCU); } while (0), rcu_read_unlock()) #endif /* __LINUX_RCUPDATE_H */ |
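/*
 * Illustrative sketch only (not part of this header): the canonical
 * publish/read/reclaim pattern built from the primitives documented
 * above. All names here ("struct my_cfg", "my_cfg_ptr", "my_lock",
 * the two accessors) are made up for the example.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_cfg {
	int threshold;
	struct rcu_head rcu;		/* for kfree_rcu() */
};

static struct my_cfg __rcu *my_cfg_ptr;
static DEFINE_SPINLOCK(my_lock);

/* Reader: must not block between rcu_read_lock() and rcu_read_unlock(). */
static int my_cfg_read_threshold(void)
{
	struct my_cfg *cfg;
	int ret = 0;

	rcu_read_lock();
	cfg = rcu_dereference(my_cfg_ptr);	/* NULL or a fully initialized object */
	if (cfg)
		ret = cfg->threshold;
	rcu_read_unlock();
	return ret;
}

/* Updater: publishes a new version, then frees the old one after a grace period. */
static int my_cfg_set_threshold(int threshold)
{
	struct my_cfg *new, *old;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	new->threshold = threshold;

	spin_lock(&my_lock);
	old = rcu_dereference_protected(my_cfg_ptr, lockdep_is_held(&my_lock));
	rcu_assign_pointer(my_cfg_ptr, new);	/* orders init before publication */
	spin_unlock(&my_lock);

	if (old)
		kfree_rcu(old, rcu);		/* waits for pre-existing readers */
	return 0;
}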
/* * hugetlbpage-backed filesystem. Based on ramfs. * * Nadia Yvette Chambers, 2002 * * Copyright (C) 2002 Linus Torvalds. * License: GPL */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/thread_info.h> #include <asm/current.h> #include <linux/falloc.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/file.h> #include <linux/kernel.h> #include <linux/writeback.h> #include <linux/pagemap.h> #include <linux/highmem.h> #include <linux/init.h> #include <linux/string.h> #include <linux/capability.h> #include <linux/ctype.h> #include <linux/backing-dev.h> #include <linux/hugetlb.h> #include <linux/pagevec.h> #include <linux/fs_parser.h> #include <linux/mman.h> #include <linux/slab.h> #include <linux/dnotify.h> #include <linux/statfs.h> #include <linux/security.h> #include <linux/magic.h> #include <linux/migrate.h> #include <linux/uio.h> #include <linux/uaccess.h> #include <linux/sched/mm.h> #define CREATE_TRACE_POINTS #include <trace/events/hugetlbfs.h> static const struct address_space_operations hugetlbfs_aops; static const struct file_operations hugetlbfs_file_operations; static const struct inode_operations hugetlbfs_dir_inode_operations; static const struct inode_operations hugetlbfs_inode_operations; enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT }; struct hugetlbfs_fs_context { struct hstate *hstate; unsigned long long max_size_opt; unsigned long long min_size_opt; long max_hpages; long nr_inodes; long min_hpages; enum hugetlbfs_size_type max_val_type; enum hugetlbfs_size_type min_val_type; kuid_t uid; kgid_t gid; umode_t mode; }; int sysctl_hugetlb_shm_group; enum hugetlb_param { Opt_gid, Opt_min_size, Opt_mode, Opt_nr_inodes, Opt_pagesize, Opt_size, Opt_uid, }; static const struct fs_parameter_spec hugetlb_fs_parameters[] = { fsparam_gid ("gid", Opt_gid), fsparam_string("min_size", Opt_min_size), fsparam_u32oct("mode", Opt_mode), fsparam_string("nr_inodes", Opt_nr_inodes), fsparam_string("pagesize", Opt_pagesize), fsparam_string("size", Opt_size), fsparam_uid ("uid", Opt_uid), {} }; /* * Mask used when checking the page offset value passed in via system * calls. This value will be converted to a loff_t which is signed. * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the * value. The extra bit (- 1 in the shift value) is to take the sign * bit into account. */ #define PGOFF_LOFFT_MAX \ (((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1))) static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) { struct inode *inode = file_inode(file); loff_t len, vma_len; int ret; struct hstate *h = hstate_file(file); vm_flags_t vm_flags; /* * vma address alignment (but not the pgoff alignment) has * already been checked by prepare_hugepage_range. If you add * any error returns here, do so after setting VM_HUGETLB, so * is_vm_hugetlb_page tests below unmap_region go the right * way when do_mmap unwinds (may be important on powerpc * and ia64).
*/ vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND); vma->vm_ops = &hugetlb_vm_ops; /* * page based offset in vm_pgoff could be sufficiently large to * overflow a loff_t when converted to byte offset. This can * only happen on architectures where sizeof(loff_t) == * sizeof(unsigned long). So, only check in those instances. */ if (sizeof(unsigned long) == sizeof(loff_t)) { if (vma->vm_pgoff & PGOFF_LOFFT_MAX) return -EINVAL; } /* must be huge page aligned */ if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT)) return -EINVAL; vma_len = (loff_t)(vma->vm_end - vma->vm_start); len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); /* check for overflow */ if (len < vma_len) return -EINVAL; inode_lock(inode); file_accessed(file); ret = -ENOMEM; vm_flags = vma->vm_flags; /* * for SHM_HUGETLB, the pages are reserved in the shmget() call so skip * reserving here. Note: only for SHM hugetlbfs file, the inode * flag S_PRIVATE is set. */ if (inode->i_flags & S_PRIVATE) vm_flags |= VM_NORESERVE; if (!hugetlb_reserve_pages(inode, vma->vm_pgoff >> huge_page_order(h), len >> huge_page_shift(h), vma, vm_flags)) goto out; ret = 0; if (vma->vm_flags & VM_WRITE && inode->i_size < len) i_size_write(inode, len); out: inode_unlock(inode); return ret; } /* * Called under mmap_write_lock(mm). */ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long addr0 = 0; struct hstate *h = hstate_file(file); if (len & ~huge_page_mask(h)) return -EINVAL; if (flags & MAP_FIXED) { if (addr & ~huge_page_mask(h)) return -EINVAL; if (prepare_hugepage_range(file, addr, len)) return -EINVAL; } if (addr) addr0 = ALIGN(addr, huge_page_size(h)); return mm_get_unmapped_area_vmflags(current->mm, file, addr0, len, pgoff, flags, 0); } /* * Someone wants to read @bytes from a HWPOISON hugetlb @folio from @offset. * Returns the maximum number of bytes one can read without touching the 1st raw * HWPOISON page. * * The implementation borrows the iteration logic from copy_page_to_iter*. */ static size_t adjust_range_hwpoison(struct folio *folio, size_t offset, size_t bytes) { struct page *page; size_t n = 0; size_t res = 0; /* First page to start the loop. */ page = folio_page(folio, offset / PAGE_SIZE); offset %= PAGE_SIZE; while (1) { if (is_raw_hwpoison_page_in_hugepage(page)) break; /* Safe to read n bytes without touching HWPOISON subpage. */ n = min(bytes, (size_t)PAGE_SIZE - offset); res += n; bytes -= n; if (!bytes || !n) break; offset += n; if (offset == PAGE_SIZE) { page = nth_page(page, 1); offset = 0; } } return res; } /* * Support for read() - Find the page attached to f_mapping and copy out the * data. This provides functionality similar to filemap_read(). 
*/ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *file = iocb->ki_filp; struct hstate *h = hstate_file(file); struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; unsigned long index = iocb->ki_pos >> huge_page_shift(h); unsigned long offset = iocb->ki_pos & ~huge_page_mask(h); unsigned long end_index; loff_t isize; ssize_t retval = 0; while (iov_iter_count(to)) { struct folio *folio; size_t nr, copied, want; /* nr is the maximum number of bytes to copy from this page */ nr = huge_page_size(h); isize = i_size_read(inode); if (!isize) break; end_index = (isize - 1) >> huge_page_shift(h); if (index > end_index) break; if (index == end_index) { nr = ((isize - 1) & ~huge_page_mask(h)) + 1; if (nr <= offset) break; } nr = nr - offset; /* Find the folio */ folio = filemap_lock_hugetlb_folio(h, mapping, index); if (IS_ERR(folio)) { /* * We have a HOLE, zero out the user-buffer for the * length of the hole or request. */ copied = iov_iter_zero(nr, to); } else { folio_unlock(folio); if (!folio_test_hwpoison(folio)) want = nr; else { /* * Adjust how many bytes safe to read without * touching the 1st raw HWPOISON page after * offset. */ want = adjust_range_hwpoison(folio, offset, nr); if (want == 0) { folio_put(folio); retval = -EIO; break; } } /* * We have the folio, copy it to user space buffer. */ copied = copy_folio_to_iter(folio, offset, want, to); folio_put(folio); } offset += copied; retval += copied; if (copied != nr && iov_iter_count(to)) { if (!retval) retval = -EFAULT; break; } index += offset >> huge_page_shift(h); offset &= ~huge_page_mask(h); } iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset; return retval; } static int hugetlbfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata) { return -EINVAL; } static int hugetlbfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct folio *folio, void *fsdata) { BUG(); return -EINVAL; } static void hugetlb_delete_from_page_cache(struct folio *folio) { folio_clear_dirty(folio); folio_clear_uptodate(folio); filemap_remove_folio(folio); } /* * Called with i_mmap_rwsem held for inode based vma maps. This makes * sure vma (and vm_mm) will not go away. We also hold the hugetlb fault * mutex for the page in the mapping. So, we can not race with page being * faulted into the vma. */ static bool hugetlb_vma_maps_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) { pte_t *ptep, pte; ptep = hugetlb_walk(vma, addr, huge_page_size(hstate_vma(vma))); if (!ptep) return false; pte = huge_ptep_get(vma->vm_mm, addr, ptep); if (huge_pte_none(pte) || !pte_present(pte)) return false; if (pte_pfn(pte) == pfn) return true; return false; } /* * Can vma_offset_start/vma_offset_end overflow on 32-bit arches? * No, because the interval tree returns us only those vmas * which overlap the truncated area starting at pgoff, * and no vma on a 32-bit arch can span beyond the 4GB. 
*/ static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start) { unsigned long offset = 0; if (vma->vm_pgoff < start) offset = (start - vma->vm_pgoff) << PAGE_SHIFT; return vma->vm_start + offset; } static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end) { unsigned long t_end; if (!end) return vma->vm_end; t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start; if (t_end > vma->vm_end) t_end = vma->vm_end; return t_end; } /* * Called with hugetlb fault mutex held. Therefore, no more mappings to * this folio can be created while executing the routine. */ static void hugetlb_unmap_file_folio(struct hstate *h, struct address_space *mapping, struct folio *folio, pgoff_t index) { struct rb_root_cached *root = &mapping->i_mmap; struct hugetlb_vma_lock *vma_lock; unsigned long pfn = folio_pfn(folio); struct vm_area_struct *vma; unsigned long v_start; unsigned long v_end; pgoff_t start, end; start = index * pages_per_huge_page(h); end = (index + 1) * pages_per_huge_page(h); i_mmap_lock_write(mapping); retry: vma_lock = NULL; vma_interval_tree_foreach(vma, root, start, end - 1) { v_start = vma_offset_start(vma, start); v_end = vma_offset_end(vma, end); if (!hugetlb_vma_maps_pfn(vma, v_start, pfn)) continue; if (!hugetlb_vma_trylock_write(vma)) { vma_lock = vma->vm_private_data; /* * If we can not get vma lock, we need to drop * immap_sema and take locks in order. First, * take a ref on the vma_lock structure so that * we can be guaranteed it will not go away when * dropping immap_sema. */ kref_get(&vma_lock->refs); break; } unmap_hugepage_range(vma, v_start, v_end, NULL, ZAP_FLAG_DROP_MARKER); hugetlb_vma_unlock_write(vma); } i_mmap_unlock_write(mapping); if (vma_lock) { /* * Wait on vma_lock. We know it is still valid as we have * a reference. We must 'open code' vma locking as we do * not know if vma_lock is still attached to vma. */ down_write(&vma_lock->rw_sema); i_mmap_lock_write(mapping); vma = vma_lock->vma; if (!vma) { /* * If lock is no longer attached to vma, then just * unlock, drop our reference and retry looking for * other vmas. */ up_write(&vma_lock->rw_sema); kref_put(&vma_lock->refs, hugetlb_vma_lock_release); goto retry; } /* * vma_lock is still attached to vma. Check to see if vma * still maps page and if so, unmap. */ v_start = vma_offset_start(vma, start); v_end = vma_offset_end(vma, end); if (hugetlb_vma_maps_pfn(vma, v_start, pfn)) unmap_hugepage_range(vma, v_start, v_end, NULL, ZAP_FLAG_DROP_MARKER); kref_put(&vma_lock->refs, hugetlb_vma_lock_release); hugetlb_vma_unlock_write(vma); goto retry; } } static void hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end, zap_flags_t zap_flags) { struct vm_area_struct *vma; /* * end == 0 indicates that the entire range after start should be * unmapped. Note, end is exclusive, whereas the interval tree takes * an inclusive "last". */ vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) { unsigned long v_start; unsigned long v_end; if (!hugetlb_vma_trylock_write(vma)) continue; v_start = vma_offset_start(vma, start); v_end = vma_offset_end(vma, end); unmap_hugepage_range(vma, v_start, v_end, NULL, zap_flags); /* * Note that vma lock only exists for shared/non-private * vmas. Therefore, lock is not held when calling * unmap_hugepage_range for private vmas. */ hugetlb_vma_unlock_write(vma); } } /* * Called with hugetlb fault mutex held. * Returns true if page was actually removed, false otherwise. 
*/ static bool remove_inode_single_folio(struct hstate *h, struct inode *inode, struct address_space *mapping, struct folio *folio, pgoff_t index, bool truncate_op) { bool ret = false; /* * If folio is mapped, it was faulted in after being * unmapped in caller. Unmap (again) while holding * the fault mutex. The mutex will prevent faults * until we finish removing the folio. */ if (unlikely(folio_mapped(folio))) hugetlb_unmap_file_folio(h, mapping, folio, index); folio_lock(folio); /* * We must remove the folio from page cache before removing * the region/ reserve map (hugetlb_unreserve_pages). In * rare out of memory conditions, removal of the region/reserve * map could fail. Correspondingly, the subpool and global * reserve usage count can need to be adjusted. */ VM_BUG_ON_FOLIO(folio_test_hugetlb_restore_reserve(folio), folio); hugetlb_delete_from_page_cache(folio); ret = true; if (!truncate_op) { if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1))) hugetlb_fix_reserve_counts(inode); } folio_unlock(folio); return ret; } /* * remove_inode_hugepages handles two distinct cases: truncation and hole * punch. There are subtle differences in operation for each case. * * truncation is indicated by end of range being LLONG_MAX * In this case, we first scan the range and release found pages. * After releasing pages, hugetlb_unreserve_pages cleans up region/reserve * maps and global counts. Page faults can race with truncation. * During faults, hugetlb_no_page() checks i_size before page allocation, * and again after obtaining page table lock. It will 'back out' * allocations in the truncated range. * hole punch is indicated if end is not LLONG_MAX * In the hole punch case we scan the range and release found pages. * Only when releasing a page is the associated region/reserve map * deleted. The region/reserve map for ranges without associated * pages are not modified. Page faults can race with hole punch. * This is indicated if we find a mapped page. * Note: If the passed end of range value is beyond the end of file, but * not LLONG_MAX this routine still performs a hole punch operation. */ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, loff_t lend) { struct hstate *h = hstate_inode(inode); struct address_space *mapping = &inode->i_data; const pgoff_t end = lend >> PAGE_SHIFT; struct folio_batch fbatch; pgoff_t next, index; int i, freed = 0; bool truncate_op = (lend == LLONG_MAX); folio_batch_init(&fbatch); next = lstart >> PAGE_SHIFT; while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) { for (i = 0; i < folio_batch_count(&fbatch); ++i) { struct folio *folio = fbatch.folios[i]; u32 hash = 0; index = folio->index >> huge_page_order(h); hash = hugetlb_fault_mutex_hash(mapping, index); mutex_lock(&hugetlb_fault_mutex_table[hash]); /* * Remove folio that was part of folio_batch. */ if (remove_inode_single_folio(h, inode, mapping, folio, index, truncate_op)) freed++; mutex_unlock(&hugetlb_fault_mutex_table[hash]); } folio_batch_release(&fbatch); cond_resched(); } if (truncate_op) (void)hugetlb_unreserve_pages(inode, lstart >> huge_page_shift(h), LONG_MAX, freed); } static void hugetlbfs_evict_inode(struct inode *inode) { struct resv_map *resv_map; trace_hugetlbfs_evict_inode(inode); remove_inode_hugepages(inode, 0, LLONG_MAX); /* * Get the resv_map from the address space embedded in the inode. * This is the address space which points to any resv_map allocated * at inode creation time. 
If this is a device special inode, * i_mapping may not point to the original address space. */ resv_map = (struct resv_map *)(&inode->i_data)->i_private_data; /* Only regular and link inodes have associated reserve maps */ if (resv_map) resv_map_release(&resv_map->refs); clear_inode(inode); } static void hugetlb_vmtruncate(struct inode *inode, loff_t offset) { pgoff_t pgoff; struct address_space *mapping = inode->i_mapping; struct hstate *h = hstate_inode(inode); BUG_ON(offset & ~huge_page_mask(h)); pgoff = offset >> PAGE_SHIFT; i_size_write(inode, offset); i_mmap_lock_write(mapping); if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)) hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0, ZAP_FLAG_DROP_MARKER); i_mmap_unlock_write(mapping); remove_inode_hugepages(inode, offset, LLONG_MAX); } static void hugetlbfs_zero_partial_page(struct hstate *h, struct address_space *mapping, loff_t start, loff_t end) { pgoff_t idx = start >> huge_page_shift(h); struct folio *folio; folio = filemap_lock_hugetlb_folio(h, mapping, idx); if (IS_ERR(folio)) return; start = start & ~huge_page_mask(h); end = end & ~huge_page_mask(h); if (!end) end = huge_page_size(h); folio_zero_segment(folio, (size_t)start, (size_t)end); folio_unlock(folio); folio_put(folio); } static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) { struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); struct address_space *mapping = inode->i_mapping; struct hstate *h = hstate_inode(inode); loff_t hpage_size = huge_page_size(h); loff_t hole_start, hole_end; /* * hole_start and hole_end indicate the full pages within the hole. */ hole_start = round_up(offset, hpage_size); hole_end = round_down(offset + len, hpage_size); inode_lock(inode); /* protected by i_rwsem */ if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { inode_unlock(inode); return -EPERM; } i_mmap_lock_write(mapping); /* If range starts before first full page, zero partial page. */ if (offset < hole_start) hugetlbfs_zero_partial_page(h, mapping, offset, min(offset + len, hole_start)); /* Unmap users of full pages in the hole. */ if (hole_end > hole_start) { if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)) hugetlb_vmdelete_list(&mapping->i_mmap, hole_start >> PAGE_SHIFT, hole_end >> PAGE_SHIFT, 0); } /* If range extends beyond last full page, zero partial page. */ if ((offset + len) > hole_end && (offset + len) > hole_start) hugetlbfs_zero_partial_page(h, mapping, hole_end, offset + len); i_mmap_unlock_write(mapping); /* Remove full pages from the file. */ if (hole_end > hole_start) remove_inode_hugepages(inode, hole_start, hole_end); inode_unlock(inode); return 0; } static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct inode *inode = file_inode(file); struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); struct address_space *mapping = inode->i_mapping; struct hstate *h = hstate_inode(inode); struct vm_area_struct pseudo_vma; struct mm_struct *mm = current->mm; loff_t hpage_size = huge_page_size(h); unsigned long hpage_shift = huge_page_shift(h); pgoff_t start, index, end; int error; u32 hash; if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) return -EOPNOTSUPP; if (mode & FALLOC_FL_PUNCH_HOLE) { error = hugetlbfs_punch_hole(inode, offset, len); goto out_nolock; } /* * Default preallocate case. * For this range, start is rounded down and end is rounded up * as well as being converted to page offsets. 
*/ start = offset >> hpage_shift; end = (offset + len + hpage_size - 1) >> hpage_shift; inode_lock(inode); /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ error = inode_newsize_ok(inode, offset + len); if (error) goto out; if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { error = -EPERM; goto out; } /* * Initialize a pseudo vma as this is required by the huge page * allocation routines. */ vma_init(&pseudo_vma, mm); vm_flags_init(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED); pseudo_vma.vm_file = file; for (index = start; index < end; index++) { /* * This is supposed to be the vaddr where the page is being * faulted in, but we have no vaddr here. */ struct folio *folio; unsigned long addr; cond_resched(); /* * fallocate(2) manpage permits EINTR; we may have been * interrupted because we are using up too much memory. */ if (signal_pending(current)) { error = -EINTR; break; } /* addr is the offset within the file (zero based) */ addr = index * hpage_size; /* mutex taken here, fault path and hole punch */ hash = hugetlb_fault_mutex_hash(mapping, index); mutex_lock(&hugetlb_fault_mutex_table[hash]); /* See if already present in mapping to avoid alloc/free */ folio = filemap_get_folio(mapping, index << huge_page_order(h)); if (!IS_ERR(folio)) { folio_put(folio); mutex_unlock(&hugetlb_fault_mutex_table[hash]); continue; } /* * Allocate folio without setting the avoid_reserve argument. * There certainly are no reserves associated with the * pseudo_vma. However, there could be shared mappings with * reserves for the file at the inode level. If we fallocate * folios in these areas, we need to consume the reserves * to keep reservation accounting consistent. */ folio = alloc_hugetlb_folio(&pseudo_vma, addr, false); if (IS_ERR(folio)) { mutex_unlock(&hugetlb_fault_mutex_table[hash]); error = PTR_ERR(folio); goto out; } folio_zero_user(folio, addr); __folio_mark_uptodate(folio); error = hugetlb_add_to_page_cache(folio, mapping, index); if (unlikely(error)) { restore_reserve_on_error(h, &pseudo_vma, addr, folio); folio_put(folio); mutex_unlock(&hugetlb_fault_mutex_table[hash]); goto out; } mutex_unlock(&hugetlb_fault_mutex_table[hash]); folio_set_hugetlb_migratable(folio); /* * folio_unlock because locked by hugetlb_add_to_page_cache() * folio_put() due to reference from alloc_hugetlb_folio() */ folio_unlock(folio); folio_put(folio); } if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) i_size_write(inode, offset + len); inode_set_ctime_current(inode); out: inode_unlock(inode); out_nolock: trace_hugetlbfs_fallocate(inode, mode, offset, len, error); return error; } static int hugetlbfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); struct hstate *h = hstate_inode(inode); int error; unsigned int ia_valid = attr->ia_valid; struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); error = setattr_prepare(idmap, dentry, attr); if (error) return error; trace_hugetlbfs_setattr(inode, dentry, attr); if (ia_valid & ATTR_SIZE) { loff_t oldsize = inode->i_size; loff_t newsize = attr->ia_size; if (newsize & ~huge_page_mask(h)) return -EINVAL; /* protected by i_rwsem */ if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) || (newsize > oldsize && (info->seals & F_SEAL_GROW))) return -EPERM; hugetlb_vmtruncate(inode, newsize); } setattr_copy(idmap, inode, attr); mark_inode_dirty(inode); return 0; } static struct inode *hugetlbfs_get_root(struct super_block *sb, struct hugetlbfs_fs_context *ctx) { 
struct inode *inode; inode = new_inode(sb); if (inode) { inode->i_ino = get_next_ino(); inode->i_mode = S_IFDIR | ctx->mode; inode->i_uid = ctx->uid; inode->i_gid = ctx->gid; simple_inode_init_ts(inode); inode->i_op = &hugetlbfs_dir_inode_operations; inode->i_fop = &simple_dir_operations; /* directory inodes start off with i_nlink == 2 (for "." entry) */ inc_nlink(inode); lockdep_annotate_inode_mutex_key(inode); } return inode; } /* * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never * be taken from reclaim -- unlike regular filesystems. This needs an * annotation because huge_pmd_share() does an allocation under hugetlb's * i_mmap_rwsem. */ static struct lock_class_key hugetlbfs_i_mmap_rwsem_key; static struct inode *hugetlbfs_get_inode(struct super_block *sb, struct mnt_idmap *idmap, struct inode *dir, umode_t mode, dev_t dev) { struct inode *inode; struct resv_map *resv_map = NULL; /* * Reserve maps are only needed for inodes that can have associated * page allocations. */ if (S_ISREG(mode) || S_ISLNK(mode)) { resv_map = resv_map_alloc(); if (!resv_map) return NULL; } inode = new_inode(sb); if (inode) { struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); inode->i_ino = get_next_ino(); inode_init_owner(idmap, inode, dir, mode); lockdep_set_class(&inode->i_mapping->i_mmap_rwsem, &hugetlbfs_i_mmap_rwsem_key); inode->i_mapping->a_ops = &hugetlbfs_aops; simple_inode_init_ts(inode); inode->i_mapping->i_private_data = resv_map; info->seals = F_SEAL_SEAL; switch (mode & S_IFMT) { default: init_special_inode(inode, mode, dev); break; case S_IFREG: inode->i_op = &hugetlbfs_inode_operations; inode->i_fop = &hugetlbfs_file_operations; break; case S_IFDIR: inode->i_op = &hugetlbfs_dir_inode_operations; inode->i_fop = &simple_dir_operations; /* directory inodes start off with i_nlink == 2 (for "." entry) */ inc_nlink(inode); break; case S_IFLNK: inode->i_op = &page_symlink_inode_operations; inode_nohighmem(inode); break; } lockdep_annotate_inode_mutex_key(inode); trace_hugetlbfs_alloc_inode(inode, dir, mode); } else { if (resv_map) kref_put(&resv_map->refs, resv_map_release); } return inode; } /* * File creation. Allocate an inode, and we're done.. 
*/ static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) { struct inode *inode; inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, dev); if (!inode) return -ENOSPC; inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); d_instantiate(dentry, inode); dget(dentry);/* Extra count - pin the dentry in core */ return 0; } static struct dentry *hugetlbfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) { int retval = hugetlbfs_mknod(idmap, dir, dentry, mode | S_IFDIR, 0); if (!retval) inc_nlink(dir); return ERR_PTR(retval); } static int hugetlbfs_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { return hugetlbfs_mknod(idmap, dir, dentry, mode | S_IFREG, 0); } static int hugetlbfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir, struct file *file, umode_t mode) { struct inode *inode; inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode | S_IFREG, 0); if (!inode) return -ENOSPC; inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); d_tmpfile(file, inode); return finish_open_simple(file, 0); } static int hugetlbfs_symlink(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, const char *symname) { const umode_t mode = S_IFLNK|S_IRWXUGO; struct inode *inode; int error = -ENOSPC; inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, 0); if (inode) { int l = strlen(symname)+1; error = page_symlink(inode, symname, l); if (!error) { d_instantiate(dentry, inode); dget(dentry); } else iput(inode); } inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); return error; } #ifdef CONFIG_MIGRATION static int hugetlbfs_migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode) { int rc; rc = migrate_huge_page_move_mapping(mapping, dst, src); if (rc != MIGRATEPAGE_SUCCESS) return rc; if (hugetlb_folio_subpool(src)) { hugetlb_set_folio_subpool(dst, hugetlb_folio_subpool(src)); hugetlb_set_folio_subpool(src, NULL); } folio_migrate_flags(dst, src); return MIGRATEPAGE_SUCCESS; } #else #define hugetlbfs_migrate_folio NULL #endif static int hugetlbfs_error_remove_folio(struct address_space *mapping, struct folio *folio) { return 0; } /* * Display the mount options in /proc/mounts. 
*/ static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root) { struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb); struct hugepage_subpool *spool = sbinfo->spool; unsigned long hpage_size = huge_page_size(sbinfo->hstate); unsigned hpage_shift = huge_page_shift(sbinfo->hstate); char mod; if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) seq_printf(m, ",uid=%u", from_kuid_munged(&init_user_ns, sbinfo->uid)); if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) seq_printf(m, ",gid=%u", from_kgid_munged(&init_user_ns, sbinfo->gid)); if (sbinfo->mode != 0755) seq_printf(m, ",mode=%o", sbinfo->mode); if (sbinfo->max_inodes != -1) seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes); hpage_size /= 1024; mod = 'K'; if (hpage_size >= 1024) { hpage_size /= 1024; mod = 'M'; } seq_printf(m, ",pagesize=%lu%c", hpage_size, mod); if (spool) { if (spool->max_hpages != -1) seq_printf(m, ",size=%llu", (unsigned long long)spool->max_hpages << hpage_shift); if (spool->min_hpages != -1) seq_printf(m, ",min_size=%llu", (unsigned long long)spool->min_hpages << hpage_shift); } return 0; } static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb); struct hstate *h = hstate_inode(d_inode(dentry)); u64 id = huge_encode_dev(dentry->d_sb->s_dev); buf->f_fsid = u64_to_fsid(id); buf->f_type = HUGETLBFS_MAGIC; buf->f_bsize = huge_page_size(h); if (sbinfo) { spin_lock(&sbinfo->stat_lock); /* If no limits set, just report 0 or -1 for max/free/used * blocks, like simple_statfs() */ if (sbinfo->spool) { long free_pages; spin_lock_irq(&sbinfo->spool->lock); buf->f_blocks = sbinfo->spool->max_hpages; free_pages = sbinfo->spool->max_hpages - sbinfo->spool->used_hpages; buf->f_bavail = buf->f_bfree = free_pages; spin_unlock_irq(&sbinfo->spool->lock); buf->f_files = sbinfo->max_inodes; buf->f_ffree = sbinfo->free_inodes; } spin_unlock(&sbinfo->stat_lock); } buf->f_namelen = NAME_MAX; return 0; } static void hugetlbfs_put_super(struct super_block *sb) { struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb); if (sbi) { sb->s_fs_info = NULL; if (sbi->spool) hugepage_put_subpool(sbi->spool); kfree(sbi); } } static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo) { if (sbinfo->free_inodes >= 0) { spin_lock(&sbinfo->stat_lock); if (unlikely(!sbinfo->free_inodes)) { spin_unlock(&sbinfo->stat_lock); return 0; } sbinfo->free_inodes--; spin_unlock(&sbinfo->stat_lock); } return 1; } static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo) { if (sbinfo->free_inodes >= 0) { spin_lock(&sbinfo->stat_lock); sbinfo->free_inodes++; spin_unlock(&sbinfo->stat_lock); } } static struct kmem_cache *hugetlbfs_inode_cachep; static struct inode *hugetlbfs_alloc_inode(struct super_block *sb) { struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb); struct hugetlbfs_inode_info *p; if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo))) return NULL; p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL); if (unlikely(!p)) { hugetlbfs_inc_free_inodes(sbinfo); return NULL; } return &p->vfs_inode; } static void hugetlbfs_free_inode(struct inode *inode) { trace_hugetlbfs_free_inode(inode); kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode)); } static void hugetlbfs_destroy_inode(struct inode *inode) { hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb)); } static const struct address_space_operations hugetlbfs_aops = { .write_begin = hugetlbfs_write_begin, .write_end = hugetlbfs_write_end, .dirty_folio = noop_dirty_folio, .migrate_folio = 
hugetlbfs_migrate_folio, .error_remove_folio = hugetlbfs_error_remove_folio, }; static void init_once(void *foo) { struct hugetlbfs_inode_info *ei = foo; inode_init_once(&ei->vfs_inode); } static const struct file_operations hugetlbfs_file_operations = { .read_iter = hugetlbfs_read_iter, .mmap = hugetlbfs_file_mmap, .fsync = noop_fsync, .get_unmapped_area = hugetlb_get_unmapped_area, .llseek = default_llseek, .fallocate = hugetlbfs_fallocate, .fop_flags = FOP_HUGE_PAGES, }; static const struct inode_operations hugetlbfs_dir_inode_operations = { .create = hugetlbfs_create, .lookup = simple_lookup, .link = simple_link, .unlink = simple_unlink, .symlink = hugetlbfs_symlink, .mkdir = hugetlbfs_mkdir, .rmdir = simple_rmdir, .mknod = hugetlbfs_mknod, .rename = simple_rename, .setattr = hugetlbfs_setattr, .tmpfile = hugetlbfs_tmpfile, }; static const struct inode_operations hugetlbfs_inode_operations = { .setattr = hugetlbfs_setattr, }; static const struct super_operations hugetlbfs_ops = { .alloc_inode = hugetlbfs_alloc_inode, .free_inode = hugetlbfs_free_inode, .destroy_inode = hugetlbfs_destroy_inode, .evict_inode = hugetlbfs_evict_inode, .statfs = hugetlbfs_statfs, .put_super = hugetlbfs_put_super, .show_options = hugetlbfs_show_options, }; /* * Convert size option passed from command line to number of huge pages * in the pool specified by hstate. Size option could be in bytes * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT). */ static long hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt, enum hugetlbfs_size_type val_type) { if (val_type == NO_SIZE) return -1; if (val_type == SIZE_PERCENT) { size_opt <<= huge_page_shift(h); size_opt *= h->max_huge_pages; do_div(size_opt, 100); } size_opt >>= huge_page_shift(h); return size_opt; } /* * Parse one mount parameter. */ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct hugetlbfs_fs_context *ctx = fc->fs_private; struct fs_parse_result result; struct hstate *h; char *rest; unsigned long ps; int opt; opt = fs_parse(fc, hugetlb_fs_parameters, param, &result); if (opt < 0) return opt; switch (opt) { case Opt_uid: ctx->uid = result.uid; return 0; case Opt_gid: ctx->gid = result.gid; return 0; case Opt_mode: ctx->mode = result.uint_32 & 01777U; return 0; case Opt_size: /* memparse() will accept a K/M/G without a digit */ if (!param->string || !isdigit(param->string[0])) goto bad_val; ctx->max_size_opt = memparse(param->string, &rest); ctx->max_val_type = SIZE_STD; if (*rest == '%') ctx->max_val_type = SIZE_PERCENT; return 0; case Opt_nr_inodes: /* memparse() will accept a K/M/G without a digit */ if (!param->string || !isdigit(param->string[0])) goto bad_val; ctx->nr_inodes = memparse(param->string, &rest); return 0; case Opt_pagesize: ps = memparse(param->string, &rest); h = size_to_hstate(ps); if (!h) { pr_err("Unsupported page size %lu MB\n", ps / SZ_1M); return -EINVAL; } ctx->hstate = h; return 0; case Opt_min_size: /* memparse() will accept a K/M/G without a digit */ if (!param->string || !isdigit(param->string[0])) goto bad_val; ctx->min_size_opt = memparse(param->string, &rest); ctx->min_val_type = SIZE_STD; if (*rest == '%') ctx->min_val_type = SIZE_PERCENT; return 0; default: return -EINVAL; } bad_val: return invalfc(fc, "Bad value '%s' for mount option '%s'\n", param->string, param->key); } /* * Validate the parsed options. 
*/ static int hugetlbfs_validate(struct fs_context *fc) { struct hugetlbfs_fs_context *ctx = fc->fs_private; /* * Use huge page pool size (in hstate) to convert the size * options to number of huge pages. If NO_SIZE, -1 is returned. */ ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate, ctx->max_size_opt, ctx->max_val_type); ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate, ctx->min_size_opt, ctx->min_val_type); /* * If max_size was specified, then min_size must be smaller */ if (ctx->max_val_type > NO_SIZE && ctx->min_hpages > ctx->max_hpages) { pr_err("Minimum size can not be greater than maximum size\n"); return -EINVAL; } return 0; } static int hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc) { struct hugetlbfs_fs_context *ctx = fc->fs_private; struct hugetlbfs_sb_info *sbinfo; sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL); if (!sbinfo) return -ENOMEM; sb->s_fs_info = sbinfo; spin_lock_init(&sbinfo->stat_lock); sbinfo->hstate = ctx->hstate; sbinfo->max_inodes = ctx->nr_inodes; sbinfo->free_inodes = ctx->nr_inodes; sbinfo->spool = NULL; sbinfo->uid = ctx->uid; sbinfo->gid = ctx->gid; sbinfo->mode = ctx->mode; /* * Allocate and initialize subpool if maximum or minimum size is * specified. Any needed reservations (for minimum size) are taken * when the subpool is created. */ if (ctx->max_hpages != -1 || ctx->min_hpages != -1) { sbinfo->spool = hugepage_new_subpool(ctx->hstate, ctx->max_hpages, ctx->min_hpages); if (!sbinfo->spool) goto out_free; } sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize = huge_page_size(ctx->hstate); sb->s_blocksize_bits = huge_page_shift(ctx->hstate); sb->s_magic = HUGETLBFS_MAGIC; sb->s_op = &hugetlbfs_ops; sb->s_time_gran = 1; /* * Due to the special and limited functionality of hugetlbfs, it does * not work well as a stacking filesystem. 
*/ sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH; sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx)); if (!sb->s_root) goto out_free; return 0; out_free: kfree(sbinfo->spool); kfree(sbinfo); return -ENOMEM; } static int hugetlbfs_get_tree(struct fs_context *fc) { int err = hugetlbfs_validate(fc); if (err) return err; return get_tree_nodev(fc, hugetlbfs_fill_super); } static void hugetlbfs_fs_context_free(struct fs_context *fc) { kfree(fc->fs_private); } static const struct fs_context_operations hugetlbfs_fs_context_ops = { .free = hugetlbfs_fs_context_free, .parse_param = hugetlbfs_parse_param, .get_tree = hugetlbfs_get_tree, }; static int hugetlbfs_init_fs_context(struct fs_context *fc) { struct hugetlbfs_fs_context *ctx; ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->max_hpages = -1; /* No limit on size by default */ ctx->nr_inodes = -1; /* No limit on number of inodes by default */ ctx->uid = current_fsuid(); ctx->gid = current_fsgid(); ctx->mode = 0755; ctx->hstate = &default_hstate; ctx->min_hpages = -1; /* No default minimum size */ ctx->max_val_type = NO_SIZE; ctx->min_val_type = NO_SIZE; fc->fs_private = ctx; fc->ops = &hugetlbfs_fs_context_ops; return 0; } static struct file_system_type hugetlbfs_fs_type = { .name = "hugetlbfs", .init_fs_context = hugetlbfs_init_fs_context, .parameters = hugetlb_fs_parameters, .kill_sb = kill_litter_super, .fs_flags = FS_ALLOW_IDMAP, }; static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE]; static int can_do_hugetlb_shm(void) { kgid_t shm_group; shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group); return capable(CAP_IPC_LOCK) || in_group_p(shm_group); } static int get_hstate_idx(int page_size_log) { struct hstate *h = hstate_sizelog(page_size_log); if (!h) return -1; return hstate_index(h); } /* * Note that size should be aligned to proper hugepage size in caller side, * otherwise hugetlb_reserve_pages reserves one less hugepages than intended. */ struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag, int creat_flags, int page_size_log) { struct inode *inode; struct vfsmount *mnt; int hstate_idx; struct file *file; hstate_idx = get_hstate_idx(page_size_log); if (hstate_idx < 0) return ERR_PTR(-ENODEV); mnt = hugetlbfs_vfsmount[hstate_idx]; if (!mnt) return ERR_PTR(-ENOENT); if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) { struct ucounts *ucounts = current_ucounts(); if (user_shm_lock(size, ucounts)) { pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n", current->comm, current->pid); user_shm_unlock(size, ucounts); } return ERR_PTR(-EPERM); } file = ERR_PTR(-ENOSPC); /* hugetlbfs_vfsmount[] mounts do not use idmapped mounts. 
*/ inode = hugetlbfs_get_inode(mnt->mnt_sb, &nop_mnt_idmap, NULL, S_IFREG | S_IRWXUGO, 0); if (!inode) goto out; if (creat_flags == HUGETLB_SHMFS_INODE) inode->i_flags |= S_PRIVATE; inode->i_size = size; clear_nlink(inode); if (!hugetlb_reserve_pages(inode, 0, size >> huge_page_shift(hstate_inode(inode)), NULL, acctflag)) file = ERR_PTR(-ENOMEM); else file = alloc_file_pseudo(inode, mnt, name, O_RDWR, &hugetlbfs_file_operations); if (!IS_ERR(file)) return file; iput(inode); out: return file; } static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h) { struct fs_context *fc; struct vfsmount *mnt; fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT); if (IS_ERR(fc)) { mnt = ERR_CAST(fc); } else { struct hugetlbfs_fs_context *ctx = fc->fs_private; ctx->hstate = h; mnt = fc_mount(fc); put_fs_context(fc); } if (IS_ERR(mnt)) pr_err("Cannot mount internal hugetlbfs for page size %luK", huge_page_size(h) / SZ_1K); return mnt; } static int __init init_hugetlbfs_fs(void) { struct vfsmount *mnt; struct hstate *h; int error; int i; if (!hugepages_supported()) { pr_info("disabling because there are no supported hugepage sizes\n"); return -ENOTSUPP; } error = -ENOMEM; hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache", sizeof(struct hugetlbfs_inode_info), 0, SLAB_ACCOUNT, init_once); if (hugetlbfs_inode_cachep == NULL) goto out; error = register_filesystem(&hugetlbfs_fs_type); if (error) goto out_free; /* default hstate mount is required */ mnt = mount_one_hugetlbfs(&default_hstate); if (IS_ERR(mnt)) { error = PTR_ERR(mnt); goto out_unreg; } hugetlbfs_vfsmount[default_hstate_idx] = mnt; /* other hstates are optional */ i = 0; for_each_hstate(h) { if (i == default_hstate_idx) { i++; continue; } mnt = mount_one_hugetlbfs(h); if (IS_ERR(mnt)) hugetlbfs_vfsmount[i] = NULL; else hugetlbfs_vfsmount[i] = mnt; i++; } return 0; out_unreg: (void)unregister_filesystem(&hugetlbfs_fs_type); out_free: kmem_cache_destroy(hugetlbfs_inode_cachep); out: return error; } fs_initcall(init_hugetlbfs_fs) |
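/*
 * Illustrative user-space sketch, not part of this file: creates and maps
 * one huge page from a file on an existing hugetlbfs mount. The mount point
 * "/dev/hugepages" and the 2 MiB huge page size are assumptions, and the
 * pool is assumed to have free pages (e.g. via /proc/sys/vm/nr_hugepages).
 * The length (and any offset) passed to mmap() must be a multiple of the
 * huge page size, which is what hugetlbfs_file_mmap() above enforces with
 * huge_page_mask(); the mapping reservation is taken at mmap() time by
 * hugetlb_reserve_pages(), so mmap() fails with ENOMEM if no pages are left.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SIZE      (2UL * 1024 * 1024)     /* assumed 2 MiB huge pages */

int main(void)
{
        char *p;
        int fd = open("/dev/hugepages/example", O_CREAT | O_RDWR, 0600);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Length must be huge-page aligned; writable shared mapping grows i_size. */
        p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
                perror("mmap");
                close(fd);
                return 1;
        }
        strcpy(p, "backed by one huge page");
        printf("%s\n", p);
        munmap(p, HPAGE_SIZE);
        close(fd);
        unlink("/dev/hugepages/example");
        return 0;
}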
// SPDX-License-Identifier: GPL-2.0+ /* * Belkin USB Serial Adapter Driver * * Copyright (C) 2000 William Greathouse (wgreathouse@smva.com) * Copyright (C) 2000-2001 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2010 Johan Hovold (jhovold@gmail.com) * * This program is largely derived from work by the linux-usb group * and associated source files. Please see the usb/serial files for * individual credits and copyrights. * * See Documentation/usb/usb-serial.rst for more information on using this * driver * * TODO: * -- Add true modem control line query capability. Currently we track the * states reported by the interrupt and the states we request.
* -- Add support for flush commands */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include "belkin_sa.h" #define DRIVER_AUTHOR "William Greathouse <wgreathouse@smva.com>" #define DRIVER_DESC "USB Belkin Serial converter driver" /* function prototypes for a Belkin USB Serial Adapter F5U103 */ static int belkin_sa_port_probe(struct usb_serial_port *port); static void belkin_sa_port_remove(struct usb_serial_port *port); static int belkin_sa_open(struct tty_struct *tty, struct usb_serial_port *port); static void belkin_sa_close(struct usb_serial_port *port); static void belkin_sa_read_int_callback(struct urb *urb); static void belkin_sa_process_read_urb(struct urb *urb); static void belkin_sa_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios); static int belkin_sa_break_ctl(struct tty_struct *tty, int break_state); static int belkin_sa_tiocmget(struct tty_struct *tty); static int belkin_sa_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear); static const struct usb_device_id id_table[] = { { USB_DEVICE(BELKIN_SA_VID, BELKIN_SA_PID) }, { USB_DEVICE(BELKIN_OLD_VID, BELKIN_OLD_PID) }, { USB_DEVICE(PERACOM_VID, PERACOM_PID) }, { USB_DEVICE(GOHUBS_VID, GOHUBS_PID) }, { USB_DEVICE(GOHUBS_VID, HANDYLINK_PID) }, { USB_DEVICE(BELKIN_DOCKSTATION_VID, BELKIN_DOCKSTATION_PID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); /* All of the device info needed for the serial converters */ static struct usb_serial_driver belkin_device = { .driver = { .name = "belkin", }, .description = "Belkin / Peracom / GoHubs USB Serial Adapter", .id_table = id_table, .num_ports = 1, .open = belkin_sa_open, .close = belkin_sa_close, .read_int_callback = belkin_sa_read_int_callback, .process_read_urb = belkin_sa_process_read_urb, .set_termios = belkin_sa_set_termios, .break_ctl = belkin_sa_break_ctl, .tiocmget = belkin_sa_tiocmget, .tiocmset = belkin_sa_tiocmset, .port_probe = belkin_sa_port_probe, .port_remove = belkin_sa_port_remove, }; static struct usb_serial_driver * const serial_drivers[] = { &belkin_device, NULL }; struct belkin_sa_private { spinlock_t lock; unsigned long control_state; unsigned char last_lsr; unsigned char last_msr; int bad_flow_control; }; /* * *************************************************************************** * Belkin USB Serial Adapter F5U103 specific driver functions * *************************************************************************** */ #define WDR_TIMEOUT 5000 /* default urb timeout */ /* assumes that struct usb_serial *serial is available */ #define BSA_USB_CMD(c, v) usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), \ (c), BELKIN_SA_SET_REQUEST_TYPE, \ (v), 0, NULL, 0, WDR_TIMEOUT) static int belkin_sa_port_probe(struct usb_serial_port *port) { struct usb_device *dev = port->serial->dev; struct belkin_sa_private *priv; priv = kmalloc(sizeof(struct belkin_sa_private), GFP_KERNEL); if (!priv) return -ENOMEM; spin_lock_init(&priv->lock); priv->control_state = 0; priv->last_lsr = 0; priv->last_msr = 0; /* see comments at top of file */ priv->bad_flow_control = (le16_to_cpu(dev->descriptor.bcdDevice) <= 0x0206) ? 
1 : 0; dev_info(&dev->dev, "bcdDevice: %04x, bfc: %d\n", le16_to_cpu(dev->descriptor.bcdDevice), priv->bad_flow_control); usb_set_serial_port_data(port, priv); return 0; } static void belkin_sa_port_remove(struct usb_serial_port *port) { struct belkin_sa_private *priv; priv = usb_get_serial_port_data(port); kfree(priv); } static int belkin_sa_open(struct tty_struct *tty, struct usb_serial_port *port) { int retval; retval = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (retval) { dev_err(&port->dev, "usb_submit_urb(read int) failed\n"); return retval; } retval = usb_serial_generic_open(tty, port); if (retval) usb_kill_urb(port->interrupt_in_urb); return retval; } static void belkin_sa_close(struct usb_serial_port *port) { usb_serial_generic_close(port); usb_kill_urb(port->interrupt_in_urb); } static void belkin_sa_read_int_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; struct belkin_sa_private *priv; unsigned char *data = urb->transfer_buffer; int retval; int status = urb->status; unsigned long flags; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(&port->dev, "%s - urb shutting down with status: %d\n", __func__, status); return; default: dev_dbg(&port->dev, "%s - nonzero urb status received: %d\n", __func__, status); goto exit; } usb_serial_debug_data(&port->dev, __func__, urb->actual_length, data); /* Handle known interrupt data */ /* ignore data[0] and data[1] */ priv = usb_get_serial_port_data(port); spin_lock_irqsave(&priv->lock, flags); priv->last_msr = data[BELKIN_SA_MSR_INDEX]; /* Record Control Line states */ if (priv->last_msr & BELKIN_SA_MSR_DSR) priv->control_state |= TIOCM_DSR; else priv->control_state &= ~TIOCM_DSR; if (priv->last_msr & BELKIN_SA_MSR_CTS) priv->control_state |= TIOCM_CTS; else priv->control_state &= ~TIOCM_CTS; if (priv->last_msr & BELKIN_SA_MSR_RI) priv->control_state |= TIOCM_RI; else priv->control_state &= ~TIOCM_RI; if (priv->last_msr & BELKIN_SA_MSR_CD) priv->control_state |= TIOCM_CD; else priv->control_state &= ~TIOCM_CD; priv->last_lsr = data[BELKIN_SA_LSR_INDEX]; spin_unlock_irqrestore(&priv->lock, flags); exit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(&port->dev, "%s - usb_submit_urb failed with " "result %d\n", __func__, retval); } static void belkin_sa_process_read_urb(struct urb *urb) { struct usb_serial_port *port = urb->context; struct belkin_sa_private *priv = usb_get_serial_port_data(port); unsigned char *data = urb->transfer_buffer; unsigned long flags; unsigned char status; char tty_flag; /* Update line status */ tty_flag = TTY_NORMAL; spin_lock_irqsave(&priv->lock, flags); status = priv->last_lsr; priv->last_lsr &= ~BELKIN_SA_LSR_ERR; spin_unlock_irqrestore(&priv->lock, flags); if (!urb->actual_length) return; if (status & BELKIN_SA_LSR_ERR) { /* Break takes precedence over parity, which takes precedence * over framing errors. */ if (status & BELKIN_SA_LSR_BI) tty_flag = TTY_BREAK; else if (status & BELKIN_SA_LSR_PE) tty_flag = TTY_PARITY; else if (status & BELKIN_SA_LSR_FE) tty_flag = TTY_FRAME; dev_dbg(&port->dev, "tty_flag = %d\n", tty_flag); /* Overrun is special, not associated with a char. 
*/ if (status & BELKIN_SA_LSR_OE) tty_insert_flip_char(&port->port, 0, TTY_OVERRUN); } tty_insert_flip_string_fixed_flag(&port->port, data, tty_flag, urb->actual_length); tty_flip_buffer_push(&port->port); } static void belkin_sa_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct usb_serial *serial = port->serial; struct belkin_sa_private *priv = usb_get_serial_port_data(port); unsigned int iflag; unsigned int cflag; unsigned int old_iflag = 0; unsigned int old_cflag = 0; __u16 urb_value = 0; /* Will hold the new flags */ unsigned long flags; unsigned long control_state; int bad_flow_control; speed_t baud; struct ktermios *termios = &tty->termios; iflag = termios->c_iflag; cflag = termios->c_cflag; termios->c_cflag &= ~CMSPAR; /* get a local copy of the current port settings */ spin_lock_irqsave(&priv->lock, flags); control_state = priv->control_state; bad_flow_control = priv->bad_flow_control; spin_unlock_irqrestore(&priv->lock, flags); old_iflag = old_termios->c_iflag; old_cflag = old_termios->c_cflag; /* Set the baud rate */ if ((cflag & CBAUD) != (old_cflag & CBAUD)) { /* reassert DTR and (maybe) RTS on transition from B0 */ if ((old_cflag & CBAUD) == B0) { control_state |= (TIOCM_DTR|TIOCM_RTS); if (BSA_USB_CMD(BELKIN_SA_SET_DTR_REQUEST, 1) < 0) dev_err(&port->dev, "Set DTR error\n"); /* don't set RTS if using hardware flow control */ if (!(old_cflag & CRTSCTS)) if (BSA_USB_CMD(BELKIN_SA_SET_RTS_REQUEST , 1) < 0) dev_err(&port->dev, "Set RTS error\n"); } } baud = tty_get_baud_rate(tty); if (baud) { urb_value = BELKIN_SA_BAUD(baud); /* Clip to maximum speed */ if (urb_value == 0) urb_value = 1; /* Turn it back into a resulting real baud rate */ baud = BELKIN_SA_BAUD(urb_value); /* Report the actual baud rate back to the caller */ tty_encode_baud_rate(tty, baud, baud); if (BSA_USB_CMD(BELKIN_SA_SET_BAUDRATE_REQUEST, urb_value) < 0) dev_err(&port->dev, "Set baudrate error\n"); } else { /* Disable flow control */ if (BSA_USB_CMD(BELKIN_SA_SET_FLOW_CTRL_REQUEST, BELKIN_SA_FLOW_NONE) < 0) dev_err(&port->dev, "Disable flowcontrol error\n"); /* Drop RTS and DTR */ control_state &= ~(TIOCM_DTR | TIOCM_RTS); if (BSA_USB_CMD(BELKIN_SA_SET_DTR_REQUEST, 0) < 0) dev_err(&port->dev, "DTR LOW error\n"); if (BSA_USB_CMD(BELKIN_SA_SET_RTS_REQUEST, 0) < 0) dev_err(&port->dev, "RTS LOW error\n"); } /* set the parity */ if ((cflag ^ old_cflag) & (PARENB | PARODD)) { if (cflag & PARENB) urb_value = (cflag & PARODD) ? BELKIN_SA_PARITY_ODD : BELKIN_SA_PARITY_EVEN; else urb_value = BELKIN_SA_PARITY_NONE; if (BSA_USB_CMD(BELKIN_SA_SET_PARITY_REQUEST, urb_value) < 0) dev_err(&port->dev, "Set parity error\n"); } /* set the number of data bits */ if ((cflag & CSIZE) != (old_cflag & CSIZE)) { urb_value = BELKIN_SA_DATA_BITS(tty_get_char_size(cflag)); if (BSA_USB_CMD(BELKIN_SA_SET_DATA_BITS_REQUEST, urb_value) < 0) dev_err(&port->dev, "Set data bits error\n"); } /* set the number of stop bits */ if ((cflag & CSTOPB) != (old_cflag & CSTOPB)) { urb_value = (cflag & CSTOPB) ? 
BELKIN_SA_STOP_BITS(2) : BELKIN_SA_STOP_BITS(1); if (BSA_USB_CMD(BELKIN_SA_SET_STOP_BITS_REQUEST, urb_value) < 0) dev_err(&port->dev, "Set stop bits error\n"); } /* Set flow control */ if (((iflag ^ old_iflag) & (IXOFF | IXON)) || ((cflag ^ old_cflag) & CRTSCTS)) { urb_value = 0; if ((iflag & IXOFF) || (iflag & IXON)) urb_value |= (BELKIN_SA_FLOW_OXON | BELKIN_SA_FLOW_IXON); else urb_value &= ~(BELKIN_SA_FLOW_OXON | BELKIN_SA_FLOW_IXON); if (cflag & CRTSCTS) urb_value |= (BELKIN_SA_FLOW_OCTS | BELKIN_SA_FLOW_IRTS); else urb_value &= ~(BELKIN_SA_FLOW_OCTS | BELKIN_SA_FLOW_IRTS); if (bad_flow_control) urb_value &= ~(BELKIN_SA_FLOW_IRTS); if (BSA_USB_CMD(BELKIN_SA_SET_FLOW_CTRL_REQUEST, urb_value) < 0) dev_err(&port->dev, "Set flow control error\n"); } /* save off the modified port settings */ spin_lock_irqsave(&priv->lock, flags); priv->control_state = control_state; spin_unlock_irqrestore(&priv->lock, flags); } static int belkin_sa_break_ctl(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct usb_serial *serial = port->serial; int ret; ret = BSA_USB_CMD(BELKIN_SA_SET_BREAK_REQUEST, break_state ? 1 : 0); if (ret < 0) { dev_err(&port->dev, "Set break_ctl %d\n", break_state); return ret; } return 0; } static int belkin_sa_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct belkin_sa_private *priv = usb_get_serial_port_data(port); unsigned long control_state; unsigned long flags; spin_lock_irqsave(&priv->lock, flags); control_state = priv->control_state; spin_unlock_irqrestore(&priv->lock, flags); return control_state; } static int belkin_sa_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct usb_serial *serial = port->serial; struct belkin_sa_private *priv = usb_get_serial_port_data(port); unsigned long control_state; unsigned long flags; int retval; int rts = 0; int dtr = 0; spin_lock_irqsave(&priv->lock, flags); control_state = priv->control_state; if (set & TIOCM_RTS) { control_state |= TIOCM_RTS; rts = 1; } if (set & TIOCM_DTR) { control_state |= TIOCM_DTR; dtr = 1; } if (clear & TIOCM_RTS) { control_state &= ~TIOCM_RTS; rts = 0; } if (clear & TIOCM_DTR) { control_state &= ~TIOCM_DTR; dtr = 0; } priv->control_state = control_state; spin_unlock_irqrestore(&priv->lock, flags); retval = BSA_USB_CMD(BELKIN_SA_SET_RTS_REQUEST, rts); if (retval < 0) { dev_err(&port->dev, "Set RTS error %d\n", retval); goto exit; } retval = BSA_USB_CMD(BELKIN_SA_SET_DTR_REQUEST, dtr); if (retval < 0) { dev_err(&port->dev, "Set DTR error %d\n", retval); goto exit; } exit: return retval; } module_usb_serial_driver(serial_drivers, id_table); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); |
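/*
 * Illustrative sketch (not part of the driver above): how the set_termios
 * path serviced by belkin_sa_set_termios() is typically exercised from user
 * space through the standard POSIX termios interface.  The device node name
 * /dev/ttyUSB0 and the 9600 8N1 + RTS/CTS settings are assumptions made for
 * this example only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int belkin_example_configure(const char *node)	/* e.g. "/dev/ttyUSB0" */
{
	struct termios tio;
	int fd = open(node, O_RDWR | O_NOCTTY);

	if (fd < 0) {
		perror("open");
		return -1;
	}
	if (tcgetattr(fd, &tio) < 0) {
		perror("tcgetattr");
		close(fd);
		return -1;
	}
	cfmakeraw(&tio);		/* raw I/O, 8 data bits, no parity */
	tio.c_cflag |= CRTSCTS;		/* hardware (RTS/CTS) flow control */
	cfsetispeed(&tio, B9600);
	cfsetospeed(&tio, B9600);
	/* the tty layer hands the changed settings to the driver's set_termios */
	if (tcsetattr(fd, TCSANOW, &tio) < 0) {
		perror("tcsetattr");
		close(fd);
		return -1;
	}
	return fd;
}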
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk) * Copyright Darryl Miles G7LED (dlm@g7led.demon.co.uk) */ #include <linux/module.h> #include
<linux/moduleparam.h> #include <linux/capability.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/stat.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <net/net_namespace.h> #include <net/sock.h> #include <linux/uaccess.h> #include <linux/fcntl.h> #include <linux/termios.h> /* For TIOCINQ/OUTQ */ #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/notifier.h> #include <net/netrom.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <net/ip.h> #include <net/tcp_states.h> #include <net/arp.h> #include <linux/init.h> static int nr_ndevs = 4; int sysctl_netrom_default_path_quality = NR_DEFAULT_QUAL; int sysctl_netrom_obsolescence_count_initialiser = NR_DEFAULT_OBS; int sysctl_netrom_network_ttl_initialiser = NR_DEFAULT_TTL; int sysctl_netrom_transport_timeout = NR_DEFAULT_T1; int sysctl_netrom_transport_maximum_tries = NR_DEFAULT_N2; int sysctl_netrom_transport_acknowledge_delay = NR_DEFAULT_T2; int sysctl_netrom_transport_busy_delay = NR_DEFAULT_T4; int sysctl_netrom_transport_requested_window_size = NR_DEFAULT_WINDOW; int sysctl_netrom_transport_no_activity_timeout = NR_DEFAULT_IDLE; int sysctl_netrom_routing_control = NR_DEFAULT_ROUTING; int sysctl_netrom_link_fails_count = NR_DEFAULT_FAILS; int sysctl_netrom_reset_circuit = NR_DEFAULT_RESET; static unsigned short circuit = 0x101; static HLIST_HEAD(nr_list); static DEFINE_SPINLOCK(nr_list_lock); static const struct proto_ops nr_proto_ops; /* * NETROM network devices are virtual network devices encapsulating NETROM * frames into AX.25 which will be sent through an AX.25 device, so form a * special "super class" of normal net devices; split their locks off into a * separate class since they always nest. */ static struct lock_class_key nr_netdev_xmit_lock_key; static struct lock_class_key nr_netdev_addr_lock_key; static void nr_set_lockdep_one(struct net_device *dev, struct netdev_queue *txq, void *_unused) { lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key); } static void nr_set_lockdep_key(struct net_device *dev) { lockdep_set_class(&dev->addr_list_lock, &nr_netdev_addr_lock_key); netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL); } /* * Socket removal during an interrupt is now safe. */ static void nr_remove_socket(struct sock *sk) { spin_lock_bh(&nr_list_lock); sk_del_node_init(sk); spin_unlock_bh(&nr_list_lock); } /* * Kill all bound sockets on a dropped device. */ static void nr_kill_by_device(struct net_device *dev) { struct sock *s; spin_lock_bh(&nr_list_lock); sk_for_each(s, &nr_list) if (nr_sk(s)->device == dev) nr_disconnect(s, ENETUNREACH); spin_unlock_bh(&nr_list_lock); } /* * Handle device status changes. */ static int nr_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; if (event != NETDEV_DOWN) return NOTIFY_DONE; nr_kill_by_device(dev); nr_rt_device_down(dev); return NOTIFY_DONE; } /* * Add a socket to the bound sockets list. 
*/ static void nr_insert_socket(struct sock *sk) { spin_lock_bh(&nr_list_lock); sk_add_node(sk, &nr_list); spin_unlock_bh(&nr_list_lock); } /* * Find a socket that wants to accept the Connect Request we just * received. */ static struct sock *nr_find_listener(ax25_address *addr) { struct sock *s; spin_lock_bh(&nr_list_lock); sk_for_each(s, &nr_list) if (!ax25cmp(&nr_sk(s)->source_addr, addr) && s->sk_state == TCP_LISTEN) { sock_hold(s); goto found; } s = NULL; found: spin_unlock_bh(&nr_list_lock); return s; } /* * Find a connected NET/ROM socket given my circuit IDs. */ static struct sock *nr_find_socket(unsigned char index, unsigned char id) { struct sock *s; spin_lock_bh(&nr_list_lock); sk_for_each(s, &nr_list) { struct nr_sock *nr = nr_sk(s); if (nr->my_index == index && nr->my_id == id) { sock_hold(s); goto found; } } s = NULL; found: spin_unlock_bh(&nr_list_lock); return s; } /* * Find a connected NET/ROM socket given their circuit IDs. */ static struct sock *nr_find_peer(unsigned char index, unsigned char id, ax25_address *dest) { struct sock *s; spin_lock_bh(&nr_list_lock); sk_for_each(s, &nr_list) { struct nr_sock *nr = nr_sk(s); if (nr->your_index == index && nr->your_id == id && !ax25cmp(&nr->dest_addr, dest)) { sock_hold(s); goto found; } } s = NULL; found: spin_unlock_bh(&nr_list_lock); return s; } /* * Find next free circuit ID. */ static unsigned short nr_find_next_circuit(void) { unsigned short id = circuit; unsigned char i, j; struct sock *sk; for (;;) { i = id / 256; j = id % 256; if (i != 0 && j != 0) { if ((sk=nr_find_socket(i, j)) == NULL) break; sock_put(sk); } id++; } return id; } /* * Deferred destroy. */ void nr_destroy_socket(struct sock *); /* * Handler for deferred kills. */ static void nr_destroy_timer(struct timer_list *t) { struct sock *sk = from_timer(sk, t, sk_timer); bh_lock_sock(sk); sock_hold(sk); nr_destroy_socket(sk); bh_unlock_sock(sk); sock_put(sk); } /* * This is called from user mode and the timers. Thus it protects itself * against interrupt users but doesn't worry about being called during * work. Once it is removed from the queue no interrupt or bottom half * will touch it and we are (fairly 8-) ) safe. */ void nr_destroy_socket(struct sock *sk) { struct sk_buff *skb; nr_remove_socket(sk); nr_stop_heartbeat(sk); nr_stop_t1timer(sk); nr_stop_t2timer(sk); nr_stop_t4timer(sk); nr_stop_idletimer(sk); nr_clear_queues(sk); /* Flush the queues */ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { if (skb->sk != sk) { /* A pending connection */ /* Queue the unaccepted socket for death */ sock_set_flag(skb->sk, SOCK_DEAD); nr_start_heartbeat(skb->sk); nr_sk(skb->sk)->state = NR_STATE_0; } kfree_skb(skb); } if (sk_has_allocations(sk)) { /* Defer: outstanding buffers */ sk->sk_timer.function = nr_destroy_timer; sk->sk_timer.expires = jiffies + 2 * HZ; add_timer(&sk->sk_timer); } else sock_put(sk); } /* * Handling for system calls applied via the various interfaces to a * NET/ROM socket object. 
*/ static int nr_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); unsigned int opt; if (level != SOL_NETROM) return -ENOPROTOOPT; if (optlen < sizeof(unsigned int)) return -EINVAL; if (copy_from_sockptr(&opt, optval, sizeof(opt))) return -EFAULT; switch (optname) { case NETROM_T1: if (opt < 1 || opt > UINT_MAX / HZ) return -EINVAL; nr->t1 = opt * HZ; return 0; case NETROM_T2: if (opt < 1 || opt > UINT_MAX / HZ) return -EINVAL; nr->t2 = opt * HZ; return 0; case NETROM_N2: if (opt < 1 || opt > 31) return -EINVAL; nr->n2 = opt; return 0; case NETROM_T4: if (opt < 1 || opt > UINT_MAX / HZ) return -EINVAL; nr->t4 = opt * HZ; return 0; case NETROM_IDLE: if (opt > UINT_MAX / (60 * HZ)) return -EINVAL; nr->idle = opt * 60 * HZ; return 0; default: return -ENOPROTOOPT; } } static int nr_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); int val = 0; int len; if (level != SOL_NETROM) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; switch (optname) { case NETROM_T1: val = nr->t1 / HZ; break; case NETROM_T2: val = nr->t2 / HZ; break; case NETROM_N2: val = nr->n2; break; case NETROM_T4: val = nr->t4 / HZ; break; case NETROM_IDLE: val = nr->idle / (60 * HZ); break; default: return -ENOPROTOOPT; } len = min_t(unsigned int, len, sizeof(int)); if (put_user(len, optlen)) return -EFAULT; return copy_to_user(optval, &val, len) ? -EFAULT : 0; } static int nr_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; lock_sock(sk); if (sock->state != SS_UNCONNECTED) { release_sock(sk); return -EINVAL; } if (sk->sk_state != TCP_LISTEN) { memset(&nr_sk(sk)->user_addr, 0, AX25_ADDR_LEN); sk->sk_max_ack_backlog = backlog; sk->sk_state = TCP_LISTEN; release_sock(sk); return 0; } release_sock(sk); return -EOPNOTSUPP; } static struct proto nr_proto = { .name = "NETROM", .owner = THIS_MODULE, .obj_size = sizeof(struct nr_sock), }; static int nr_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct nr_sock *nr; if (!net_eq(net, &init_net)) return -EAFNOSUPPORT; if (sock->type != SOCK_SEQPACKET || protocol != 0) return -ESOCKTNOSUPPORT; sk = sk_alloc(net, PF_NETROM, GFP_ATOMIC, &nr_proto, kern); if (sk == NULL) return -ENOMEM; nr = nr_sk(sk); sock_init_data(sock, sk); sock->ops = &nr_proto_ops; sk->sk_protocol = protocol; skb_queue_head_init(&nr->ack_queue); skb_queue_head_init(&nr->reseq_queue); skb_queue_head_init(&nr->frag_queue); nr_init_timers(sk); nr->t1 = msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_timeout)); nr->t2 = msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_acknowledge_delay)); nr->n2 = msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_maximum_tries)); nr->t4 = msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_busy_delay)); nr->idle = msecs_to_jiffies(READ_ONCE(sysctl_netrom_transport_no_activity_timeout)); nr->window = READ_ONCE(sysctl_netrom_transport_requested_window_size); nr->bpqext = 1; nr->state = NR_STATE_0; return 0; } static struct sock *nr_make_new(struct sock *osk) { struct sock *sk; struct nr_sock *nr, *onr; if (osk->sk_type != SOCK_SEQPACKET) return NULL; sk = sk_alloc(sock_net(osk), PF_NETROM, GFP_ATOMIC, osk->sk_prot, 0); if (sk == NULL) return NULL; nr = nr_sk(sk); sock_init_data(NULL, sk); sk->sk_type = osk->sk_type; sk->sk_priority = READ_ONCE(osk->sk_priority); 
sk->sk_protocol = osk->sk_protocol; sk->sk_rcvbuf = osk->sk_rcvbuf; sk->sk_sndbuf = osk->sk_sndbuf; sk->sk_state = TCP_ESTABLISHED; sock_copy_flags(sk, osk); skb_queue_head_init(&nr->ack_queue); skb_queue_head_init(&nr->reseq_queue); skb_queue_head_init(&nr->frag_queue); nr_init_timers(sk); onr = nr_sk(osk); nr->t1 = onr->t1; nr->t2 = onr->t2; nr->n2 = onr->n2; nr->t4 = onr->t4; nr->idle = onr->idle; nr->window = onr->window; nr->device = onr->device; nr->bpqext = onr->bpqext; return sk; } static int nr_release(struct socket *sock) { struct sock *sk = sock->sk; struct nr_sock *nr; if (sk == NULL) return 0; sock_hold(sk); sock_orphan(sk); lock_sock(sk); nr = nr_sk(sk); switch (nr->state) { case NR_STATE_0: case NR_STATE_1: case NR_STATE_2: nr_disconnect(sk, 0); nr_destroy_socket(sk); break; case NR_STATE_3: nr_clear_queues(sk); nr->n2count = 0; nr_write_internal(sk, NR_DISCREQ); nr_start_t1timer(sk); nr_stop_t2timer(sk); nr_stop_t4timer(sk); nr_stop_idletimer(sk); nr->state = NR_STATE_2; sk->sk_state = TCP_CLOSE; sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DESTROY); break; default: break; } sock->sk = NULL; release_sock(sk); sock_put(sk); return 0; } static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); struct full_sockaddr_ax25 *addr = (struct full_sockaddr_ax25 *)uaddr; struct net_device *dev; ax25_uid_assoc *user; ax25_address *source; lock_sock(sk); if (!sock_flag(sk, SOCK_ZAPPED)) { release_sock(sk); return -EINVAL; } if (addr_len < sizeof(struct sockaddr_ax25) || addr_len > sizeof(struct full_sockaddr_ax25)) { release_sock(sk); return -EINVAL; } if (addr_len < (addr->fsa_ax25.sax25_ndigis * sizeof(ax25_address) + sizeof(struct sockaddr_ax25))) { release_sock(sk); return -EINVAL; } if (addr->fsa_ax25.sax25_family != AF_NETROM) { release_sock(sk); return -EINVAL; } if ((dev = nr_dev_get(&addr->fsa_ax25.sax25_call)) == NULL) { release_sock(sk); return -EADDRNOTAVAIL; } /* * Only the super user can set an arbitrary user callsign. 
*/ if (addr->fsa_ax25.sax25_ndigis == 1) { if (!capable(CAP_NET_BIND_SERVICE)) { dev_put(dev); release_sock(sk); return -EPERM; } nr->user_addr = addr->fsa_digipeater[0]; nr->source_addr = addr->fsa_ax25.sax25_call; } else { source = &addr->fsa_ax25.sax25_call; user = ax25_findbyuid(current_euid()); if (user) { nr->user_addr = user->call; ax25_uid_put(user); } else { if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) { release_sock(sk); dev_put(dev); return -EPERM; } nr->user_addr = *source; } nr->source_addr = *source; } nr->device = dev; nr_insert_socket(sk); sock_reset_flag(sk, SOCK_ZAPPED); dev_put(dev); release_sock(sk); return 0; } static int nr_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); struct sockaddr_ax25 *addr = (struct sockaddr_ax25 *)uaddr; const ax25_address *source = NULL; ax25_uid_assoc *user; struct net_device *dev; int err = 0; lock_sock(sk); if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { sock->state = SS_CONNECTED; goto out_release; /* Connect completed during a ERESTARTSYS event */ } if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { sock->state = SS_UNCONNECTED; err = -ECONNREFUSED; goto out_release; } if (sk->sk_state == TCP_ESTABLISHED) { err = -EISCONN; /* No reconnect on a seqpacket socket */ goto out_release; } if (sock->state == SS_CONNECTING) { err = -EALREADY; goto out_release; } sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; if (addr_len != sizeof(struct sockaddr_ax25) && addr_len != sizeof(struct full_sockaddr_ax25)) { err = -EINVAL; goto out_release; } if (addr->sax25_family != AF_NETROM) { err = -EINVAL; goto out_release; } if (sock_flag(sk, SOCK_ZAPPED)) { /* Must bind first - autobinding in this may or may not work */ sock_reset_flag(sk, SOCK_ZAPPED); if ((dev = nr_dev_first()) == NULL) { err = -ENETUNREACH; goto out_release; } source = (const ax25_address *)dev->dev_addr; user = ax25_findbyuid(current_euid()); if (user) { nr->user_addr = user->call; ax25_uid_put(user); } else { if (ax25_uid_policy && !capable(CAP_NET_ADMIN)) { dev_put(dev); err = -EPERM; goto out_release; } nr->user_addr = *source; } nr->source_addr = *source; nr->device = dev; dev_put(dev); nr_insert_socket(sk); /* Finish the bind */ } nr->dest_addr = addr->sax25_call; release_sock(sk); circuit = nr_find_next_circuit(); lock_sock(sk); nr->my_index = circuit / 256; nr->my_id = circuit % 256; circuit++; /* Move to connecting socket, start sending Connect Requests */ sock->state = SS_CONNECTING; sk->sk_state = TCP_SYN_SENT; nr_establish_data_link(sk); nr->state = NR_STATE_1; nr_start_heartbeat(sk); /* Now the loop */ if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) { err = -EINPROGRESS; goto out_release; } /* * A Connect Ack with Choke or timeout or failed routing will go to * closed. 
*/ if (sk->sk_state == TCP_SYN_SENT) { DEFINE_WAIT(wait); for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); if (sk->sk_state != TCP_SYN_SENT) break; if (!signal_pending(current)) { release_sock(sk); schedule(); lock_sock(sk); continue; } err = -ERESTARTSYS; break; } finish_wait(sk_sleep(sk), &wait); if (err) goto out_release; } if (sk->sk_state != TCP_ESTABLISHED) { sock->state = SS_UNCONNECTED; err = sock_error(sk); /* Always set at this point */ goto out_release; } sock->state = SS_CONNECTED; out_release: release_sock(sk); return err; } static int nr_accept(struct socket *sock, struct socket *newsock, struct proto_accept_arg *arg) { struct sk_buff *skb; struct sock *newsk; DEFINE_WAIT(wait); struct sock *sk; int err = 0; if ((sk = sock->sk) == NULL) return -EINVAL; lock_sock(sk); if (sk->sk_type != SOCK_SEQPACKET) { err = -EOPNOTSUPP; goto out_release; } if (sk->sk_state != TCP_LISTEN) { err = -EINVAL; goto out_release; } /* * The write queue this time is holding sockets ready to use * hooked into the SABM we saved */ for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); skb = skb_dequeue(&sk->sk_receive_queue); if (skb) break; if (arg->flags & O_NONBLOCK) { err = -EWOULDBLOCK; break; } if (!signal_pending(current)) { release_sock(sk); schedule(); lock_sock(sk); continue; } err = -ERESTARTSYS; break; } finish_wait(sk_sleep(sk), &wait); if (err) goto out_release; newsk = skb->sk; sock_graft(newsk, newsock); /* Now attach up the new socket */ kfree_skb(skb); sk_acceptq_removed(sk); out_release: release_sock(sk); return err; } static int nr_getname(struct socket *sock, struct sockaddr *uaddr, int peer) { struct full_sockaddr_ax25 *sax = (struct full_sockaddr_ax25 *)uaddr; struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); int uaddr_len; memset(&sax->fsa_ax25, 0, sizeof(struct sockaddr_ax25)); lock_sock(sk); if (peer != 0) { if (sk->sk_state != TCP_ESTABLISHED) { release_sock(sk); return -ENOTCONN; } sax->fsa_ax25.sax25_family = AF_NETROM; sax->fsa_ax25.sax25_ndigis = 1; sax->fsa_ax25.sax25_call = nr->user_addr; memset(sax->fsa_digipeater, 0, sizeof(sax->fsa_digipeater)); sax->fsa_digipeater[0] = nr->dest_addr; uaddr_len = sizeof(struct full_sockaddr_ax25); } else { sax->fsa_ax25.sax25_family = AF_NETROM; sax->fsa_ax25.sax25_ndigis = 0; sax->fsa_ax25.sax25_call = nr->source_addr; uaddr_len = sizeof(struct sockaddr_ax25); } release_sock(sk); return uaddr_len; } int nr_rx_frame(struct sk_buff *skb, struct net_device *dev) { struct sock *sk; struct sock *make; struct nr_sock *nr_make; ax25_address *src, *dest, *user; unsigned short circuit_index, circuit_id; unsigned short peer_circuit_index, peer_circuit_id; unsigned short frametype, flags, window, timeout; int ret; skb_orphan(skb); /* * skb->data points to the netrom frame start */ src = (ax25_address *)(skb->data + 0); dest = (ax25_address *)(skb->data + 7); circuit_index = skb->data[15]; circuit_id = skb->data[16]; peer_circuit_index = skb->data[17]; peer_circuit_id = skb->data[18]; frametype = skb->data[19] & 0x0F; flags = skb->data[19] & 0xF0; /* * Check for an incoming IP over NET/ROM frame. */ if (frametype == NR_PROTOEXT && circuit_index == NR_PROTO_IP && circuit_id == NR_PROTO_IP) { skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); skb_reset_transport_header(skb); return nr_rx_ip(skb, dev); } /* * Find an existing socket connection, based on circuit ID, if it's * a Connect Request base it on their circuit ID. 
* * Circuit ID 0/0 is not valid but it could still be a "reset" for a * circuit that no longer exists at the other end ... */ sk = NULL; if (circuit_index == 0 && circuit_id == 0) { if (frametype == NR_CONNACK && flags == NR_CHOKE_FLAG) sk = nr_find_peer(peer_circuit_index, peer_circuit_id, src); } else { if (frametype == NR_CONNREQ) sk = nr_find_peer(circuit_index, circuit_id, src); else sk = nr_find_socket(circuit_index, circuit_id); } if (sk != NULL) { bh_lock_sock(sk); skb_reset_transport_header(skb); if (frametype == NR_CONNACK && skb->len == 22) nr_sk(sk)->bpqext = 1; else nr_sk(sk)->bpqext = 0; ret = nr_process_rx_frame(sk, skb); bh_unlock_sock(sk); sock_put(sk); return ret; } /* * Now it should be a CONNREQ. */ if (frametype != NR_CONNREQ) { /* * Here it would be nice to be able to send a reset but * NET/ROM doesn't have one. We've tried to extend the protocol * by sending NR_CONNACK | NR_CHOKE_FLAGS replies but that * apparently kills BPQ boxes... :-( * So now we try to follow the established behaviour of * G8PZT's Xrouter which is sending packets with command type 7 * as an extension of the protocol. */ if (READ_ONCE(sysctl_netrom_reset_circuit) && (frametype != NR_RESET || flags != 0)) nr_transmit_reset(skb, 1); return 0; } sk = nr_find_listener(dest); user = (ax25_address *)(skb->data + 21); if (sk == NULL || sk_acceptq_is_full(sk) || (make = nr_make_new(sk)) == NULL) { nr_transmit_refusal(skb, 0); if (sk) sock_put(sk); return 0; } bh_lock_sock(sk); window = skb->data[20]; sock_hold(make); skb->sk = make; skb->destructor = sock_efree; make->sk_state = TCP_ESTABLISHED; /* Fill in his circuit details */ nr_make = nr_sk(make); nr_make->source_addr = *dest; nr_make->dest_addr = *src; nr_make->user_addr = *user; nr_make->your_index = circuit_index; nr_make->your_id = circuit_id; bh_unlock_sock(sk); circuit = nr_find_next_circuit(); bh_lock_sock(sk); nr_make->my_index = circuit / 256; nr_make->my_id = circuit % 256; circuit++; /* Window negotiation */ if (window < nr_make->window) nr_make->window = window; /* L4 timeout negotiation */ if (skb->len == 37) { timeout = skb->data[36] * 256 + skb->data[35]; if (timeout * HZ < nr_make->t1) nr_make->t1 = timeout * HZ; nr_make->bpqext = 1; } else { nr_make->bpqext = 0; } nr_write_internal(make, NR_CONNACK); nr_make->condition = 0x00; nr_make->vs = 0; nr_make->va = 0; nr_make->vr = 0; nr_make->vl = 0; nr_make->state = NR_STATE_3; sk_acceptq_added(sk); skb_queue_head(&sk->sk_receive_queue, skb); if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk); bh_unlock_sock(sk); sock_put(sk); nr_insert_socket(make); nr_start_heartbeat(make); nr_start_idletimer(make); return 1; } static int nr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct nr_sock *nr = nr_sk(sk); DECLARE_SOCKADDR(struct sockaddr_ax25 *, usax, msg->msg_name); int err; struct sockaddr_ax25 sax; struct sk_buff *skb; unsigned char *asmptr; int size; if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT)) return -EINVAL; lock_sock(sk); if (sock_flag(sk, SOCK_ZAPPED)) { err = -EADDRNOTAVAIL; goto out; } if (sk->sk_shutdown & SEND_SHUTDOWN) { send_sig(SIGPIPE, current, 0); err = -EPIPE; goto out; } if (nr->device == NULL) { err = -ENETUNREACH; goto out; } if (usax) { if (msg->msg_namelen < sizeof(sax)) { err = -EINVAL; goto out; } sax = *usax; if (ax25cmp(&nr->dest_addr, &sax.sax25_call) != 0) { err = -EISCONN; goto out; } if (sax.sax25_family != AF_NETROM) { err = -EINVAL; goto out; } } else { if (sk->sk_state != TCP_ESTABLISHED) { err = 
-ENOTCONN; goto out; } sax.sax25_family = AF_NETROM; sax.sax25_call = nr->dest_addr; } /* Build a packet - the conventional user limit is 236 bytes. We can do ludicrously large NetROM frames but must not overflow */ if (len > 65536) { err = -EMSGSIZE; goto out; } size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN; if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL) goto out; skb_reserve(skb, size - len); skb_reset_transport_header(skb); /* * Push down the NET/ROM header */ asmptr = skb_push(skb, NR_TRANSPORT_LEN); /* Build a NET/ROM Transport header */ *asmptr++ = nr->your_index; *asmptr++ = nr->your_id; *asmptr++ = 0; /* To be filled in later */ *asmptr++ = 0; /* Ditto */ *asmptr++ = NR_INFO; /* * Put the data on the end */ skb_put(skb, len); /* User data follows immediately after the NET/ROM transport header */ if (memcpy_from_msg(skb_transport_header(skb), msg, len)) { kfree_skb(skb); err = -EFAULT; goto out; } if (sk->sk_state != TCP_ESTABLISHED) { kfree_skb(skb); err = -ENOTCONN; goto out; } nr_output(sk, skb); /* Shove it onto the queue */ err = len; out: release_sock(sk); return err; } static int nr_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; DECLARE_SOCKADDR(struct sockaddr_ax25 *, sax, msg->msg_name); size_t copied; struct sk_buff *skb; int er; /* * This works for seqpacket too. The receiver has ordered the queue for * us! We do one quick check first though */ lock_sock(sk); if (sk->sk_state != TCP_ESTABLISHED) { release_sock(sk); return -ENOTCONN; } /* Now we can treat all alike */ skb = skb_recv_datagram(sk, flags, &er); if (!skb) { release_sock(sk); return er; } skb_reset_transport_header(skb); copied = skb->len; if (copied > size) { copied = size; msg->msg_flags |= MSG_TRUNC; } er = skb_copy_datagram_msg(skb, 0, msg, copied); if (er < 0) { skb_free_datagram(sk, skb); release_sock(sk); return er; } if (sax != NULL) { memset(sax, 0, sizeof(*sax)); sax->sax25_family = AF_NETROM; skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call, AX25_ADDR_LEN); msg->msg_namelen = sizeof(*sax); } skb_free_datagram(sk, skb); release_sock(sk); return copied; } static int nr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; void __user *argp = (void __user *)arg; switch (cmd) { case TIOCOUTQ: { long amount; lock_sock(sk); amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; release_sock(sk); return put_user(amount, (int __user *)argp); } case TIOCINQ: { struct sk_buff *skb; long amount = 0L; lock_sock(sk); /* These two are safe on a single CPU system as only user tasks fiddle here */ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) amount = skb->len; release_sock(sk); return put_user(amount, (int __user *)argp); } case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: return -EINVAL; case SIOCADDRT: case SIOCDELRT: case SIOCNRDECOBS: if (!capable(CAP_NET_ADMIN)) return -EPERM; return nr_rt_ioctl(cmd, argp); default: return -ENOIOCTLCMD; } return 0; } #ifdef CONFIG_PROC_FS static void *nr_info_start(struct seq_file *seq, loff_t *pos) __acquires(&nr_list_lock) { spin_lock_bh(&nr_list_lock); return seq_hlist_start_head(&nr_list, *pos); } static void *nr_info_next(struct seq_file *seq, void *v, loff_t *pos) { return seq_hlist_next(v, &nr_list, pos); } static void nr_info_stop(struct 
seq_file *seq, void *v) __releases(&nr_list_lock) { spin_unlock_bh(&nr_list_lock); } static int nr_info_show(struct seq_file *seq, void *v) { struct sock *s = sk_entry(v); struct net_device *dev; struct nr_sock *nr; const char *devname; char buf[11]; if (v == SEQ_START_TOKEN) seq_puts(seq, "user_addr dest_node src_node dev my your st vs vr va t1 t2 t4 idle n2 wnd Snd-Q Rcv-Q inode\n"); else { bh_lock_sock(s); nr = nr_sk(s); if ((dev = nr->device) == NULL) devname = "???"; else devname = dev->name; seq_printf(seq, "%-9s ", ax2asc(buf, &nr->user_addr)); seq_printf(seq, "%-9s ", ax2asc(buf, &nr->dest_addr)); seq_printf(seq, "%-9s %-3s %02X/%02X %02X/%02X %2d %3d %3d %3d %3lu/%03lu %2lu/%02lu %3lu/%03lu %3lu/%03lu %2d/%02d %3d %5d %5d %ld\n", ax2asc(buf, &nr->source_addr), devname, nr->my_index, nr->my_id, nr->your_index, nr->your_id, nr->state, nr->vs, nr->vr, nr->va, ax25_display_timer(&nr->t1timer) / HZ, nr->t1 / HZ, ax25_display_timer(&nr->t2timer) / HZ, nr->t2 / HZ, ax25_display_timer(&nr->t4timer) / HZ, nr->t4 / HZ, ax25_display_timer(&nr->idletimer) / (60 * HZ), nr->idle / (60 * HZ), nr->n2count, nr->n2, nr->window, sk_wmem_alloc_get(s), sk_rmem_alloc_get(s), s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L); bh_unlock_sock(s); } return 0; } static const struct seq_operations nr_info_seqops = { .start = nr_info_start, .next = nr_info_next, .stop = nr_info_stop, .show = nr_info_show, }; #endif /* CONFIG_PROC_FS */ static const struct net_proto_family nr_family_ops = { .family = PF_NETROM, .create = nr_create, .owner = THIS_MODULE, }; static const struct proto_ops nr_proto_ops = { .family = PF_NETROM, .owner = THIS_MODULE, .release = nr_release, .bind = nr_bind, .connect = nr_connect, .socketpair = sock_no_socketpair, .accept = nr_accept, .getname = nr_getname, .poll = datagram_poll, .ioctl = nr_ioctl, .gettstamp = sock_gettstamp, .listen = nr_listen, .shutdown = sock_no_shutdown, .setsockopt = nr_setsockopt, .getsockopt = nr_getsockopt, .sendmsg = nr_sendmsg, .recvmsg = nr_recvmsg, .mmap = sock_no_mmap, }; static struct notifier_block nr_dev_notifier = { .notifier_call = nr_device_event, }; static struct net_device **dev_nr; static struct ax25_protocol nr_pid = { .pid = AX25_P_NETROM, .func = nr_route_frame }; static struct ax25_linkfail nr_linkfail_notifier = { .func = nr_link_failed, }; static int __init nr_proto_init(void) { int i; int rc = proto_register(&nr_proto, 0); if (rc) return rc; if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) { pr_err("NET/ROM: %s - nr_ndevs parameter too large\n", __func__); rc = -EINVAL; goto unregister_proto; } dev_nr = kcalloc(nr_ndevs, sizeof(struct net_device *), GFP_KERNEL); if (!dev_nr) { pr_err("NET/ROM: %s - unable to allocate device array\n", __func__); rc = -ENOMEM; goto unregister_proto; } for (i = 0; i < nr_ndevs; i++) { char name[IFNAMSIZ]; struct net_device *dev; sprintf(name, "nr%d", i); dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup); if (!dev) { rc = -ENOMEM; goto fail; } dev->base_addr = i; rc = register_netdev(dev); if (rc) { free_netdev(dev); goto fail; } nr_set_lockdep_key(dev); dev_nr[i] = dev; } rc = sock_register(&nr_family_ops); if (rc) goto fail; rc = register_netdevice_notifier(&nr_dev_notifier); if (rc) goto out_sock; ax25_register_pid(&nr_pid); ax25_linkfail_register(&nr_linkfail_notifier); #ifdef CONFIG_SYSCTL rc = nr_register_sysctl(); if (rc) goto out_sysctl; #endif nr_loopback_init(); rc = -ENOMEM; if (!proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops)) goto proc_remove1; if 
(!proc_create_seq("nr_neigh", 0444, init_net.proc_net, &nr_neigh_seqops)) goto proc_remove2; if (!proc_create_seq("nr_nodes", 0444, init_net.proc_net, &nr_node_seqops)) goto proc_remove3; return 0; proc_remove3: remove_proc_entry("nr_neigh", init_net.proc_net); proc_remove2: remove_proc_entry("nr", init_net.proc_net); proc_remove1: nr_loopback_clear(); nr_rt_free(); #ifdef CONFIG_SYSCTL nr_unregister_sysctl(); out_sysctl: #endif ax25_linkfail_release(&nr_linkfail_notifier); ax25_protocol_release(AX25_P_NETROM); unregister_netdevice_notifier(&nr_dev_notifier); out_sock: sock_unregister(PF_NETROM); fail: while (--i >= 0) { unregister_netdev(dev_nr[i]); free_netdev(dev_nr[i]); } kfree(dev_nr); unregister_proto: proto_unregister(&nr_proto); return rc; } module_init(nr_proto_init); module_param(nr_ndevs, int, 0); MODULE_PARM_DESC(nr_ndevs, "number of NET/ROM devices"); MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>"); MODULE_DESCRIPTION("The amateur radio NET/ROM network and transport layer protocol"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_NETROM); static void __exit nr_exit(void) { int i; remove_proc_entry("nr", init_net.proc_net); remove_proc_entry("nr_neigh", init_net.proc_net); remove_proc_entry("nr_nodes", init_net.proc_net); nr_loopback_clear(); nr_rt_free(); #ifdef CONFIG_SYSCTL nr_unregister_sysctl(); #endif ax25_linkfail_release(&nr_linkfail_notifier); ax25_protocol_release(AX25_P_NETROM); unregister_netdevice_notifier(&nr_dev_notifier); sock_unregister(PF_NETROM); for (i = 0; i < nr_ndevs; i++) { struct net_device *dev = dev_nr[i]; if (dev) { unregister_netdev(dev); free_netdev(dev); } } kfree(dev_nr); proto_unregister(&nr_proto); } module_exit(nr_exit); |
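/*
 * Illustrative sketch (not part of the module above): how nr_create() and
 * nr_setsockopt()/nr_getsockopt() are reached from user space.  AF_NETROM
 * and SOL_NETROM come from <linux/socket.h>; the fallback definitions below
 * are only for libc headers that do not expose them, and the timer/retry
 * values are arbitrary choices for this example.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/netrom.h>	/* NETROM_T1, NETROM_N2, ... */

#ifndef AF_NETROM
#define AF_NETROM	6	/* from <linux/socket.h> */
#endif
#ifndef SOL_NETROM
#define SOL_NETROM	259	/* from <linux/socket.h> */
#endif

int netrom_example_socket(void)
{
	unsigned int t1 = 120;	/* seconds; nr_setsockopt() converts to jiffies */
	unsigned int n2 = 10;	/* retries; nr_setsockopt() accepts 1..31 */
	int s = socket(AF_NETROM, SOCK_SEQPACKET, 0);	/* protocol must be 0 */

	if (s < 0) {
		perror("socket(AF_NETROM)");
		return -1;
	}
	if (setsockopt(s, SOL_NETROM, NETROM_T1, &t1, sizeof(t1)) < 0 ||
	    setsockopt(s, SOL_NETROM, NETROM_N2, &n2, sizeof(n2)) < 0) {
		perror("setsockopt(SOL_NETROM)");
		close(s);
		return -1;
	}
	return s;	/* caller still has to bind()/connect() with AX.25 addresses */
}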
/* * memfd_create system call and file sealing support * * Code was originally included in shmem.c, and broken out to facilitate * use by hugetlbfs as well as tmpfs. * * This file is released under the GPL. */ #include <linux/fs.h> #include <linux/vfs.h> #include <linux/pagemap.h> #include <linux/file.h> #include <linux/mm.h> #include <linux/sched/signal.h> #include <linux/khugepaged.h> #include <linux/syscalls.h> #include <linux/hugetlb.h> #include <linux/shmem_fs.h> #include <linux/memfd.h> #include <linux/pid_namespace.h> #include <uapi/linux/memfd.h> /* * We need a tag: a new tag would expand every xa_node by 8 bytes, * so reuse a tag which we firmly believe is never set or cleared on tmpfs * or hugetlbfs because they are memory only filesystems. */ #define MEMFD_TAG_PINNED PAGECACHE_TAG_TOWRITE #define LAST_SCAN 4 /* about 150ms max */ static bool memfd_folio_has_extra_refs(struct folio *folio) { return folio_ref_count(folio) - folio_mapcount(folio) != folio_nr_pages(folio); } static void memfd_tag_pins(struct xa_state *xas) { struct folio *folio; int latency = 0; lru_add_drain(); xas_lock_irq(xas); xas_for_each(xas, folio, ULONG_MAX) { if (!xa_is_value(folio) && memfd_folio_has_extra_refs(folio)) xas_set_mark(xas, MEMFD_TAG_PINNED); if (++latency < XA_CHECK_SCHED) continue; latency = 0; xas_pause(xas); xas_unlock_irq(xas); cond_resched(); xas_lock_irq(xas); } xas_unlock_irq(xas); } /* * This is a helper function used by memfd_pin_folios() in GUP (gup.c).
* It is mainly called to allocate a folio in a memfd when the caller * (memfd_pin_folios()) cannot find a folio in the page cache at a given * index in the mapping. */ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx) { #ifdef CONFIG_HUGETLB_PAGE struct folio *folio; gfp_t gfp_mask; int err; if (is_file_hugepages(memfd)) { /* * The folio would most likely be accessed by a DMA driver, * therefore, we have zone memory constraints where we can * alloc from. Also, the folio will be pinned for an indefinite * amount of time, so it is not expected to be migrated away. */ struct hstate *h = hstate_file(memfd); gfp_mask = htlb_alloc_mask(h); gfp_mask &= ~(__GFP_HIGHMEM | __GFP_MOVABLE); idx >>= huge_page_order(h); folio = alloc_hugetlb_folio_reserve(h, numa_node_id(), NULL, gfp_mask); if (folio) { err = hugetlb_add_to_page_cache(folio, memfd->f_mapping, idx); if (err) { folio_put(folio); return ERR_PTR(err); } folio_unlock(folio); return folio; } return ERR_PTR(-ENOMEM); } #endif return shmem_read_folio(memfd->f_mapping, idx); } /* * Setting SEAL_WRITE requires us to verify there's no pending writer. However, * via get_user_pages(), drivers might have some pending I/O without any active * user-space mappings (eg., direct-IO, AIO). Therefore, we look at all folios * and see whether it has an elevated ref-count. If so, we tag them and wait for * them to be dropped. * The caller must guarantee that no new user will acquire writable references * to those folios to avoid races. */ static int memfd_wait_for_pins(struct address_space *mapping) { XA_STATE(xas, &mapping->i_pages, 0); struct folio *folio; int error, scan; memfd_tag_pins(&xas); error = 0; for (scan = 0; scan <= LAST_SCAN; scan++) { int latency = 0; if (!xas_marked(&xas, MEMFD_TAG_PINNED)) break; if (!scan) lru_add_drain_all(); else if (schedule_timeout_killable((HZ << scan) / 200)) scan = LAST_SCAN; xas_set(&xas, 0); xas_lock_irq(&xas); xas_for_each_marked(&xas, folio, ULONG_MAX, MEMFD_TAG_PINNED) { bool clear = true; if (!xa_is_value(folio) && memfd_folio_has_extra_refs(folio)) { /* * On the last scan, we clean up all those tags * we inserted; but make a note that we still * found folios pinned. */ if (scan == LAST_SCAN) error = -EBUSY; else clear = false; } if (clear) xas_clear_mark(&xas, MEMFD_TAG_PINNED); if (++latency < XA_CHECK_SCHED) continue; latency = 0; xas_pause(&xas); xas_unlock_irq(&xas); cond_resched(); xas_lock_irq(&xas); } xas_unlock_irq(&xas); } return error; } static unsigned int *memfd_file_seals_ptr(struct file *file) { if (shmem_file(file)) return &SHMEM_I(file_inode(file))->seals; #ifdef CONFIG_HUGETLBFS if (is_file_hugepages(file)) return &HUGETLBFS_I(file_inode(file))->seals; #endif return NULL; } #define F_ALL_SEALS (F_SEAL_SEAL | \ F_SEAL_EXEC | \ F_SEAL_SHRINK | \ F_SEAL_GROW | \ F_SEAL_WRITE | \ F_SEAL_FUTURE_WRITE) static int memfd_add_seals(struct file *file, unsigned int seals) { struct inode *inode = file_inode(file); unsigned int *file_seals; int error; /* * SEALING * Sealing allows multiple parties to share a tmpfs or hugetlbfs file * but restrict access to a specific subset of file operations. Seals * can only be added, but never removed. This way, mutually untrusted * parties can share common memory regions with a well-defined policy. * A malicious peer can thus never perform unwanted operations on a * shared object. * * Seals are only supported on special tmpfs or hugetlbfs files and * always affect the whole underlying inode. 
Once a seal is set, it * may prevent some kinds of access to the file. Currently, the * following seals are defined: * SEAL_SEAL: Prevent further seals from being set on this file * SEAL_SHRINK: Prevent the file from shrinking * SEAL_GROW: Prevent the file from growing * SEAL_WRITE: Prevent write access to the file * SEAL_EXEC: Prevent modification of the exec bits in the file mode * * As we don't require any trust relationship between two parties, we * must prevent seals from being removed. Therefore, sealing a file * only adds a given set of seals to the file, it never touches * existing seals. Furthermore, the "setting seals"-operation can be * sealed itself, which basically prevents any further seal from being * added. * * Semantics of sealing are only defined on volatile files. Only * anonymous tmpfs and hugetlbfs files support sealing. More * importantly, seals are never written to disk. Therefore, there's * no plan to support it on other file types. */ if (!(file->f_mode & FMODE_WRITE)) return -EPERM; if (seals & ~(unsigned int)F_ALL_SEALS) return -EINVAL; inode_lock(inode); file_seals = memfd_file_seals_ptr(file); if (!file_seals) { error = -EINVAL; goto unlock; } if (*file_seals & F_SEAL_SEAL) { error = -EPERM; goto unlock; } if ((seals & F_SEAL_WRITE) && !(*file_seals & F_SEAL_WRITE)) { error = mapping_deny_writable(file->f_mapping); if (error) goto unlock; error = memfd_wait_for_pins(file->f_mapping); if (error) { mapping_allow_writable(file->f_mapping); goto unlock; } } /* * SEAL_EXEC implies SEAL_WRITE, making W^X from the start. */ if (seals & F_SEAL_EXEC && inode->i_mode & 0111) seals |= F_SEAL_SHRINK|F_SEAL_GROW|F_SEAL_WRITE|F_SEAL_FUTURE_WRITE; *file_seals |= seals; error = 0; unlock: inode_unlock(inode); return error; } static int memfd_get_seals(struct file *file) { unsigned int *seals = memfd_file_seals_ptr(file); return seals ? *seals : -EINVAL; } long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg) { long error; switch (cmd) { case F_ADD_SEALS: error = memfd_add_seals(file, arg); break; case F_GET_SEALS: error = memfd_get_seals(file); break; default: error = -EINVAL; break; } return error; } #define MFD_NAME_PREFIX "memfd:" #define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1) #define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN) #define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING | MFD_HUGETLB | MFD_NOEXEC_SEAL | MFD_EXEC) static int check_sysctl_memfd_noexec(unsigned int *flags) { #ifdef CONFIG_SYSCTL struct pid_namespace *ns = task_active_pid_ns(current); int sysctl = pidns_memfd_noexec_scope(ns); if (!(*flags & (MFD_EXEC | MFD_NOEXEC_SEAL))) { if (sysctl >= MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL) *flags |= MFD_NOEXEC_SEAL; else *flags |= MFD_EXEC; } if (!(*flags & MFD_NOEXEC_SEAL) && sysctl >= MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED) { pr_err_ratelimited( "%s[%d]: memfd_create() requires MFD_NOEXEC_SEAL with vm.memfd_noexec=%d\n", current->comm, task_pid_nr(current), sysctl); return -EACCES; } #endif return 0; } static inline bool is_write_sealed(unsigned int seals) { return seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE); } static int check_write_seal(unsigned long *vm_flags_ptr) { unsigned long vm_flags = *vm_flags_ptr; unsigned long mask = vm_flags & (VM_SHARED | VM_WRITE); /* If a private mapping then writability is irrelevant. */ if (!(mask & VM_SHARED)) return 0; /* * New PROT_WRITE and MAP_SHARED mmaps are not allowed when * write seals are active. 
*/ if (mask & VM_WRITE) return -EPERM; /* * This is a read-only mapping, disallow mprotect() from making a * write-sealed mapping writable in future. */ *vm_flags_ptr &= ~VM_MAYWRITE; return 0; } int memfd_check_seals_mmap(struct file *file, unsigned long *vm_flags_ptr) { int err = 0; unsigned int *seals_ptr = memfd_file_seals_ptr(file); unsigned int seals = seals_ptr ? *seals_ptr : 0; if (is_write_sealed(seals)) err = check_write_seal(vm_flags_ptr); return err; } static int sanitize_flags(unsigned int *flags_ptr) { unsigned int flags = *flags_ptr; if (!(flags & MFD_HUGETLB)) { if (flags & ~(unsigned int)MFD_ALL_FLAGS) return -EINVAL; } else { /* Allow huge page size encoding in flags. */ if (flags & ~(unsigned int)(MFD_ALL_FLAGS | (MFD_HUGE_MASK << MFD_HUGE_SHIFT))) return -EINVAL; } /* Invalid if both EXEC and NOEXEC_SEAL are set.*/ if ((flags & MFD_EXEC) && (flags & MFD_NOEXEC_SEAL)) return -EINVAL; return check_sysctl_memfd_noexec(flags_ptr); } static char *alloc_name(const char __user *uname) { int error; char *name; long len; name = kmalloc(NAME_MAX + 1, GFP_KERNEL); if (!name) return ERR_PTR(-ENOMEM); strcpy(name, MFD_NAME_PREFIX); /* returned length does not include terminating zero */ len = strncpy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, MFD_NAME_MAX_LEN + 1); if (len < 0) { error = -EFAULT; goto err_name; } else if (len > MFD_NAME_MAX_LEN) { error = -EINVAL; goto err_name; } return name; err_name: kfree(name); return ERR_PTR(error); } static struct file *alloc_file(const char *name, unsigned int flags) { unsigned int *file_seals; struct file *file; if (flags & MFD_HUGETLB) { file = hugetlb_file_setup(name, 0, VM_NORESERVE, HUGETLB_ANONHUGE_INODE, (flags >> MFD_HUGE_SHIFT) & MFD_HUGE_MASK); } else { file = shmem_file_setup(name, 0, VM_NORESERVE); } if (IS_ERR(file)) return file; file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE; file->f_flags |= O_LARGEFILE; if (flags & MFD_NOEXEC_SEAL) { struct inode *inode = file_inode(file); inode->i_mode &= ~0111; file_seals = memfd_file_seals_ptr(file); if (file_seals) { *file_seals &= ~F_SEAL_SEAL; *file_seals |= F_SEAL_EXEC; } } else if (flags & MFD_ALLOW_SEALING) { /* MFD_EXEC and MFD_ALLOW_SEALING are set */ file_seals = memfd_file_seals_ptr(file); if (file_seals) *file_seals &= ~F_SEAL_SEAL; } return file; } SYSCALL_DEFINE2(memfd_create, const char __user *, uname, unsigned int, flags) { struct file *file; int fd, error; char *name; error = sanitize_flags(&flags); if (error < 0) return error; name = alloc_name(uname); if (IS_ERR(name)) return PTR_ERR(name); fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0); if (fd < 0) { error = fd; goto err_name; } file = alloc_file(name, flags); if (IS_ERR(file)) { error = PTR_ERR(file); goto err_fd; } fd_install(fd, file); kfree(name); return fd; err_fd: put_unused_fd(fd); err_name: kfree(name); return error; } |
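/*
 * Illustrative sketch (not part of the kernel code above): the user-space
 * view of memfd_create() and the F_ADD_SEALS/F_GET_SEALS fcntl()s handled by
 * memfd_fcntl().  Assumes a glibc new enough to provide the memfd_create()
 * wrapper; the name "example" and the 4096-byte size are arbitrary choices
 * for this example.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int memfd_example(void)
{
	int fd = memfd_create("example", MFD_CLOEXEC | MFD_ALLOW_SEALING);
	int seals;

	if (fd < 0) {
		perror("memfd_create");
		return -1;
	}
	if (ftruncate(fd, 4096) < 0) {	/* size it before sealing growth/shrink */
		perror("ftruncate");
		close(fd);
		return -1;
	}
	/* memfd_add_seals() only ever adds seals; they can never be removed */
	if (fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL) < 0) {
		perror("F_ADD_SEALS");
		close(fd);
		return -1;
	}
	seals = fcntl(fd, F_GET_SEALS);	/* serviced by memfd_get_seals() */
	printf("active seals: %#x\n", seals);
	return fd;
}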
// SPDX-License-Identifier: GPL-2.0 /* * dax: direct host memory access * Copyright (C) 2020 Red Hat, Inc. */ #include "fuse_i.h" #include <linux/delay.h> #include <linux/dax.h> #include <linux/uio.h> #include <linux/pagemap.h> #include <linux/pfn_t.h> #include <linux/iomap.h> #include <linux/interval_tree.h> /* * Default memory range size. A power of 2 so it agrees with common FUSE_INIT * map_alignment values 4KB and 64KB. */ #define FUSE_DAX_SHIFT 21 #define FUSE_DAX_SZ (1 << FUSE_DAX_SHIFT) #define FUSE_DAX_PAGES (FUSE_DAX_SZ / PAGE_SIZE) /* Number of ranges reclaimer will try to free in one invocation */ #define FUSE_DAX_RECLAIM_CHUNK (10) /* * Dax memory reclaim threshold in percentage of total ranges.
When free * number of free ranges drops below this threshold, reclaim can trigger * Default is 20% */ #define FUSE_DAX_RECLAIM_THRESHOLD (20) /** Translation information for file offsets to DAX window offsets */ struct fuse_dax_mapping { /* Pointer to inode where this memory range is mapped */ struct inode *inode; /* Will connect in fcd->free_ranges to keep track of free memory */ struct list_head list; /* For interval tree in file/inode */ struct interval_tree_node itn; /* Will connect in fc->busy_ranges to keep track busy memory */ struct list_head busy_list; /** Position in DAX window */ u64 window_offset; /** Length of mapping, in bytes */ loff_t length; /* Is this mapping read-only or read-write */ bool writable; /* reference count when the mapping is used by dax iomap. */ refcount_t refcnt; }; /* Per-inode dax map */ struct fuse_inode_dax { /* Semaphore to protect modifications to the dmap tree */ struct rw_semaphore sem; /* Sorted rb tree of struct fuse_dax_mapping elements */ struct rb_root_cached tree; unsigned long nr; }; struct fuse_conn_dax { /* DAX device */ struct dax_device *dev; /* Lock protecting accessess to members of this structure */ spinlock_t lock; /* List of memory ranges which are busy */ unsigned long nr_busy_ranges; struct list_head busy_ranges; /* Worker to free up memory ranges */ struct delayed_work free_work; /* Wait queue for a dax range to become free */ wait_queue_head_t range_waitq; /* DAX Window Free Ranges */ long nr_free_ranges; struct list_head free_ranges; unsigned long nr_ranges; }; static inline struct fuse_dax_mapping * node_to_dmap(struct interval_tree_node *node) { if (!node) return NULL; return container_of(node, struct fuse_dax_mapping, itn); } static struct fuse_dax_mapping * alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode); static void __kick_dmap_free_worker(struct fuse_conn_dax *fcd, unsigned long delay_ms) { unsigned long free_threshold; /* If number of free ranges are below threshold, start reclaim */ free_threshold = max_t(unsigned long, fcd->nr_ranges * FUSE_DAX_RECLAIM_THRESHOLD / 100, 1); if (fcd->nr_free_ranges < free_threshold) queue_delayed_work(system_long_wq, &fcd->free_work, msecs_to_jiffies(delay_ms)); } static void kick_dmap_free_worker(struct fuse_conn_dax *fcd, unsigned long delay_ms) { spin_lock(&fcd->lock); __kick_dmap_free_worker(fcd, delay_ms); spin_unlock(&fcd->lock); } static struct fuse_dax_mapping *alloc_dax_mapping(struct fuse_conn_dax *fcd) { struct fuse_dax_mapping *dmap; spin_lock(&fcd->lock); dmap = list_first_entry_or_null(&fcd->free_ranges, struct fuse_dax_mapping, list); if (dmap) { list_del_init(&dmap->list); WARN_ON(fcd->nr_free_ranges <= 0); fcd->nr_free_ranges--; } __kick_dmap_free_worker(fcd, 0); spin_unlock(&fcd->lock); return dmap; } /* This assumes fcd->lock is held */ static void __dmap_remove_busy_list(struct fuse_conn_dax *fcd, struct fuse_dax_mapping *dmap) { list_del_init(&dmap->busy_list); WARN_ON(fcd->nr_busy_ranges == 0); fcd->nr_busy_ranges--; } static void dmap_remove_busy_list(struct fuse_conn_dax *fcd, struct fuse_dax_mapping *dmap) { spin_lock(&fcd->lock); __dmap_remove_busy_list(fcd, dmap); spin_unlock(&fcd->lock); } /* This assumes fcd->lock is held */ static void __dmap_add_to_free_pool(struct fuse_conn_dax *fcd, struct fuse_dax_mapping *dmap) { list_add_tail(&dmap->list, &fcd->free_ranges); fcd->nr_free_ranges++; wake_up(&fcd->range_waitq); } static void dmap_add_to_free_pool(struct fuse_conn_dax *fcd, struct fuse_dax_mapping *dmap) { /* Return 
fuse_dax_mapping to free list */ spin_lock(&fcd->lock); __dmap_add_to_free_pool(fcd, dmap); spin_unlock(&fcd->lock); } static int fuse_setup_one_mapping(struct inode *inode, unsigned long start_idx, struct fuse_dax_mapping *dmap, bool writable, bool upgrade) { struct fuse_mount *fm = get_fuse_mount(inode); struct fuse_conn_dax *fcd = fm->fc->dax; struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_setupmapping_in inarg; loff_t offset = start_idx << FUSE_DAX_SHIFT; FUSE_ARGS(args); ssize_t err; WARN_ON(fcd->nr_free_ranges < 0); /* Ask fuse daemon to setup mapping */ memset(&inarg, 0, sizeof(inarg)); inarg.foffset = offset; inarg.fh = -1; inarg.moffset = dmap->window_offset; inarg.len = FUSE_DAX_SZ; inarg.flags |= FUSE_SETUPMAPPING_FLAG_READ; if (writable) inarg.flags |= FUSE_SETUPMAPPING_FLAG_WRITE; args.opcode = FUSE_SETUPMAPPING; args.nodeid = fi->nodeid; args.in_numargs = 1; args.in_args[0].size = sizeof(inarg); args.in_args[0].value = &inarg; err = fuse_simple_request(fm, &args); if (err < 0) return err; dmap->writable = writable; if (!upgrade) { /* * We don't take a reference on inode. inode is valid right now * and when inode is going away, cleanup logic should first * cleanup dmap entries. */ dmap->inode = inode; dmap->itn.start = dmap->itn.last = start_idx; /* Protected by fi->dax->sem */ interval_tree_insert(&dmap->itn, &fi->dax->tree); fi->dax->nr++; spin_lock(&fcd->lock); list_add_tail(&dmap->busy_list, &fcd->busy_ranges); fcd->nr_busy_ranges++; spin_unlock(&fcd->lock); } return 0; } static int fuse_send_removemapping(struct inode *inode, struct fuse_removemapping_in *inargp, struct fuse_removemapping_one *remove_one) { struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_mount *fm = get_fuse_mount(inode); FUSE_ARGS(args); args.opcode = FUSE_REMOVEMAPPING; args.nodeid = fi->nodeid; args.in_numargs = 3; fuse_set_zero_arg0(&args); args.in_args[1].size = sizeof(*inargp); args.in_args[1].value = inargp; args.in_args[2].size = inargp->count * sizeof(*remove_one); args.in_args[2].value = remove_one; return fuse_simple_request(fm, &args); } static int dmap_removemapping_list(struct inode *inode, unsigned int num, struct list_head *to_remove) { struct fuse_removemapping_one *remove_one, *ptr; struct fuse_removemapping_in inarg; struct fuse_dax_mapping *dmap; int ret, i = 0, nr_alloc; nr_alloc = min_t(unsigned int, num, FUSE_REMOVEMAPPING_MAX_ENTRY); remove_one = kmalloc_array(nr_alloc, sizeof(*remove_one), GFP_NOFS); if (!remove_one) return -ENOMEM; ptr = remove_one; list_for_each_entry(dmap, to_remove, list) { ptr->moffset = dmap->window_offset; ptr->len = dmap->length; ptr++; i++; num--; if (i >= nr_alloc || num == 0) { memset(&inarg, 0, sizeof(inarg)); inarg.count = i; ret = fuse_send_removemapping(inode, &inarg, remove_one); if (ret) goto out; ptr = remove_one; i = 0; } } out: kfree(remove_one); return ret; } /* * Cleanup dmap entry and add back to free list. This should be called with * fcd->lock held. */ static void dmap_reinit_add_to_free_pool(struct fuse_conn_dax *fcd, struct fuse_dax_mapping *dmap) { pr_debug("fuse: freeing memory range start_idx=0x%lx end_idx=0x%lx window_offset=0x%llx length=0x%llx\n", dmap->itn.start, dmap->itn.last, dmap->window_offset, dmap->length); __dmap_remove_busy_list(fcd, dmap); dmap->inode = NULL; dmap->itn.start = dmap->itn.last = 0; __dmap_add_to_free_pool(fcd, dmap); } /* * Free inode dmap entries whose range falls inside [start, end]. * Does not take any locks. 
At this point of time it should only be * called from evict_inode() path where we know all dmap entries can be * reclaimed. */ static void inode_reclaim_dmap_range(struct fuse_conn_dax *fcd, struct inode *inode, loff_t start, loff_t end) { struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_dax_mapping *dmap, *n; int err, num = 0; LIST_HEAD(to_remove); unsigned long start_idx = start >> FUSE_DAX_SHIFT; unsigned long end_idx = end >> FUSE_DAX_SHIFT; struct interval_tree_node *node; while (1) { node = interval_tree_iter_first(&fi->dax->tree, start_idx, end_idx); if (!node) break; dmap = node_to_dmap(node); /* inode is going away. There should not be any users of dmap */ WARN_ON(refcount_read(&dmap->refcnt) > 1); interval_tree_remove(&dmap->itn, &fi->dax->tree); num++; list_add(&dmap->list, &to_remove); } /* Nothing to remove */ if (list_empty(&to_remove)) return; WARN_ON(fi->dax->nr < num); fi->dax->nr -= num; err = dmap_removemapping_list(inode, num, &to_remove); if (err && err != -ENOTCONN) { pr_warn("Failed to removemappings. start=0x%llx end=0x%llx\n", start, end); } spin_lock(&fcd->lock); list_for_each_entry_safe(dmap, n, &to_remove, list) { list_del_init(&dmap->list); dmap_reinit_add_to_free_pool(fcd, dmap); } spin_unlock(&fcd->lock); } static int dmap_removemapping_one(struct inode *inode, struct fuse_dax_mapping *dmap) { struct fuse_removemapping_one forget_one; struct fuse_removemapping_in inarg; memset(&inarg, 0, sizeof(inarg)); inarg.count = 1; memset(&forget_one, 0, sizeof(forget_one)); forget_one.moffset = dmap->window_offset; forget_one.len = dmap->length; return fuse_send_removemapping(inode, &inarg, &forget_one); } /* * It is called from evict_inode() and by that time inode is going away. So * this function does not take any locks like fi->dax->sem for traversing * that fuse inode interval tree. If that lock is taken then lock validator * complains of deadlock situation w.r.t fs_reclaim lock. */ void fuse_dax_inode_cleanup(struct inode *inode) { struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); /* * fuse_evict_inode() has already called truncate_inode_pages_final() * before we arrive here. So we should not have to worry about any * pages/exception entries still associated with inode. */ inode_reclaim_dmap_range(fc->dax, inode, 0, -1); WARN_ON(fi->dax->nr); } static void fuse_fill_iomap_hole(struct iomap *iomap, loff_t length) { iomap->addr = IOMAP_NULL_ADDR; iomap->length = length; iomap->type = IOMAP_HOLE; } static void fuse_fill_iomap(struct inode *inode, loff_t pos, loff_t length, struct iomap *iomap, struct fuse_dax_mapping *dmap, unsigned int flags) { loff_t offset, len; loff_t i_size = i_size_read(inode); offset = pos - (dmap->itn.start << FUSE_DAX_SHIFT); len = min(length, dmap->length - offset); /* If length is beyond end of file, truncate further */ if (pos + len > i_size) len = i_size - pos; if (len > 0) { iomap->addr = dmap->window_offset + offset; iomap->length = len; if (flags & IOMAP_FAULT) iomap->length = ALIGN(len, PAGE_SIZE); iomap->type = IOMAP_MAPPED; /* * increace refcnt so that reclaim code knows this dmap is in * use. This assumes fi->dax->sem mutex is held either * shared/exclusive. 
*/ refcount_inc(&dmap->refcnt); /* iomap->private should be NULL */ WARN_ON_ONCE(iomap->private); iomap->private = dmap; } else { /* Mapping beyond end of file is hole */ fuse_fill_iomap_hole(iomap, length); } } static int fuse_setup_new_dax_mapping(struct inode *inode, loff_t pos, loff_t length, unsigned int flags, struct iomap *iomap) { struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_conn_dax *fcd = fc->dax; struct fuse_dax_mapping *dmap, *alloc_dmap = NULL; int ret; bool writable = flags & IOMAP_WRITE; unsigned long start_idx = pos >> FUSE_DAX_SHIFT; struct interval_tree_node *node; /* * Can't do inline reclaim in fault path. We call * dax_layout_busy_page() before we free a range. And * fuse_wait_dax_page() drops mapping->invalidate_lock and requires it. * In fault path we enter with mapping->invalidate_lock held and can't * drop it. Also in fault path we hold mapping->invalidate_lock shared * and not exclusive, so that creates further issues with * fuse_wait_dax_page(). Hence return -EAGAIN and fuse_dax_fault() * will wait for a memory range to become free and retry. */ if (flags & IOMAP_FAULT) { alloc_dmap = alloc_dax_mapping(fcd); if (!alloc_dmap) return -EAGAIN; } else { alloc_dmap = alloc_dax_mapping_reclaim(fcd, inode); if (IS_ERR(alloc_dmap)) return PTR_ERR(alloc_dmap); } /* If we are here, we should have memory allocated */ if (WARN_ON(!alloc_dmap)) return -EIO; /* * Take write lock so that only one caller can try to setup mapping * and other waits. */ down_write(&fi->dax->sem); /* * We dropped lock. Check again if somebody else setup * mapping already. */ node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx); if (node) { dmap = node_to_dmap(node); fuse_fill_iomap(inode, pos, length, iomap, dmap, flags); dmap_add_to_free_pool(fcd, alloc_dmap); up_write(&fi->dax->sem); return 0; } /* Setup one mapping */ ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, alloc_dmap, writable, false); if (ret < 0) { dmap_add_to_free_pool(fcd, alloc_dmap); up_write(&fi->dax->sem); return ret; } fuse_fill_iomap(inode, pos, length, iomap, alloc_dmap, flags); up_write(&fi->dax->sem); return 0; } static int fuse_upgrade_dax_mapping(struct inode *inode, loff_t pos, loff_t length, unsigned int flags, struct iomap *iomap) { struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_dax_mapping *dmap; int ret; unsigned long idx = pos >> FUSE_DAX_SHIFT; struct interval_tree_node *node; /* * Take exclusive lock so that only one caller can try to setup * mapping and others wait. */ down_write(&fi->dax->sem); node = interval_tree_iter_first(&fi->dax->tree, idx, idx); /* We are holding either inode lock or invalidate_lock, and that should * ensure that dmap can't be truncated. We are holding a reference * on dmap and that should make sure it can't be reclaimed. So dmap * should still be there in tree despite the fact we dropped and * re-acquired the fi->dax->sem lock. */ ret = -EIO; if (WARN_ON(!node)) goto out_err; dmap = node_to_dmap(node); /* We took an extra reference on dmap to make sure its not reclaimd. * Now we hold fi->dax->sem lock and that reference is not needed * anymore. Drop it. */ if (refcount_dec_and_test(&dmap->refcnt)) { /* refcount should not hit 0. This object only goes * away when fuse connection goes away */ WARN_ON_ONCE(1); } /* Maybe another thread already upgraded mapping while we were not * holding lock. 
*/ if (dmap->writable) { ret = 0; goto out_fill_iomap; } ret = fuse_setup_one_mapping(inode, pos >> FUSE_DAX_SHIFT, dmap, true, true); if (ret < 0) goto out_err; out_fill_iomap: fuse_fill_iomap(inode, pos, length, iomap, dmap, flags); out_err: up_write(&fi->dax->sem); return ret; } /* This is just for DAX and the mapping is ephemeral, do not use it for other * purposes since there is no block device with a permanent mapping. */ static int fuse_iomap_begin(struct inode *inode, loff_t pos, loff_t length, unsigned int flags, struct iomap *iomap, struct iomap *srcmap) { struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_dax_mapping *dmap; bool writable = flags & IOMAP_WRITE; unsigned long start_idx = pos >> FUSE_DAX_SHIFT; struct interval_tree_node *node; /* We don't support FIEMAP */ if (WARN_ON(flags & IOMAP_REPORT)) return -EIO; iomap->offset = pos; iomap->flags = 0; iomap->bdev = NULL; iomap->dax_dev = fc->dax->dev; /* * Both read/write and mmap path can race here. So we need something * to make sure if we are setting up mapping, then other path waits * * For now, use a semaphore for this. It probably needs to be * optimized later. */ down_read(&fi->dax->sem); node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx); if (node) { dmap = node_to_dmap(node); if (writable && !dmap->writable) { /* Upgrade read-only mapping to read-write. This will * require exclusive fi->dax->sem lock as we don't want * two threads to be trying to this simultaneously * for same dmap. So drop shared lock and acquire * exclusive lock. * * Before dropping fi->dax->sem lock, take reference * on dmap so that its not freed by range reclaim. */ refcount_inc(&dmap->refcnt); up_read(&fi->dax->sem); pr_debug("%s: Upgrading mapping at offset 0x%llx length 0x%llx\n", __func__, pos, length); return fuse_upgrade_dax_mapping(inode, pos, length, flags, iomap); } else { fuse_fill_iomap(inode, pos, length, iomap, dmap, flags); up_read(&fi->dax->sem); return 0; } } else { up_read(&fi->dax->sem); pr_debug("%s: no mapping at offset 0x%llx length 0x%llx\n", __func__, pos, length); if (pos >= i_size_read(inode)) goto iomap_hole; return fuse_setup_new_dax_mapping(inode, pos, length, flags, iomap); } /* * If read beyond end of file happens, fs code seems to return * it as hole */ iomap_hole: fuse_fill_iomap_hole(iomap, length); pr_debug("%s returning hole mapping. pos=0x%llx length_asked=0x%llx length_returned=0x%llx\n", __func__, pos, length, iomap->length); return 0; } static int fuse_iomap_end(struct inode *inode, loff_t pos, loff_t length, ssize_t written, unsigned int flags, struct iomap *iomap) { struct fuse_dax_mapping *dmap = iomap->private; if (dmap) { if (refcount_dec_and_test(&dmap->refcnt)) { /* refcount should not hit 0. This object only goes * away when fuse connection goes away */ WARN_ON_ONCE(1); } } /* DAX writes beyond end-of-file aren't handled using iomap, so the * file size is unchanged and there is nothing to do here. */ return 0; } static const struct iomap_ops fuse_iomap_ops = { .iomap_begin = fuse_iomap_begin, .iomap_end = fuse_iomap_end, }; static void fuse_wait_dax_page(struct inode *inode) { filemap_invalidate_unlock(inode->i_mapping); schedule(); filemap_invalidate_lock(inode->i_mapping); } /* Should be called with mapping->invalidate_lock held exclusively. 
*/ int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start, u64 dmap_end) { return dax_break_layout(inode, dmap_start, dmap_end, fuse_wait_dax_page); } ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct inode *inode = file_inode(iocb->ki_filp); ssize_t ret; if (iocb->ki_flags & IOCB_NOWAIT) { if (!inode_trylock_shared(inode)) return -EAGAIN; } else { inode_lock_shared(inode); } ret = dax_iomap_rw(iocb, to, &fuse_iomap_ops); inode_unlock_shared(inode); /* TODO file_accessed(iocb->f_filp) */ return ret; } static bool file_extending_write(struct kiocb *iocb, struct iov_iter *from) { struct inode *inode = file_inode(iocb->ki_filp); return (iov_iter_rw(from) == WRITE && ((iocb->ki_pos) >= i_size_read(inode) || (iocb->ki_pos + iov_iter_count(from) > i_size_read(inode)))); } static ssize_t fuse_dax_direct_write(struct kiocb *iocb, struct iov_iter *from) { struct inode *inode = file_inode(iocb->ki_filp); struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb); ssize_t ret; ret = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE); fuse_write_update_attr(inode, iocb->ki_pos, ret); return ret; } ssize_t fuse_dax_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct inode *inode = file_inode(iocb->ki_filp); ssize_t ret; if (iocb->ki_flags & IOCB_NOWAIT) { if (!inode_trylock(inode)) return -EAGAIN; } else { inode_lock(inode); } ret = generic_write_checks(iocb, from); if (ret <= 0) goto out; ret = file_remove_privs(iocb->ki_filp); if (ret) goto out; /* TODO file_update_time() but we don't want metadata I/O */ /* Do not use dax for file extending writes as write and on * disk i_size increase are not atomic otherwise. */ if (file_extending_write(iocb, from)) ret = fuse_dax_direct_write(iocb, from); else ret = dax_iomap_rw(iocb, from, &fuse_iomap_ops); out: inode_unlock(inode); if (ret > 0) ret = generic_write_sync(iocb, ret); return ret; } static vm_fault_t __fuse_dax_fault(struct vm_fault *vmf, unsigned int order, bool write) { vm_fault_t ret; struct inode *inode = file_inode(vmf->vma->vm_file); struct super_block *sb = inode->i_sb; pfn_t pfn; int error = 0; struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_conn_dax *fcd = fc->dax; bool retry = false; if (write) sb_start_pagefault(sb); retry: if (retry && !(fcd->nr_free_ranges > 0)) wait_event(fcd->range_waitq, (fcd->nr_free_ranges > 0)); /* * We need to serialize against not only truncate but also against * fuse dax memory range reclaim. While a range is being reclaimed, * we do not want any read/write/mmap to make progress and try * to populate page cache or access memory we are trying to free. 
*/ filemap_invalidate_lock_shared(inode->i_mapping); ret = dax_iomap_fault(vmf, order, &pfn, &error, &fuse_iomap_ops); if ((ret & VM_FAULT_ERROR) && error == -EAGAIN) { error = 0; retry = true; filemap_invalidate_unlock_shared(inode->i_mapping); goto retry; } if (ret & VM_FAULT_NEEDDSYNC) ret = dax_finish_sync_fault(vmf, order, pfn); filemap_invalidate_unlock_shared(inode->i_mapping); if (write) sb_end_pagefault(sb); return ret; } static vm_fault_t fuse_dax_fault(struct vm_fault *vmf) { return __fuse_dax_fault(vmf, 0, vmf->flags & FAULT_FLAG_WRITE); } static vm_fault_t fuse_dax_huge_fault(struct vm_fault *vmf, unsigned int order) { return __fuse_dax_fault(vmf, order, vmf->flags & FAULT_FLAG_WRITE); } static vm_fault_t fuse_dax_page_mkwrite(struct vm_fault *vmf) { return __fuse_dax_fault(vmf, 0, true); } static vm_fault_t fuse_dax_pfn_mkwrite(struct vm_fault *vmf) { return __fuse_dax_fault(vmf, 0, true); } static const struct vm_operations_struct fuse_dax_vm_ops = { .fault = fuse_dax_fault, .huge_fault = fuse_dax_huge_fault, .page_mkwrite = fuse_dax_page_mkwrite, .pfn_mkwrite = fuse_dax_pfn_mkwrite, }; int fuse_dax_mmap(struct file *file, struct vm_area_struct *vma) { file_accessed(file); vma->vm_ops = &fuse_dax_vm_ops; vm_flags_set(vma, VM_MIXEDMAP | VM_HUGEPAGE); return 0; } static int dmap_writeback_invalidate(struct inode *inode, struct fuse_dax_mapping *dmap) { int ret; loff_t start_pos = dmap->itn.start << FUSE_DAX_SHIFT; loff_t end_pos = (start_pos + FUSE_DAX_SZ - 1); ret = filemap_fdatawrite_range(inode->i_mapping, start_pos, end_pos); if (ret) { pr_debug("fuse: filemap_fdatawrite_range() failed. err=%d start_pos=0x%llx, end_pos=0x%llx\n", ret, start_pos, end_pos); return ret; } ret = invalidate_inode_pages2_range(inode->i_mapping, start_pos >> PAGE_SHIFT, end_pos >> PAGE_SHIFT); if (ret) pr_debug("fuse: invalidate_inode_pages2_range() failed err=%d\n", ret); return ret; } static int reclaim_one_dmap_locked(struct inode *inode, struct fuse_dax_mapping *dmap) { int ret; struct fuse_inode *fi = get_fuse_inode(inode); /* * igrab() was done to make sure inode won't go under us, and this * further avoids the race with evict(). */ ret = dmap_writeback_invalidate(inode, dmap); if (ret) return ret; /* Remove dax mapping from inode interval tree now */ interval_tree_remove(&dmap->itn, &fi->dax->tree); fi->dax->nr--; /* It is possible that umount/shutdown has killed the fuse connection * and worker thread is trying to reclaim memory in parallel. Don't * warn in that case. */ ret = dmap_removemapping_one(inode, dmap); if (ret && ret != -ENOTCONN) { pr_warn("Failed to remove mapping. offset=0x%llx len=0x%llx ret=%d\n", dmap->window_offset, dmap->length, ret); } return 0; } /* Find first mapped dmap for an inode and return file offset. Caller needs * to hold fi->dax->sem lock either shared or exclusive. */ static struct fuse_dax_mapping *inode_lookup_first_dmap(struct inode *inode) { struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_dax_mapping *dmap; struct interval_tree_node *node; for (node = interval_tree_iter_first(&fi->dax->tree, 0, -1); node; node = interval_tree_iter_next(node, 0, -1)) { dmap = node_to_dmap(node); /* still in use. */ if (refcount_read(&dmap->refcnt) > 1) continue; return dmap; } return NULL; } /* * Find first mapping in the tree and free it and return it. Do not add * it back to free pool. 
*/ static struct fuse_dax_mapping * inode_inline_reclaim_one_dmap(struct fuse_conn_dax *fcd, struct inode *inode, bool *retry) { struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_dax_mapping *dmap; u64 dmap_start, dmap_end; unsigned long start_idx; int ret; struct interval_tree_node *node; filemap_invalidate_lock(inode->i_mapping); /* Lookup a dmap and corresponding file offset to reclaim. */ down_read(&fi->dax->sem); dmap = inode_lookup_first_dmap(inode); if (dmap) { start_idx = dmap->itn.start; dmap_start = start_idx << FUSE_DAX_SHIFT; dmap_end = dmap_start + FUSE_DAX_SZ - 1; } up_read(&fi->dax->sem); if (!dmap) goto out_mmap_sem; /* * Make sure there are no references to inode pages using * get_user_pages() */ ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end); if (ret) { pr_debug("fuse: fuse_dax_break_layouts() failed. err=%d\n", ret); dmap = ERR_PTR(ret); goto out_mmap_sem; } down_write(&fi->dax->sem); node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx); /* Range already got reclaimed by somebody else */ if (!node) { if (retry) *retry = true; goto out_write_dmap_sem; } dmap = node_to_dmap(node); /* still in use. */ if (refcount_read(&dmap->refcnt) > 1) { dmap = NULL; if (retry) *retry = true; goto out_write_dmap_sem; } ret = reclaim_one_dmap_locked(inode, dmap); if (ret < 0) { dmap = ERR_PTR(ret); goto out_write_dmap_sem; } /* Clean up dmap. Do not add back to free list */ dmap_remove_busy_list(fcd, dmap); dmap->inode = NULL; dmap->itn.start = dmap->itn.last = 0; pr_debug("fuse: %s: inline reclaimed memory range. inode=%p, window_offset=0x%llx, length=0x%llx\n", __func__, inode, dmap->window_offset, dmap->length); out_write_dmap_sem: up_write(&fi->dax->sem); out_mmap_sem: filemap_invalidate_unlock(inode->i_mapping); return dmap; } static struct fuse_dax_mapping * alloc_dax_mapping_reclaim(struct fuse_conn_dax *fcd, struct inode *inode) { struct fuse_dax_mapping *dmap; struct fuse_inode *fi = get_fuse_inode(inode); while (1) { bool retry = false; dmap = alloc_dax_mapping(fcd); if (dmap) return dmap; dmap = inode_inline_reclaim_one_dmap(fcd, inode, &retry); /* * Either we got a mapping or it is an error, return in both * the cases. */ if (dmap) return dmap; /* If we could not reclaim a mapping because it * had a reference or some other temporary failure, * Try again. We want to give up inline reclaim only * if there is no range assigned to this node. Otherwise * if a deadlock is possible if we sleep with * mapping->invalidate_lock held and worker to free memory * can't make progress due to unavailability of * mapping->invalidate_lock. So sleep only if fi->dax->nr=0 */ if (retry) continue; /* * There are no mappings which can be reclaimed. Wait for one. * We are not holding fi->dax->sem. So it is possible * that range gets added now. But as we are not holding * mapping->invalidate_lock, worker should still be able to * free up a range and wake us up. */ if (!fi->dax->nr && !(fcd->nr_free_ranges > 0)) { if (wait_event_killable_exclusive(fcd->range_waitq, (fcd->nr_free_ranges > 0))) { return ERR_PTR(-EINTR); } } } } static int lookup_and_reclaim_dmap_locked(struct fuse_conn_dax *fcd, struct inode *inode, unsigned long start_idx) { int ret; struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_dax_mapping *dmap; struct interval_tree_node *node; /* Find fuse dax mapping at file offset inode. 
*/ node = interval_tree_iter_first(&fi->dax->tree, start_idx, start_idx); /* Range already got cleaned up by somebody else */ if (!node) return 0; dmap = node_to_dmap(node); /* still in use. */ if (refcount_read(&dmap->refcnt) > 1) return 0; ret = reclaim_one_dmap_locked(inode, dmap); if (ret < 0) return ret; /* Cleanup dmap entry and add back to free list */ spin_lock(&fcd->lock); dmap_reinit_add_to_free_pool(fcd, dmap); spin_unlock(&fcd->lock); return ret; } /* * Free a range of memory. * Locking: * 1. Take mapping->invalidate_lock to block dax faults. * 2. Take fi->dax->sem to protect interval tree and also to make sure * read/write can not reuse a dmap which we might be freeing. */ static int lookup_and_reclaim_dmap(struct fuse_conn_dax *fcd, struct inode *inode, unsigned long start_idx, unsigned long end_idx) { int ret; struct fuse_inode *fi = get_fuse_inode(inode); loff_t dmap_start = start_idx << FUSE_DAX_SHIFT; loff_t dmap_end = (dmap_start + FUSE_DAX_SZ) - 1; filemap_invalidate_lock(inode->i_mapping); ret = fuse_dax_break_layouts(inode, dmap_start, dmap_end); if (ret) { pr_debug("virtio_fs: fuse_dax_break_layouts() failed. err=%d\n", ret); goto out_mmap_sem; } down_write(&fi->dax->sem); ret = lookup_and_reclaim_dmap_locked(fcd, inode, start_idx); up_write(&fi->dax->sem); out_mmap_sem: filemap_invalidate_unlock(inode->i_mapping); return ret; } static int try_to_free_dmap_chunks(struct fuse_conn_dax *fcd, unsigned long nr_to_free) { struct fuse_dax_mapping *dmap, *pos, *temp; int ret, nr_freed = 0; unsigned long start_idx = 0, end_idx = 0; struct inode *inode = NULL; /* Pick first busy range and free it for now*/ while (1) { if (nr_freed >= nr_to_free) break; dmap = NULL; spin_lock(&fcd->lock); if (!fcd->nr_busy_ranges) { spin_unlock(&fcd->lock); return 0; } list_for_each_entry_safe(pos, temp, &fcd->busy_ranges, busy_list) { /* skip this range if it's in use. */ if (refcount_read(&pos->refcnt) > 1) continue; inode = igrab(pos->inode); /* * This inode is going away. That will free * up all the ranges anyway, continue to * next range. */ if (!inode) continue; /* * Take this element off list and add it tail. If * this element can't be freed, it will help with * selecting new element in next iteration of loop. 
*/ dmap = pos; list_move_tail(&dmap->busy_list, &fcd->busy_ranges); start_idx = end_idx = dmap->itn.start; break; } spin_unlock(&fcd->lock); if (!dmap) return 0; ret = lookup_and_reclaim_dmap(fcd, inode, start_idx, end_idx); iput(inode); if (ret) return ret; nr_freed++; } return 0; } static void fuse_dax_free_mem_worker(struct work_struct *work) { int ret; struct fuse_conn_dax *fcd = container_of(work, struct fuse_conn_dax, free_work.work); ret = try_to_free_dmap_chunks(fcd, FUSE_DAX_RECLAIM_CHUNK); if (ret) { pr_debug("fuse: try_to_free_dmap_chunks() failed with err=%d\n", ret); } /* If number of free ranges are still below threshold, requeue */ kick_dmap_free_worker(fcd, 1); } static void fuse_free_dax_mem_ranges(struct list_head *mem_list) { struct fuse_dax_mapping *range, *temp; /* Free All allocated elements */ list_for_each_entry_safe(range, temp, mem_list, list) { list_del(&range->list); if (!list_empty(&range->busy_list)) list_del(&range->busy_list); kfree(range); } } void fuse_dax_conn_free(struct fuse_conn *fc) { if (fc->dax) { fuse_free_dax_mem_ranges(&fc->dax->free_ranges); kfree(fc->dax); fc->dax = NULL; } } static int fuse_dax_mem_range_init(struct fuse_conn_dax *fcd) { long nr_pages, nr_ranges; struct fuse_dax_mapping *range; int ret, id; size_t dax_size = -1; unsigned long i; init_waitqueue_head(&fcd->range_waitq); INIT_LIST_HEAD(&fcd->free_ranges); INIT_LIST_HEAD(&fcd->busy_ranges); INIT_DELAYED_WORK(&fcd->free_work, fuse_dax_free_mem_worker); id = dax_read_lock(); nr_pages = dax_direct_access(fcd->dev, 0, PHYS_PFN(dax_size), DAX_ACCESS, NULL, NULL); dax_read_unlock(id); if (nr_pages < 0) { pr_debug("dax_direct_access() returned %ld\n", nr_pages); return nr_pages; } nr_ranges = nr_pages/FUSE_DAX_PAGES; pr_debug("%s: dax mapped %ld pages. nr_ranges=%ld\n", __func__, nr_pages, nr_ranges); for (i = 0; i < nr_ranges; i++) { range = kzalloc(sizeof(struct fuse_dax_mapping), GFP_KERNEL); ret = -ENOMEM; if (!range) goto out_err; /* TODO: This offset only works if virtio-fs driver is not * having some memory hidden at the beginning. 
This needs * better handling */ range->window_offset = i * FUSE_DAX_SZ; range->length = FUSE_DAX_SZ; INIT_LIST_HEAD(&range->busy_list); refcount_set(&range->refcnt, 1); list_add_tail(&range->list, &fcd->free_ranges); } fcd->nr_free_ranges = nr_ranges; fcd->nr_ranges = nr_ranges; return 0; out_err: /* Free All allocated elements */ fuse_free_dax_mem_ranges(&fcd->free_ranges); return ret; } int fuse_dax_conn_alloc(struct fuse_conn *fc, enum fuse_dax_mode dax_mode, struct dax_device *dax_dev) { struct fuse_conn_dax *fcd; int err; fc->dax_mode = dax_mode; if (!dax_dev) return 0; fcd = kzalloc(sizeof(*fcd), GFP_KERNEL); if (!fcd) return -ENOMEM; spin_lock_init(&fcd->lock); fcd->dev = dax_dev; err = fuse_dax_mem_range_init(fcd); if (err) { kfree(fcd); return err; } fc->dax = fcd; return 0; } bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi) { struct fuse_conn *fc = get_fuse_conn_super(sb); fi->dax = NULL; if (fc->dax) { fi->dax = kzalloc(sizeof(*fi->dax), GFP_KERNEL_ACCOUNT); if (!fi->dax) return false; init_rwsem(&fi->dax->sem); fi->dax->tree = RB_ROOT_CACHED; } return true; } static const struct address_space_operations fuse_dax_file_aops = { .direct_IO = noop_direct_IO, .dirty_folio = noop_dirty_folio, }; static bool fuse_should_enable_dax(struct inode *inode, unsigned int flags) { struct fuse_conn *fc = get_fuse_conn(inode); enum fuse_dax_mode dax_mode = fc->dax_mode; if (dax_mode == FUSE_DAX_NEVER) return false; /* * fc->dax may be NULL in 'inode' mode when filesystem device doesn't * support DAX, in which case it will silently fallback to 'never' mode. */ if (!fc->dax) return false; if (dax_mode == FUSE_DAX_ALWAYS) return true; /* dax_mode is FUSE_DAX_INODE* */ return fc->inode_dax && (flags & FUSE_ATTR_DAX); } void fuse_dax_inode_init(struct inode *inode, unsigned int flags) { if (!fuse_should_enable_dax(inode, flags)) return; inode->i_flags |= S_DAX; inode->i_data.a_ops = &fuse_dax_file_aops; } void fuse_dax_dontcache(struct inode *inode, unsigned int flags) { struct fuse_conn *fc = get_fuse_conn(inode); if (fuse_is_inode_dax_mode(fc->dax_mode) && ((bool) IS_DAX(inode) != (bool) (flags & FUSE_ATTR_DAX))) d_mark_dontcache(inode); } bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment) { if (fc->dax && (map_alignment > FUSE_DAX_SHIFT)) { pr_warn("FUSE: map_alignment %u incompatible with dax mem range size %u\n", map_alignment, FUSE_DAX_SZ); return false; } return true; } void fuse_dax_cancel_work(struct fuse_conn *fc) { struct fuse_conn_dax *fcd = fc->dax; if (fcd) cancel_delayed_work_sync(&fcd->free_work); } EXPORT_SYMBOL_GPL(fuse_dax_cancel_work); |
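/*
 * Standalone sketch (plain userspace C, not kernel code): it only replays the
 * range arithmetic used by the DAX code above, so the mapping between a file
 * offset and a 2 MiB DAX window range is easy to follow. The window size of
 * 512 ranges is made up for illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define FUSE_DAX_SHIFT	21			/* 2 MiB ranges, as above */
#define FUSE_DAX_SZ	(1ULL << FUSE_DAX_SHIFT)

int main(void)
{
	uint64_t pos = 5 * 1024 * 1024 + 123;	/* arbitrary file offset */
	uint64_t start_idx = pos >> FUSE_DAX_SHIFT;
	uint64_t range_start = start_idx * FUSE_DAX_SZ;

	unsigned long nr_ranges = 512;		/* hypothetical 1 GiB window */
	unsigned long free_threshold = nr_ranges * 20 / 100;	/* 20%, as FUSE_DAX_RECLAIM_THRESHOLD */

	if (free_threshold < 1)
		free_threshold = 1;

	printf("offset %llu falls in range %llu, %llu bytes past its start\n",
	       (unsigned long long)pos, (unsigned long long)start_idx,
	       (unsigned long long)(pos - range_start));
	printf("reclaim worker kicks in below %lu free ranges out of %lu\n",
	       free_threshold, nr_ranges);
	return 0;
}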
// SPDX-License-Identifier: GPL-2.0-or-later /* Local endpoint object management * * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/net.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/udp.h> #include <linux/ip.h> #include <linux/hashtable.h> #include <net/sock.h> #include <net/udp.h> #include <net/udp_tunnel.h> #include <net/af_rxrpc.h> #include "ar-internal.h" static void rxrpc_local_rcu(struct rcu_head *); /* * Handle an ICMP/ICMP6 error turning up at the tunnel. Push it through the * usual mechanism so that it gets parsed and presented through the UDP * socket's error_report(). */ static void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb, int err, __be16 port, u32 info, u8 *payload) { if (ip_hdr(skb)->version == IPVERSION) return ip_icmp_error(sk, skb, err, port, info, payload); if (IS_ENABLED(CONFIG_AF_RXRPC_IPV6)) return ipv6_icmp_error(sk, skb, err, port, info, payload); } /* * Set or clear the Don't Fragment flag on a socket. */ void rxrpc_local_dont_fragment(const struct rxrpc_local *local, bool set) { if (set) ip_sock_set_mtu_discover(local->socket->sk, IP_PMTUDISC_DO); else ip_sock_set_mtu_discover(local->socket->sk, IP_PMTUDISC_DONT); } /* * Compare a local to an address. Return -ve, 0 or +ve to indicate less than, * same or greater than. * * We explicitly don't compare the RxRPC service ID as we want to reject * conflicting uses by differing services.
Further, we don't want to share * addresses with different options (IPv6), so we don't compare those bits * either. */ static long rxrpc_local_cmp_key(const struct rxrpc_local *local, const struct sockaddr_rxrpc *srx) { long diff; diff = ((local->srx.transport_type - srx->transport_type) ?: (local->srx.transport_len - srx->transport_len) ?: (local->srx.transport.family - srx->transport.family)); if (diff != 0) return diff; switch (srx->transport.family) { case AF_INET: /* If the choice of UDP port is left up to the transport, then * the endpoint record doesn't match. */ return ((u16 __force)local->srx.transport.sin.sin_port - (u16 __force)srx->transport.sin.sin_port) ?: memcmp(&local->srx.transport.sin.sin_addr, &srx->transport.sin.sin_addr, sizeof(struct in_addr)); #ifdef CONFIG_AF_RXRPC_IPV6 case AF_INET6: /* If the choice of UDP6 port is left up to the transport, then * the endpoint record doesn't match. */ return ((u16 __force)local->srx.transport.sin6.sin6_port - (u16 __force)srx->transport.sin6.sin6_port) ?: memcmp(&local->srx.transport.sin6.sin6_addr, &srx->transport.sin6.sin6_addr, sizeof(struct in6_addr)); #endif default: BUG(); } } static void rxrpc_client_conn_reap_timeout(struct timer_list *timer) { struct rxrpc_local *local = container_of(timer, struct rxrpc_local, client_conn_reap_timer); if (!local->kill_all_client_conns && test_and_set_bit(RXRPC_CLIENT_CONN_REAP_TIMER, &local->client_conn_flags)) rxrpc_wake_up_io_thread(local); } /* * Allocate a new local endpoint. */ static struct rxrpc_local *rxrpc_alloc_local(struct net *net, const struct sockaddr_rxrpc *srx) { struct rxrpc_local *local; u32 tmp; local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL); if (local) { refcount_set(&local->ref, 1); atomic_set(&local->active_users, 1); local->net = net; local->rxnet = rxrpc_net(net); INIT_HLIST_NODE(&local->link); init_completion(&local->io_thread_ready); #ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY skb_queue_head_init(&local->rx_delay_queue); #endif skb_queue_head_init(&local->rx_queue); INIT_LIST_HEAD(&local->conn_attend_q); INIT_LIST_HEAD(&local->call_attend_q); local->client_bundles = RB_ROOT; spin_lock_init(&local->client_bundles_lock); local->kill_all_client_conns = false; INIT_LIST_HEAD(&local->idle_client_conns); timer_setup(&local->client_conn_reap_timer, rxrpc_client_conn_reap_timeout, 0); spin_lock_init(&local->lock); rwlock_init(&local->services_lock); local->debug_id = atomic_inc_return(&rxrpc_debug_id); memcpy(&local->srx, srx, sizeof(*srx)); local->srx.srx_service = 0; idr_init(&local->conn_ids); get_random_bytes(&tmp, sizeof(tmp)); tmp &= 0x3fffffff; if (tmp == 0) tmp = 1; idr_set_cursor(&local->conn_ids, tmp); INIT_LIST_HEAD(&local->new_client_calls); spin_lock_init(&local->client_call_lock); trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, 1); } _leave(" = %p", local); return local; } /* * create the local socket * - must be called with rxrpc_local_mutex locked */ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) { struct udp_tunnel_sock_cfg tuncfg = {NULL}; struct sockaddr_rxrpc *srx = &local->srx; struct udp_port_cfg udp_conf = {0}; struct task_struct *io_thread; struct sock *usk; int ret; _enter("%p{%d,%d}", local, srx->transport_type, srx->transport.family); udp_conf.family = srx->transport.family; udp_conf.use_udp_checksums = true; if (udp_conf.family == AF_INET) { udp_conf.local_ip = srx->transport.sin.sin_addr; udp_conf.local_udp_port = srx->transport.sin.sin_port; #if IS_ENABLED(CONFIG_AF_RXRPC_IPV6) } else { udp_conf.local_ip6 
= srx->transport.sin6.sin6_addr; udp_conf.local_udp_port = srx->transport.sin6.sin6_port; udp_conf.use_udp6_tx_checksums = true; udp_conf.use_udp6_rx_checksums = true; #endif } ret = udp_sock_create(net, &udp_conf, &local->socket); if (ret < 0) { _leave(" = %d [socket]", ret); return ret; } tuncfg.encap_type = UDP_ENCAP_RXRPC; tuncfg.encap_rcv = rxrpc_encap_rcv; tuncfg.encap_err_rcv = rxrpc_encap_err_rcv; tuncfg.sk_user_data = local; setup_udp_tunnel_sock(net, local->socket, &tuncfg); /* set the socket up */ usk = local->socket->sk; usk->sk_error_report = rxrpc_error_report; switch (srx->transport.family) { case AF_INET6: /* we want to receive ICMPv6 errors */ ip6_sock_set_recverr(usk); /* Fall through and set IPv4 options too otherwise we don't get * errors from IPv4 packets sent through the IPv6 socket. */ fallthrough; case AF_INET: /* we want to receive ICMP errors */ ip_sock_set_recverr(usk); /* we want to set the don't fragment bit */ rxrpc_local_dont_fragment(local, true); break; default: BUG(); } io_thread = kthread_run(rxrpc_io_thread, local, "krxrpcio/%u", ntohs(udp_conf.local_udp_port)); if (IS_ERR(io_thread)) { ret = PTR_ERR(io_thread); goto error_sock; } wait_for_completion(&local->io_thread_ready); WRITE_ONCE(local->io_thread, io_thread); _leave(" = 0"); return 0; error_sock: kernel_sock_shutdown(local->socket, SHUT_RDWR); local->socket->sk->sk_user_data = NULL; sock_release(local->socket); local->socket = NULL; return ret; } /* * Look up or create a new local endpoint using the specified local address. */ struct rxrpc_local *rxrpc_lookup_local(struct net *net, const struct sockaddr_rxrpc *srx) { struct rxrpc_local *local; struct rxrpc_net *rxnet = rxrpc_net(net); struct hlist_node *cursor; long diff; int ret; _enter("{%d,%d,%pISp}", srx->transport_type, srx->transport.family, &srx->transport); mutex_lock(&rxnet->local_mutex); hlist_for_each(cursor, &rxnet->local_endpoints) { local = hlist_entry(cursor, struct rxrpc_local, link); diff = rxrpc_local_cmp_key(local, srx); if (diff != 0) continue; /* Services aren't allowed to share transport sockets, so * reject that here. It is possible that the object is dying - * but it may also still have the local transport address that * we want bound. */ if (srx->srx_service) { local = NULL; goto addr_in_use; } /* Found a match. We want to replace a dying object. * Attempting to bind the transport socket may still fail if * we're attempting to use a local address that the dying * object is still using. */ if (!rxrpc_use_local(local, rxrpc_local_use_lookup)) break; goto found; } local = rxrpc_alloc_local(net, srx); if (!local) goto nomem; ret = rxrpc_open_socket(local, net); if (ret < 0) goto sock_error; if (cursor) { hlist_replace_rcu(cursor, &local->link); cursor->pprev = NULL; } else { hlist_add_head_rcu(&local->link, &rxnet->local_endpoints); } found: mutex_unlock(&rxnet->local_mutex); _leave(" = %p", local); return local; nomem: ret = -ENOMEM; sock_error: mutex_unlock(&rxnet->local_mutex); if (local) call_rcu(&local->rcu, rxrpc_local_rcu); _leave(" = %d", ret); return ERR_PTR(ret); addr_in_use: mutex_unlock(&rxnet->local_mutex); _leave(" = -EADDRINUSE"); return ERR_PTR(-EADDRINUSE); } /* * Get a ref on a local endpoint. */ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local, enum rxrpc_local_trace why) { int r, u; u = atomic_read(&local->active_users); __refcount_inc(&local->ref, &r); trace_rxrpc_local(local->debug_id, why, r + 1, u); return local; } /* * Get a ref on a local endpoint unless its usage has already reached 0. 
*/ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local, enum rxrpc_local_trace why) { int r, u; if (local && __refcount_inc_not_zero(&local->ref, &r)) { u = atomic_read(&local->active_users); trace_rxrpc_local(local->debug_id, why, r + 1, u); return local; } return NULL; } /* * Drop a ref on a local endpoint. */ void rxrpc_put_local(struct rxrpc_local *local, enum rxrpc_local_trace why) { unsigned int debug_id; bool dead; int r, u; if (local) { debug_id = local->debug_id; u = atomic_read(&local->active_users); dead = __refcount_dec_and_test(&local->ref, &r); trace_rxrpc_local(debug_id, why, r, u); if (dead) call_rcu(&local->rcu, rxrpc_local_rcu); } } /* * Start using a local endpoint. */ struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local, enum rxrpc_local_trace why) { local = rxrpc_get_local_maybe(local, rxrpc_local_get_for_use); if (!local) return NULL; if (!__rxrpc_use_local(local, why)) { rxrpc_put_local(local, rxrpc_local_put_for_use); return NULL; } return local; } /* * Cease using a local endpoint. Once the number of active users reaches 0, we * start the closure of the transport in the I/O thread.. */ void rxrpc_unuse_local(struct rxrpc_local *local, enum rxrpc_local_trace why) { unsigned int debug_id; int r, u; if (local) { debug_id = local->debug_id; r = refcount_read(&local->ref); u = atomic_dec_return(&local->active_users); trace_rxrpc_local(debug_id, why, r, u); if (u == 0) kthread_stop(local->io_thread); } } /* * Destroy a local endpoint's socket and then hand the record to RCU to dispose * of. * * Closing the socket cannot be done from bottom half context or RCU callback * context because it might sleep. */ void rxrpc_destroy_local(struct rxrpc_local *local) { struct socket *socket = local->socket; struct rxrpc_net *rxnet = local->rxnet; _enter("%d", local->debug_id); local->dead = true; mutex_lock(&rxnet->local_mutex); hlist_del_init_rcu(&local->link); mutex_unlock(&rxnet->local_mutex); rxrpc_clean_up_local_conns(local); rxrpc_service_connection_reaper(&rxnet->service_conn_reaper); ASSERT(!local->service); if (socket) { local->socket = NULL; kernel_sock_shutdown(socket, SHUT_RDWR); socket->sk->sk_user_data = NULL; sock_release(socket); } /* At this point, there should be no more packets coming in to the * local endpoint. */ #ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY rxrpc_purge_queue(&local->rx_delay_queue); #endif rxrpc_purge_queue(&local->rx_queue); rxrpc_purge_client_connections(local); page_frag_cache_drain(&local->tx_alloc); } /* * Destroy a local endpoint after the RCU grace period expires. */ static void rxrpc_local_rcu(struct rcu_head *rcu) { struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu); rxrpc_see_local(local, rxrpc_local_free); kfree(local); } /* * Verify the local endpoint list is empty by this point. */ void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet) { struct rxrpc_local *local; _enter(""); flush_workqueue(rxrpc_workqueue); if (!hlist_empty(&rxnet->local_endpoints)) { mutex_lock(&rxnet->local_mutex); hlist_for_each_entry(local, &rxnet->local_endpoints, link) { pr_err("AF_RXRPC: Leaked local %p {%d}\n", local, refcount_read(&local->ref)); } mutex_unlock(&rxnet->local_mutex); BUG(); } } |
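/*
 * Standalone sketch (not kernel code): rxrpc_local_cmp_key() above chains its
 * comparisons with the GNU "a ?: b" operator, which yields 'a' unless 'a'
 * evaluates to zero. The same idiom applied to an invented three-field key
 * looks like this; it needs GCC or Clang, just as the kernel sources do.
 */
#include <stdio.h>
#include <string.h>

struct endpoint_key {
	int		family;
	unsigned short	port;
	unsigned char	addr[4];
};

/* Return <0, 0 or >0; a later field only matters when earlier ones tie. */
static long endpoint_key_cmp(const struct endpoint_key *a,
			     const struct endpoint_key *b)
{
	return ((long)(a->family - b->family) ?:
		(long)(a->port - b->port) ?:
		(long)memcmp(a->addr, b->addr, sizeof(a->addr)));
}

int main(void)
{
	struct endpoint_key a = { 2, 7000, { 10, 0, 0, 1 } };
	struct endpoint_key b = { 2, 7000, { 10, 0, 0, 2 } };

	/* Family and port tie, so the address comparison decides. */
	printf("cmp = %ld\n", endpoint_key_cmp(&a, &b));
	return 0;
}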
// SPDX-License-Identifier: GPL-2.0-or-later /* * Nano River Technologies viperboard i2c controller driver * * (C) 2012 by Lemonage GmbH * Author: Lars Poeschel <poeschel@lemonage.de> * All rights reserved.
*/ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/usb.h> #include <linux/i2c.h> #include <linux/mfd/viperboard.h> struct vprbrd_i2c { struct i2c_adapter i2c; u8 bus_freq_param; }; /* i2c bus frequency module parameter */ static u8 i2c_bus_param; static unsigned int i2c_bus_freq = 100; module_param(i2c_bus_freq, int, 0); MODULE_PARM_DESC(i2c_bus_freq, "i2c bus frequency in khz (default is 100) valid values: 10, 100, 200, 400, 1000, 3000, 6000"); static int vprbrd_i2c_status(struct i2c_adapter *i2c, struct vprbrd_i2c_status *status, bool prev_error) { u16 bytes_xfer; int ret; struct vprbrd *vb = (struct vprbrd *)i2c->algo_data; /* check for protocol error */ bytes_xfer = sizeof(struct vprbrd_i2c_status); ret = usb_control_msg(vb->usb_dev, usb_rcvctrlpipe(vb->usb_dev, 0), VPRBRD_USB_REQUEST_I2C, VPRBRD_USB_TYPE_IN, 0x0000, 0x0000, status, bytes_xfer, VPRBRD_USB_TIMEOUT_MS); if (ret != bytes_xfer) prev_error = true; if (prev_error) { dev_err(&i2c->dev, "failure in usb communication\n"); return -EREMOTEIO; } dev_dbg(&i2c->dev, " status = %d\n", status->status); if (status->status != 0x00) { dev_err(&i2c->dev, "failure: i2c protocol error\n"); return -EPROTO; } return 0; } static int vprbrd_i2c_receive(struct usb_device *usb_dev, struct vprbrd_i2c_read_msg *rmsg, int bytes_xfer) { int ret, bytes_actual; int error = 0; /* send the read request */ ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, VPRBRD_EP_OUT), rmsg, sizeof(struct vprbrd_i2c_read_hdr), &bytes_actual, VPRBRD_USB_TIMEOUT_MS); if ((ret < 0) || (bytes_actual != sizeof(struct vprbrd_i2c_read_hdr))) { dev_err(&usb_dev->dev, "failure transmitting usb\n"); error = -EREMOTEIO; } /* read the actual data */ ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, VPRBRD_EP_IN), rmsg, bytes_xfer, &bytes_actual, VPRBRD_USB_TIMEOUT_MS); if ((ret < 0) || (bytes_xfer != bytes_actual)) { dev_err(&usb_dev->dev, "failure receiving usb\n"); error = -EREMOTEIO; } return error; } static int vprbrd_i2c_addr(struct usb_device *usb_dev, struct vprbrd_i2c_addr_msg *amsg) { int ret, bytes_actual; ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, VPRBRD_EP_OUT), amsg, sizeof(struct vprbrd_i2c_addr_msg), &bytes_actual, VPRBRD_USB_TIMEOUT_MS); if ((ret < 0) || (sizeof(struct vprbrd_i2c_addr_msg) != bytes_actual)) { dev_err(&usb_dev->dev, "failure transmitting usb\n"); return -EREMOTEIO; } return 0; } static int vprbrd_i2c_read(struct vprbrd *vb, struct i2c_msg *msg) { int ret; u16 remain_len, len1, len2, start = 0x0000; struct vprbrd_i2c_read_msg *rmsg = (struct vprbrd_i2c_read_msg *)vb->buf; remain_len = msg->len; rmsg->header.cmd = VPRBRD_I2C_CMD_READ; while (remain_len > 0) { rmsg->header.addr = cpu_to_le16(start + 0x4000); if (remain_len <= 255) { len1 = remain_len; len2 = 0x00; rmsg->header.len0 = remain_len; rmsg->header.len1 = 0x00; rmsg->header.len2 = 0x00; rmsg->header.len3 = 0x00; rmsg->header.len4 = 0x00; rmsg->header.len5 = 0x00; remain_len = 0; } else if (remain_len <= 510) { len1 = remain_len; len2 = 0x00; rmsg->header.len0 = remain_len - 255; rmsg->header.len1 = 0xff; rmsg->header.len2 = 0x00; rmsg->header.len3 = 0x00; rmsg->header.len4 = 0x00; rmsg->header.len5 = 0x00; remain_len = 0; } else if (remain_len <= 512) { len1 = remain_len; len2 = 0x00; rmsg->header.len0 = remain_len - 510; rmsg->header.len1 = 0xff; rmsg->header.len2 = 0xff; rmsg->header.len3 = 0x00; rmsg->header.len4 = 0x00; 
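/* Note: across the branches of this ladder the six len bytes add up to the requested transfer length; len0..len2 appear to describe the first bulk-in chunk (at most 512 bytes) and len3..len5 an optional second one, with tf1/tf2 below carrying the same two chunk sizes. */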
rmsg->header.len5 = 0x00; remain_len = 0; } else if (remain_len <= 767) { len1 = 512; len2 = remain_len - 512; rmsg->header.len0 = 0x02; rmsg->header.len1 = 0xff; rmsg->header.len2 = 0xff; rmsg->header.len3 = remain_len - 512; rmsg->header.len4 = 0x00; rmsg->header.len5 = 0x00; remain_len = 0; } else if (remain_len <= 1022) { len1 = 512; len2 = remain_len - 512; rmsg->header.len0 = 0x02; rmsg->header.len1 = 0xff; rmsg->header.len2 = 0xff; rmsg->header.len3 = remain_len - 767; rmsg->header.len4 = 0xff; rmsg->header.len5 = 0x00; remain_len = 0; } else if (remain_len <= 1024) { len1 = 512; len2 = remain_len - 512; rmsg->header.len0 = 0x02; rmsg->header.len1 = 0xff; rmsg->header.len2 = 0xff; rmsg->header.len3 = remain_len - 1022; rmsg->header.len4 = 0xff; rmsg->header.len5 = 0xff; remain_len = 0; } else { len1 = 512; len2 = 512; rmsg->header.len0 = 0x02; rmsg->header.len1 = 0xff; rmsg->header.len2 = 0xff; rmsg->header.len3 = 0x02; rmsg->header.len4 = 0xff; rmsg->header.len5 = 0xff; remain_len -= 1024; start += 1024; } rmsg->header.tf1 = cpu_to_le16(len1); rmsg->header.tf2 = cpu_to_le16(len2); /* first read transfer */ ret = vprbrd_i2c_receive(vb->usb_dev, rmsg, len1); if (ret < 0) return ret; /* copy the received data */ memcpy(msg->buf + start, rmsg, len1); /* second read transfer if neccessary */ if (len2 > 0) { ret = vprbrd_i2c_receive(vb->usb_dev, rmsg, len2); if (ret < 0) return ret; /* copy the received data */ memcpy(msg->buf + start + 512, rmsg, len2); } } return 0; } static int vprbrd_i2c_write(struct vprbrd *vb, struct i2c_msg *msg) { int ret, bytes_actual; u16 remain_len, bytes_xfer, start = 0x0000; struct vprbrd_i2c_write_msg *wmsg = (struct vprbrd_i2c_write_msg *)vb->buf; remain_len = msg->len; wmsg->header.cmd = VPRBRD_I2C_CMD_WRITE; wmsg->header.last = 0x00; wmsg->header.chan = 0x00; wmsg->header.spi = 0x0000; while (remain_len > 0) { wmsg->header.addr = cpu_to_le16(start + 0x4000); if (remain_len > 503) { wmsg->header.len1 = 0xff; wmsg->header.len2 = 0xf8; remain_len -= 503; bytes_xfer = 503 + sizeof(struct vprbrd_i2c_write_hdr); start += 503; } else if (remain_len > 255) { wmsg->header.len1 = 0xff; wmsg->header.len2 = (remain_len - 255); bytes_xfer = remain_len + sizeof(struct vprbrd_i2c_write_hdr); remain_len = 0; } else { wmsg->header.len1 = remain_len; wmsg->header.len2 = 0x00; bytes_xfer = remain_len + sizeof(struct vprbrd_i2c_write_hdr); remain_len = 0; } memcpy(wmsg->data, msg->buf + start, bytes_xfer - sizeof(struct vprbrd_i2c_write_hdr)); ret = usb_bulk_msg(vb->usb_dev, usb_sndbulkpipe(vb->usb_dev, VPRBRD_EP_OUT), wmsg, bytes_xfer, &bytes_actual, VPRBRD_USB_TIMEOUT_MS); if ((ret < 0) || (bytes_xfer != bytes_actual)) return -EREMOTEIO; } return 0; } static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs, int num) { struct i2c_msg *pmsg; int i, ret, error = 0; struct vprbrd *vb = (struct vprbrd *)i2c->algo_data; struct vprbrd_i2c_addr_msg *amsg = (struct vprbrd_i2c_addr_msg *)vb->buf; struct vprbrd_i2c_status *smsg = (struct vprbrd_i2c_status *)vb->buf; for (i = 0 ; i < num ; i++) { pmsg = &msgs[i]; dev_dbg(&i2c->dev, " %d: %s (flags %d) %d bytes to 0x%02x\n", i, pmsg->flags & I2C_M_RD ? 
"read" : "write", pmsg->flags, pmsg->len, pmsg->addr); mutex_lock(&vb->lock); /* directly send the message */ if (pmsg->flags & I2C_M_RD) { /* read data */ amsg->cmd = VPRBRD_I2C_CMD_ADDR; amsg->unknown2 = 0x00; amsg->unknown3 = 0x00; amsg->addr = pmsg->addr; amsg->unknown1 = 0x01; amsg->len = cpu_to_le16(pmsg->len); /* send the addr and len, we're interested to board */ ret = vprbrd_i2c_addr(vb->usb_dev, amsg); if (ret < 0) error = ret; ret = vprbrd_i2c_read(vb, pmsg); if (ret < 0) error = ret; ret = vprbrd_i2c_status(i2c, smsg, error); if (ret < 0) error = ret; /* in case of protocol error, return the error */ if (error < 0) goto error; } else { /* write data */ ret = vprbrd_i2c_write(vb, pmsg); amsg->cmd = VPRBRD_I2C_CMD_ADDR; amsg->unknown2 = 0x00; amsg->unknown3 = 0x00; amsg->addr = pmsg->addr; amsg->unknown1 = 0x00; amsg->len = cpu_to_le16(pmsg->len); /* send the addr, the data goes to to board */ ret = vprbrd_i2c_addr(vb->usb_dev, amsg); if (ret < 0) error = ret; ret = vprbrd_i2c_status(i2c, smsg, error); if (ret < 0) error = ret; if (error < 0) goto error; } mutex_unlock(&vb->lock); } return num; error: mutex_unlock(&vb->lock); return error; } static u32 vprbrd_i2c_func(struct i2c_adapter *i2c) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } /* This is the actual algorithm we define */ static const struct i2c_algorithm vprbrd_algorithm = { .xfer = vprbrd_i2c_xfer, .functionality = vprbrd_i2c_func, }; static const struct i2c_adapter_quirks vprbrd_quirks = { .max_read_len = 2048, .max_write_len = 2048, }; static int vprbrd_i2c_probe(struct platform_device *pdev) { struct vprbrd *vb = dev_get_drvdata(pdev->dev.parent); struct vprbrd_i2c *vb_i2c; int ret; int pipe; vb_i2c = devm_kzalloc(&pdev->dev, sizeof(*vb_i2c), GFP_KERNEL); if (vb_i2c == NULL) return -ENOMEM; /* setup i2c adapter description */ vb_i2c->i2c.owner = THIS_MODULE; vb_i2c->i2c.class = I2C_CLASS_HWMON; vb_i2c->i2c.algo = &vprbrd_algorithm; vb_i2c->i2c.quirks = &vprbrd_quirks; vb_i2c->i2c.algo_data = vb; /* save the param in usb capabable memory */ vb_i2c->bus_freq_param = i2c_bus_param; snprintf(vb_i2c->i2c.name, sizeof(vb_i2c->i2c.name), "viperboard at bus %03d device %03d", vb->usb_dev->bus->busnum, vb->usb_dev->devnum); /* setting the bus frequency */ if ((i2c_bus_param <= VPRBRD_I2C_FREQ_10KHZ) && (i2c_bus_param >= VPRBRD_I2C_FREQ_6MHZ)) { pipe = usb_sndctrlpipe(vb->usb_dev, 0); ret = usb_control_msg(vb->usb_dev, pipe, VPRBRD_USB_REQUEST_I2C_FREQ, VPRBRD_USB_TYPE_OUT, 0x0000, 0x0000, &vb_i2c->bus_freq_param, 1, VPRBRD_USB_TIMEOUT_MS); if (ret != 1) { dev_err(&pdev->dev, "failure setting i2c_bus_freq to %d\n", i2c_bus_freq); return -EIO; } } else { dev_err(&pdev->dev, "invalid i2c_bus_freq setting:%d\n", i2c_bus_freq); return -EIO; } vb_i2c->i2c.dev.parent = &pdev->dev; /* attach to i2c layer */ i2c_add_adapter(&vb_i2c->i2c); platform_set_drvdata(pdev, vb_i2c); return 0; } static void vprbrd_i2c_remove(struct platform_device *pdev) { struct vprbrd_i2c *vb_i2c = platform_get_drvdata(pdev); i2c_del_adapter(&vb_i2c->i2c); } static struct platform_driver vprbrd_i2c_driver = { .driver.name = "viperboard-i2c", .probe = vprbrd_i2c_probe, .remove = vprbrd_i2c_remove, }; static int __init vprbrd_i2c_init(void) { switch (i2c_bus_freq) { case 6000: i2c_bus_param = VPRBRD_I2C_FREQ_6MHZ; break; case 3000: i2c_bus_param = VPRBRD_I2C_FREQ_3MHZ; break; case 1000: i2c_bus_param = VPRBRD_I2C_FREQ_1MHZ; break; case 400: i2c_bus_param = VPRBRD_I2C_FREQ_400KHZ; break; case 200: i2c_bus_param = VPRBRD_I2C_FREQ_200KHZ; break; case 100: 
i2c_bus_param = VPRBRD_I2C_FREQ_100KHZ; break; case 10: i2c_bus_param = VPRBRD_I2C_FREQ_10KHZ; break; default: pr_warn("invalid i2c_bus_freq (%d)\n", i2c_bus_freq); i2c_bus_param = VPRBRD_I2C_FREQ_100KHZ; } return platform_driver_register(&vprbrd_i2c_driver); } subsys_initcall(vprbrd_i2c_init); static void __exit vprbrd_i2c_exit(void) { platform_driver_unregister(&vprbrd_i2c_driver); } module_exit(vprbrd_i2c_exit); MODULE_AUTHOR("Lars Poeschel <poeschel@lemonage.de>"); MODULE_DESCRIPTION("I2C controller driver for Nano River Techs Viperboard"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:viperboard-i2c"); |
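For context, once this adapter is registered it shows up as a normal /dev/i2c-N character device, and a single combined write+read from userspace ends up as one vprbrd_i2c_xfer() call with num = 2. The sketch below is illustrative only and uses the standard I2C_RDWR ioctl; the bus number /dev/i2c-1 and the 0x50 slave address are assumptions, not anything defined by this driver.

/*
 * Minimal userspace sketch: register-pointer write followed by a read,
 * issued as one combined transaction through the viperboard adapter.
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	struct i2c_msg msgs[2];
	struct i2c_rdwr_ioctl_data xfer;
	uint8_t reg = 0x00, buf[16];
	int fd = open("/dev/i2c-1", O_RDWR);	/* assumed bus number */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* first message: write the register address to the (assumed) slave */
	msgs[0].addr = 0x50;
	msgs[0].flags = 0;
	msgs[0].len = 1;
	msgs[0].buf = &reg;

	/* second message: read 16 bytes back in the same transaction */
	msgs[1].addr = 0x50;
	msgs[1].flags = I2C_M_RD;
	msgs[1].len = sizeof(buf);
	msgs[1].buf = buf;

	xfer.msgs = msgs;
	xfer.nmsgs = 2;

	if (ioctl(fd, I2C_RDWR, &xfer) < 0)
		perror("I2C_RDWR");
	else
		printf("read %zu bytes, first byte 0x%02x\n",
		       sizeof(buf), buf[0]);

	close(fd);
	return 0;
}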
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * HID support for Linux
 *
 * Copyright (c) 1999 Andreas Gal
 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
 * Copyright (c) 2007-2008 Oliver Neukum
 * Copyright (c) 2006-2012 Jiri Kosina
 * Copyright (c) 2012 Henrik Rydberg
 */

/*
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/unaligned.h>
#include <asm/byteorder.h>

#include <linux/hid.h>

static struct hid_driver hid_generic;

static int __check_hid_generic(struct device_driver *drv, void *data)
{
	struct hid_driver *hdrv = to_hid_driver(drv);
	struct hid_device *hdev = data;

	if (hdrv == &hid_generic)
		return 0;

	return hid_match_device(hdev, hdrv) != NULL;
}

static bool hid_generic_match(struct hid_device *hdev,
			      bool ignore_special_driver)
{
	if (ignore_special_driver)
		return true;

	if (hdev->quirks & HID_QUIRK_IGNORE_SPECIAL_DRIVER)
		return true;

	if (hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)
		return false;

	/*
	 * If any other driver wants the device, leave the device to this other
	 * driver.
	 */
	if (bus_for_each_drv(&hid_bus_type, NULL, hdev, __check_hid_generic))
		return false;

	return true;
}

static int hid_generic_probe(struct hid_device *hdev,
			     const struct hid_device_id *id)
{
	int ret;

	hdev->quirks |= HID_QUIRK_INPUT_PER_APP;

	ret = hid_parse(hdev);
	if (ret)
		return ret;

	return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}

static const struct hid_device_id hid_table[] = {
	{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, HID_ANY_ID, HID_ANY_ID) },
	{ }
};
MODULE_DEVICE_TABLE(hid, hid_table);

static struct hid_driver hid_generic = {
	.name = "hid-generic",
	.id_table = hid_table,
	.match = hid_generic_match,
	.probe = hid_generic_probe,
};
module_hid_driver(hid_generic);

MODULE_AUTHOR("Henrik Rydberg");
MODULE_DESCRIPTION("HID generic driver");
MODULE_LICENSE("GPL");
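The match callback above encodes a small precedence: a forced bind wins, then the per-device quirks, and only then does the generic driver yield to any other HID driver that claims the device. The sketch below restates that decision order with plain booleans so it can run in userspace; the function name and parameters are stand-ins, not kernel API.

/*
 * Illustrative decision-order sketch of hid_generic_match(), reduced to
 * booleans. Flag names mirror the kernel quirks but are not the real types.
 */
#include <stdbool.h>
#include <stdio.h>

static bool generic_matches(bool ignore_special_driver,
			    bool quirk_ignore_special,
			    bool quirk_have_special,
			    bool another_driver_matches)
{
	if (ignore_special_driver)
		return true;		/* user forced hid-generic */
	if (quirk_ignore_special)
		return true;		/* device quirked to stay on the generic driver */
	if (quirk_have_special)
		return false;		/* a dedicated driver is expected */
	/* otherwise, yield only if some other HID driver claims the device */
	return !another_driver_matches;
}

int main(void)
{
	printf("no special driver around -> %d\n",
	       generic_matches(false, false, false, false));	/* 1: bind */
	printf("dedicated driver exists  -> %d\n",
	       generic_matches(false, false, false, true));	/* 0: yield */
	return 0;
}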
#include <linux/dcache.h>

#include "internal.h"

unsigned name_to_int(const struct qstr *qstr)
{
	const char *name = qstr->name;
	int len = qstr->len;
	unsigned n = 0;

	if (len > 1 && *name == '0')
		goto out;

	do {
		unsigned c = *name++ - '0';

		if (c > 9)
			goto out;
		if (n >= (~0U - 9) / 10)
			goto out;
		n *= 10;
		n += c;
	} while (--len > 0);
	return n;

out:
	return ~0U;
}
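The parsing rules here are easy to miss: multi-digit strings may not start with '0', any non-digit rejects the name, and a conservative pre-multiplication check rejects anything that could overflow an unsigned int, with ~0U doubling as the "invalid" return. The userspace re-implementation below is only an illustration; the name name_to_uint and the sample strings are made up.

/*
 * Userspace illustration of the same parsing rules; ~0U (4294967295)
 * marks a rejected name.
 */
#include <stdio.h>
#include <string.h>

static unsigned name_to_uint(const char *name, int len)
{
	unsigned n = 0;

	if (len > 1 && *name == '0')
		return ~0U;			/* no leading zeros */
	while (len-- > 0) {
		unsigned c = (unsigned)(*name++ - '0');

		if (c > 9)
			return ~0U;		/* non-digit character */
		if (n >= (~0U - 9) / 10)
			return ~0U;		/* same conservative overflow check */
		n = n * 10 + c;
	}
	return n;
}

int main(void)
{
	const char *samples[] = { "123", "0123", "12a", "99999999999" };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%-12s -> %u\n", samples[i],
		       name_to_uint(samples[i], (int)strlen(samples[i])));
	return 0;
}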
19171 19172 727 179 567 677 677 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 | // SPDX-License-Identifier: GPL-2.0-only /* * Lock-less NULL terminated single linked list * * The basic atomic operation of this list is cmpxchg on long. On * architectures that don't have NMI-safe cmpxchg implementation, the * list can NOT be used in NMI handlers. So code that uses the list in * an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. * * Copyright 2010,2011 Intel Corp. * Author: Huang Ying <ying.huang@intel.com> */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/llist.h> /** * llist_add_batch - add several linked entries in batch * @new_first: first entry in batch to be added * @new_last: last entry in batch to be added * @head: the head for your lock-less list * * Return whether list is empty before adding. */ bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, struct llist_head *head) { struct llist_node *first = READ_ONCE(head->first); do { new_last->next = first; } while (!try_cmpxchg(&head->first, &first, new_first)); return !first; } EXPORT_SYMBOL_GPL(llist_add_batch); /** * llist_del_first - delete the first entry of lock-less list * @head: the head for your lock-less list * * If list is empty, return NULL, otherwise, return the first entry * deleted, this is the newest added one. * * Only one llist_del_first user can be used simultaneously with * multiple llist_add users without lock. Because otherwise * llist_del_first, llist_add, llist_add (or llist_del_all, llist_add, * llist_add) sequence in another user may change @head->first->next, * but keep @head->first. If multiple consumers are needed, please * use llist_del_all or use lock between consumers. */ struct llist_node *llist_del_first(struct llist_head *head) { struct llist_node *entry, *next; entry = smp_load_acquire(&head->first); do { if (entry == NULL) return NULL; next = READ_ONCE(entry->next); } while (!try_cmpxchg(&head->first, &entry, next)); return entry; } EXPORT_SYMBOL_GPL(llist_del_first); /** * llist_del_first_this - delete given entry of lock-less list if it is first * @head: the head for your lock-less list * @this: a list entry. * * If head of the list is given entry, delete and return %true else * return %false. * * Multiple callers can safely call this concurrently with multiple * llist_add() callers, providing all the callers offer a different @this. */ bool llist_del_first_this(struct llist_head *head, struct llist_node *this) { struct llist_node *entry, *next; /* acquire ensures orderig wrt try_cmpxchg() is llist_del_first() */ entry = smp_load_acquire(&head->first); do { if (entry != this) return false; next = READ_ONCE(entry->next); } while (!try_cmpxchg(&head->first, &entry, next)); return true; } EXPORT_SYMBOL_GPL(llist_del_first_this); /** * llist_reverse_order - reverse order of a llist chain * @head: first item of the list to be reversed * * Reverse the order of a chain of llist entries and return the * new first entry. 
*/ struct llist_node *llist_reverse_order(struct llist_node *head) { struct llist_node *new_head = NULL; while (head) { struct llist_node *tmp = head; head = head->next; tmp->next = new_head; new_head = tmp; } return new_head; } EXPORT_SYMBOL_GPL(llist_reverse_order); |
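A common consumer pattern with this API is: producers push with llist_add(), the single consumer detaches the whole chain with llist_del_all() and then calls llist_reverse_order() to process entries in FIFO order. The snippet below is a userspace analogue of that pattern built on C11 atomics in place of try_cmpxchg(); it sketches the technique only and is not the kernel llist API.

/*
 * Userspace analogue: lock-free LIFO push, detach-all, then reverse to
 * recover FIFO order. Single-threaded demo; values are illustrative.
 */
#include <stdatomic.h>
#include <stdio.h>

struct node {
	struct node *next;
	int val;
};

/* push one node, equivalent in spirit to llist_add() */
static void stack_push(struct node *_Atomic *head, struct node *n)
{
	struct node *first = atomic_load(head);

	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak(head, &first, n));
}

int main(void)
{
	static struct node nodes[3] = { { .val = 1 }, { .val = 2 }, { .val = 3 } };
	struct node *_Atomic head = NULL;
	struct node *n, *rev = NULL;

	for (int i = 0; i < 3; i++)
		stack_push(&head, &nodes[i]);

	/* detach the whole list (llist_del_all analogue), then reverse it */
	n = atomic_exchange(&head, NULL);
	while (n) {
		struct node *tmp = n;

		n = n->next;
		tmp->next = rev;
		rev = tmp;
	}
	for (n = rev; n; n = n->next)
		printf("%d\n", n->val);		/* prints 1 2 3: FIFO order */
	return 0;
}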
5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266 5267 5268 5269 5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288 5289 5290 5291 5292 5293 5294 5295 5296 5297 5298 5299 5300 5301 5302 5303 5304 5305 5306 5307 5308 5309 5310 5311 5312 5313 5314 5315 5316 5317 5318 5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329 5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507 5508 5509 5510 5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524 5525 5526 5527 5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591 5592 5593 5594 5595 5596 5597 5598 5599 5600 5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642 5643 5644 5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 | // SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2009, Christoph Hellwig * All Rights Reserved. 
* * NOTE: none of these tracepoints shall be considered a stable kernel ABI * as they can change at any time. * * Current conventions for printing numbers measuring specific units: * * agno: allocation group number * * agino: per-AG inode number * ino: filesystem inode number * * agbno: per-AG block number in fs blocks * rgbno: per-rtgroup block number in fs blocks * startblock: physical block number for file mappings. This is either a * segmented fsblock for data device mappings, or a rfsblock * for realtime device mappings * fsbcount: number of blocks in an extent, in fs blocks * * gbno: generic allocation group block number. This is an agbno for * space in a per-AG or a rgbno for space in a realtime group. * * daddr: physical block number in 512b blocks * bbcount: number of blocks in a physical extent, in 512b blocks * * rtx: physical rt extent number for extent mappings * rtxcount: number of rt extents in an extent mapping * * owner: reverse-mapping owner, usually inodes * * fileoff: file offset, in fs blocks * pos: file offset, in bytes * bytecount: number of bytes * * dablk: directory or xattr block offset, in filesystem blocks * * disize: ondisk file size, in bytes * isize: incore file size, in bytes * * forkoff: inode fork offset, in bytes * * ireccount: number of inode records * * Numbers describing space allocations (blocks, extents, inodes) should be * formatted in hexadecimal. */ #undef TRACE_SYSTEM #define TRACE_SYSTEM xfs #if !defined(_TRACE_XFS_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_XFS_H #include <linux/tracepoint.h> struct xfs_agf; struct xfs_alloc_arg; struct xfs_attr_list_context; struct xfs_buf_log_item; struct xfs_da_args; struct xfs_da_node_entry; struct xfs_dquot; struct xfs_log_item; struct xlog; struct xlog_ticket; struct xlog_recover; struct xlog_recover_item; struct xlog_rec_header; struct xlog_in_core; struct xfs_buf_log_format; struct xfs_inode_log_format; struct xfs_bmbt_irec; struct xfs_btree_cur; struct xfs_defer_op_type; struct xfs_refcount_irec; struct xfs_fsmap; struct xfs_fsmap_irec; struct xfs_group; struct xfs_rmap_irec; struct xfs_icreate_log; struct xfs_iunlink_item; struct xfs_owner_info; struct xfs_trans_res; struct xfs_inobt_rec_incore; union xfs_btree_ptr; struct xfs_dqtrx; struct xfs_icwalk; struct xfs_perag; struct xfbtree; struct xfs_btree_ops; struct xfs_bmap_intent; struct xfs_exchmaps_intent; struct xfs_exchmaps_req; struct xfs_exchrange; struct xfs_getparents; struct xfs_parent_irec; struct xfs_attrlist_cursor_kern; struct xfs_extent_free_item; struct xfs_rmap_intent; struct xfs_refcount_intent; struct xfs_metadir_update; struct xfs_rtgroup; struct xfs_open_zone; #define XFS_ATTR_FILTER_FLAGS \ { XFS_ATTR_ROOT, "ROOT" }, \ { XFS_ATTR_SECURE, "SECURE" }, \ { XFS_ATTR_INCOMPLETE, "INCOMPLETE" }, \ { XFS_ATTR_PARENT, "PARENT" } DECLARE_EVENT_CLASS(xfs_attr_list_class, TP_PROTO(struct xfs_attr_list_context *ctx), TP_ARGS(ctx), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(u32, hashval) __field(u32, blkno) __field(u32, offset) __field(void *, buffer) __field(int, bufsize) __field(int, count) __field(int, firstu) __field(int, dupcnt) __field(unsigned int, attr_filter) ), TP_fast_assign( __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev; __entry->ino = ctx->dp->i_ino; __entry->hashval = ctx->cursor.hashval; __entry->blkno = ctx->cursor.blkno; __entry->offset = ctx->cursor.offset; __entry->buffer = ctx->buffer; __entry->bufsize = ctx->bufsize; __entry->count = ctx->count; __entry->firstu = ctx->firstu; 
__entry->attr_filter = ctx->attr_filter; ), TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u " "buffer %p size %u count %u firstu %u filter %s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->hashval, __entry->blkno, __entry->offset, __entry->dupcnt, __entry->buffer, __entry->bufsize, __entry->count, __entry->firstu, __print_flags(__entry->attr_filter, "|", XFS_ATTR_FILTER_FLAGS) ) ) #define DEFINE_ATTR_LIST_EVENT(name) \ DEFINE_EVENT(xfs_attr_list_class, name, \ TP_PROTO(struct xfs_attr_list_context *ctx), \ TP_ARGS(ctx)) DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf); DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf_all); DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf); DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf_end); DEFINE_ATTR_LIST_EVENT(xfs_attr_list_full); DEFINE_ATTR_LIST_EVENT(xfs_attr_list_add); DEFINE_ATTR_LIST_EVENT(xfs_attr_list_wrong_blk); DEFINE_ATTR_LIST_EVENT(xfs_attr_list_notfound); DEFINE_ATTR_LIST_EVENT(xfs_attr_leaf_list); DEFINE_ATTR_LIST_EVENT(xfs_attr_node_list); TRACE_EVENT(xlog_intent_recovery_failed, TP_PROTO(struct xfs_mount *mp, const struct xfs_defer_op_type *ops, int error), TP_ARGS(mp, ops, error), TP_STRUCT__entry( __field(dev_t, dev) __string(name, ops->name) __field(int, error) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __assign_str(name); __entry->error = error; ), TP_printk("dev %d:%d optype %s error %d", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(name), __entry->error) ); DECLARE_EVENT_CLASS(xfs_perag_class, TP_PROTO(const struct xfs_perag *pag, unsigned long caller_ip), TP_ARGS(pag, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_agnumber_t, agno) __field(int, refcount) __field(int, active_refcount) __field(unsigned long, caller_ip) ), TP_fast_assign( __entry->dev = pag_mount(pag)->m_super->s_dev; __entry->agno = pag_agno(pag); __entry->refcount = atomic_read(&pag->pag_group.xg_ref); __entry->active_refcount = atomic_read(&pag->pag_group.xg_active_ref); __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d agno 0x%x passive refs %d active refs %d caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno, __entry->refcount, __entry->active_refcount, (char *)__entry->caller_ip) ); #define DEFINE_PERAG_REF_EVENT(name) \ DEFINE_EVENT(xfs_perag_class, name, \ TP_PROTO(const struct xfs_perag *pag, unsigned long caller_ip), \ TP_ARGS(pag, caller_ip)) DEFINE_PERAG_REF_EVENT(xfs_perag_set_inode_tag); DEFINE_PERAG_REF_EVENT(xfs_perag_clear_inode_tag); DEFINE_PERAG_REF_EVENT(xfs_reclaim_inodes_count); TRACE_DEFINE_ENUM(XG_TYPE_AG); TRACE_DEFINE_ENUM(XG_TYPE_RTG); DECLARE_EVENT_CLASS(xfs_group_class, TP_PROTO(struct xfs_group *xg, unsigned long caller_ip), TP_ARGS(xg, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(int, refcount) __field(int, active_refcount) __field(unsigned long, caller_ip) ), TP_fast_assign( __entry->dev = xg->xg_mount->m_super->s_dev; __entry->type = xg->xg_type; __entry->agno = xg->xg_gno; __entry->refcount = atomic_read(&xg->xg_ref); __entry->active_refcount = atomic_read(&xg->xg_active_ref); __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d %sno 0x%x passive refs %d active refs %d caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __entry->refcount, __entry->active_refcount, (char *)__entry->caller_ip) ); #define DEFINE_GROUP_REF_EVENT(name) \ DEFINE_EVENT(xfs_group_class, name, \ TP_PROTO(struct xfs_group *xg, unsigned long 
caller_ip), \ TP_ARGS(xg, caller_ip)) DEFINE_GROUP_REF_EVENT(xfs_group_get); DEFINE_GROUP_REF_EVENT(xfs_group_hold); DEFINE_GROUP_REF_EVENT(xfs_group_put); DEFINE_GROUP_REF_EVENT(xfs_group_grab); DEFINE_GROUP_REF_EVENT(xfs_group_grab_next_tag); DEFINE_GROUP_REF_EVENT(xfs_group_rele); #ifdef CONFIG_XFS_RT DECLARE_EVENT_CLASS(xfs_zone_class, TP_PROTO(struct xfs_rtgroup *rtg), TP_ARGS(rtg), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_rgnumber_t, rgno) __field(xfs_rgblock_t, used) __field(unsigned int, nr_open) ), TP_fast_assign( struct xfs_mount *mp = rtg_mount(rtg); __entry->dev = mp->m_super->s_dev; __entry->rgno = rtg_rgno(rtg); __entry->used = rtg_rmap(rtg)->i_used_blocks; __entry->nr_open = mp->m_zone_info->zi_nr_open_zones; ), TP_printk("dev %d:%d rgno 0x%x used 0x%x nr_open %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rgno, __entry->used, __entry->nr_open) ); #define DEFINE_ZONE_EVENT(name) \ DEFINE_EVENT(xfs_zone_class, name, \ TP_PROTO(struct xfs_rtgroup *rtg), \ TP_ARGS(rtg)) DEFINE_ZONE_EVENT(xfs_zone_emptied); DEFINE_ZONE_EVENT(xfs_zone_full); DEFINE_ZONE_EVENT(xfs_zone_opened); DEFINE_ZONE_EVENT(xfs_zone_reset); DEFINE_ZONE_EVENT(xfs_zone_gc_target_opened); TRACE_EVENT(xfs_zone_free_blocks, TP_PROTO(struct xfs_rtgroup *rtg, xfs_rgblock_t rgbno, xfs_extlen_t len), TP_ARGS(rtg, rgbno, len), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_rgnumber_t, rgno) __field(xfs_rgblock_t, used) __field(xfs_rgblock_t, rgbno) __field(xfs_extlen_t, len) ), TP_fast_assign( __entry->dev = rtg_mount(rtg)->m_super->s_dev; __entry->rgno = rtg_rgno(rtg); __entry->used = rtg_rmap(rtg)->i_used_blocks; __entry->rgbno = rgbno; __entry->len = len; ), TP_printk("dev %d:%d rgno 0x%x used 0x%x rgbno 0x%x len 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rgno, __entry->used, __entry->rgbno, __entry->len) ); DECLARE_EVENT_CLASS(xfs_zone_alloc_class, TP_PROTO(struct xfs_open_zone *oz, xfs_rgblock_t rgbno, xfs_extlen_t len), TP_ARGS(oz, rgbno, len), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_rgnumber_t, rgno) __field(xfs_rgblock_t, used) __field(xfs_rgblock_t, written) __field(xfs_rgblock_t, write_pointer) __field(xfs_rgblock_t, rgbno) __field(xfs_extlen_t, len) ), TP_fast_assign( __entry->dev = rtg_mount(oz->oz_rtg)->m_super->s_dev; __entry->rgno = rtg_rgno(oz->oz_rtg); __entry->used = rtg_rmap(oz->oz_rtg)->i_used_blocks; __entry->written = oz->oz_written; __entry->write_pointer = oz->oz_write_pointer; __entry->rgbno = rgbno; __entry->len = len; ), TP_printk("dev %d:%d rgno 0x%x used 0x%x written 0x%x wp 0x%x rgbno 0x%x len 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rgno, __entry->used, __entry->written, __entry->write_pointer, __entry->rgbno, __entry->len) ); #define DEFINE_ZONE_ALLOC_EVENT(name) \ DEFINE_EVENT(xfs_zone_alloc_class, name, \ TP_PROTO(struct xfs_open_zone *oz, xfs_rgblock_t rgbno, \ xfs_extlen_t len), \ TP_ARGS(oz, rgbno, len)) DEFINE_ZONE_ALLOC_EVENT(xfs_zone_record_blocks); DEFINE_ZONE_ALLOC_EVENT(xfs_zone_alloc_blocks); TRACE_EVENT(xfs_zone_gc_select_victim, TP_PROTO(struct xfs_rtgroup *rtg, unsigned int bucket), TP_ARGS(rtg, bucket), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_rgnumber_t, rgno) __field(xfs_rgblock_t, used) __field(unsigned int, bucket) ), TP_fast_assign( __entry->dev = rtg_mount(rtg)->m_super->s_dev; __entry->rgno = rtg_rgno(rtg); __entry->used = rtg_rmap(rtg)->i_used_blocks; __entry->bucket = bucket; ), TP_printk("dev %d:%d rgno 0x%x used 0x%x bucket %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rgno, 
__entry->used, __entry->bucket) ); TRACE_EVENT(xfs_zones_mount, TP_PROTO(struct xfs_mount *mp), TP_ARGS(mp), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_rgnumber_t, rgcount) __field(uint32_t, blocks) __field(unsigned int, max_open_zones) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->rgcount = mp->m_sb.sb_rgcount; __entry->blocks = mp->m_groups[XG_TYPE_RTG].blocks; __entry->max_open_zones = mp->m_max_open_zones; ), TP_printk("dev %d:%d zoned %u blocks_per_zone %u, max_open %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rgcount, __entry->blocks, __entry->max_open_zones) ); #endif /* CONFIG_XFS_RT */ TRACE_EVENT(xfs_inodegc_worker, TP_PROTO(struct xfs_mount *mp, unsigned int shrinker_hits), TP_ARGS(mp, shrinker_hits), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned int, shrinker_hits) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->shrinker_hits = shrinker_hits; ), TP_printk("dev %d:%d shrinker_hits %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->shrinker_hits) ); DECLARE_EVENT_CLASS(xfs_fs_class, TP_PROTO(struct xfs_mount *mp, void *caller_ip), TP_ARGS(mp, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned long long, mflags) __field(unsigned long, opstate) __field(unsigned long, sbflags) __field(void *, caller_ip) ), TP_fast_assign( if (mp) { __entry->dev = mp->m_super->s_dev; __entry->mflags = mp->m_features; __entry->opstate = mp->m_opstate; __entry->sbflags = mp->m_super->s_flags; } __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d m_features 0x%llx opstate (%s) s_flags 0x%lx caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->mflags, __print_flags(__entry->opstate, "|", XFS_OPSTATE_STRINGS), __entry->sbflags, __entry->caller_ip) ); #define DEFINE_FS_EVENT(name) \ DEFINE_EVENT(xfs_fs_class, name, \ TP_PROTO(struct xfs_mount *mp, void *caller_ip), \ TP_ARGS(mp, caller_ip)) DEFINE_FS_EVENT(xfs_inodegc_flush); DEFINE_FS_EVENT(xfs_inodegc_push); DEFINE_FS_EVENT(xfs_inodegc_start); DEFINE_FS_EVENT(xfs_inodegc_stop); DEFINE_FS_EVENT(xfs_inodegc_queue); DEFINE_FS_EVENT(xfs_inodegc_throttle); DEFINE_FS_EVENT(xfs_fs_sync_fs); DEFINE_FS_EVENT(xfs_blockgc_start); DEFINE_FS_EVENT(xfs_blockgc_stop); DEFINE_FS_EVENT(xfs_blockgc_worker); DEFINE_FS_EVENT(xfs_blockgc_flush_all); TRACE_EVENT(xfs_inodegc_shrinker_scan, TP_PROTO(struct xfs_mount *mp, struct shrink_control *sc, void *caller_ip), TP_ARGS(mp, sc, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned long, nr_to_scan) __field(void *, caller_ip) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->nr_to_scan = sc->nr_to_scan; __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d nr_to_scan %lu caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->nr_to_scan, __entry->caller_ip) ); DECLARE_EVENT_CLASS(xfs_ag_class, TP_PROTO(const struct xfs_perag *pag), TP_ARGS(pag), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_agnumber_t, agno) ), TP_fast_assign( __entry->dev = pag_mount(pag)->m_super->s_dev; __entry->agno = pag_agno(pag); ), TP_printk("dev %d:%d agno 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno) ); #define DEFINE_AG_EVENT(name) \ DEFINE_EVENT(xfs_ag_class, name, \ TP_PROTO(const struct xfs_perag *pag), \ TP_ARGS(pag)) DEFINE_AG_EVENT(xfs_read_agf); DEFINE_AG_EVENT(xfs_alloc_read_agf); DEFINE_AG_EVENT(xfs_read_agi); DEFINE_AG_EVENT(xfs_ialloc_read_agi); TRACE_EVENT(xfs_attr_list_node_descend, TP_PROTO(struct xfs_attr_list_context *ctx, struct xfs_da_node_entry *btree), TP_ARGS(ctx, btree), 
TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(u32, hashval) __field(u32, blkno) __field(u32, offset) __field(void *, buffer) __field(int, bufsize) __field(int, count) __field(int, firstu) __field(int, dupcnt) __field(unsigned int, attr_filter) __field(u32, bt_hashval) __field(u32, bt_before) ), TP_fast_assign( __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev; __entry->ino = ctx->dp->i_ino; __entry->hashval = ctx->cursor.hashval; __entry->blkno = ctx->cursor.blkno; __entry->offset = ctx->cursor.offset; __entry->buffer = ctx->buffer; __entry->bufsize = ctx->bufsize; __entry->count = ctx->count; __entry->firstu = ctx->firstu; __entry->attr_filter = ctx->attr_filter; __entry->bt_hashval = be32_to_cpu(btree->hashval); __entry->bt_before = be32_to_cpu(btree->before); ), TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u " "buffer %p size %u count %u firstu %u filter %s " "node hashval %u, node before %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->hashval, __entry->blkno, __entry->offset, __entry->dupcnt, __entry->buffer, __entry->bufsize, __entry->count, __entry->firstu, __print_flags(__entry->attr_filter, "|", XFS_ATTR_FILTER_FLAGS), __entry->bt_hashval, __entry->bt_before) ); DECLARE_EVENT_CLASS(xfs_bmap_class, TP_PROTO(struct xfs_inode *ip, struct xfs_iext_cursor *cur, int state, unsigned long caller_ip), TP_ARGS(ip, cur, state, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(void *, leaf) __field(int, pos) __field(xfs_fileoff_t, startoff) __field(xfs_fsblock_t, startblock) __field(xfs_filblks_t, blockcount) __field(xfs_exntst_t, state) __field(int, bmap_state) __field(unsigned long, caller_ip) ), TP_fast_assign( struct xfs_ifork *ifp; struct xfs_bmbt_irec r; ifp = xfs_iext_state_to_fork(ip, state); xfs_iext_get_extent(ifp, cur, &r); __entry->dev = VFS_I(ip)->i_sb->s_dev; __entry->ino = ip->i_ino; __entry->leaf = cur->leaf; __entry->pos = cur->pos; __entry->startoff = r.br_startoff; __entry->startblock = r.br_startblock; __entry->blockcount = r.br_blockcount; __entry->state = r.br_state; __entry->bmap_state = state; __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d ino 0x%llx state %s cur %p/%d " "fileoff 0x%llx startblock 0x%llx fsbcount 0x%llx flag %d caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS), __entry->leaf, __entry->pos, __entry->startoff, (int64_t)__entry->startblock, __entry->blockcount, __entry->state, (char *)__entry->caller_ip) ) #define DEFINE_BMAP_EVENT(name) \ DEFINE_EVENT(xfs_bmap_class, name, \ TP_PROTO(struct xfs_inode *ip, struct xfs_iext_cursor *cur, int state, \ unsigned long caller_ip), \ TP_ARGS(ip, cur, state, caller_ip)) DEFINE_BMAP_EVENT(xfs_iext_insert); DEFINE_BMAP_EVENT(xfs_iext_remove); DEFINE_BMAP_EVENT(xfs_bmap_pre_update); DEFINE_BMAP_EVENT(xfs_bmap_post_update); DEFINE_BMAP_EVENT(xfs_read_extent); DEFINE_BMAP_EVENT(xfs_write_extent); DECLARE_EVENT_CLASS(xfs_buf_class, TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), TP_ARGS(bp, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_daddr_t, bno) __field(int, nblks) __field(int, hold) __field(int, pincount) __field(unsigned, lockval) __field(unsigned, flags) __field(unsigned long, caller_ip) __field(const void *, buf_ops) ), TP_fast_assign( __entry->dev = bp->b_target->bt_dev; __entry->bno = xfs_buf_daddr(bp); __entry->nblks = bp->b_length; __entry->hold = bp->b_hold; __entry->pincount = atomic_read(&bp->b_pin_count); 
__entry->lockval = bp->b_sema.count; __entry->flags = bp->b_flags; __entry->caller_ip = caller_ip; __entry->buf_ops = bp->b_ops; ), TP_printk("dev %d:%d daddr 0x%llx bbcount 0x%x hold %d pincount %d " "lock %d flags %s bufops %pS caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->bno, __entry->nblks, __entry->hold, __entry->pincount, __entry->lockval, __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), __entry->buf_ops, (void *)__entry->caller_ip) ) #define DEFINE_BUF_EVENT(name) \ DEFINE_EVENT(xfs_buf_class, name, \ TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \ TP_ARGS(bp, caller_ip)) DEFINE_BUF_EVENT(xfs_buf_init); DEFINE_BUF_EVENT(xfs_buf_free); DEFINE_BUF_EVENT(xfs_buf_hold); DEFINE_BUF_EVENT(xfs_buf_rele); DEFINE_BUF_EVENT(xfs_buf_iodone); DEFINE_BUF_EVENT(xfs_buf_submit); DEFINE_BUF_EVENT(xfs_buf_lock); DEFINE_BUF_EVENT(xfs_buf_lock_done); DEFINE_BUF_EVENT(xfs_buf_trylock_fail); DEFINE_BUF_EVENT(xfs_buf_trylock); DEFINE_BUF_EVENT(xfs_buf_unlock); DEFINE_BUF_EVENT(xfs_buf_iowait); DEFINE_BUF_EVENT(xfs_buf_iowait_done); DEFINE_BUF_EVENT(xfs_buf_delwri_queue); DEFINE_BUF_EVENT(xfs_buf_delwri_queued); DEFINE_BUF_EVENT(xfs_buf_delwri_split); DEFINE_BUF_EVENT(xfs_buf_delwri_pushbuf); DEFINE_BUF_EVENT(xfs_buf_get_uncached); DEFINE_BUF_EVENT(xfs_buf_item_relse); DEFINE_BUF_EVENT(xfs_buf_iodone_async); DEFINE_BUF_EVENT(xfs_buf_error_relse); DEFINE_BUF_EVENT(xfs_buf_drain_buftarg); DEFINE_BUF_EVENT(xfs_trans_read_buf_shut); DEFINE_BUF_EVENT(xfs_buf_backing_folio); DEFINE_BUF_EVENT(xfs_buf_backing_kmem); DEFINE_BUF_EVENT(xfs_buf_backing_vmalloc); DEFINE_BUF_EVENT(xfs_buf_backing_fallback); /* not really buffer traces, but the buf provides useful information */ DEFINE_BUF_EVENT(xfs_btree_corrupt); DEFINE_BUF_EVENT(xfs_reset_dqcounts); /* pass flags explicitly */ DECLARE_EVENT_CLASS(xfs_buf_flags_class, TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), TP_ARGS(bp, flags, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_daddr_t, bno) __field(unsigned int, length) __field(int, hold) __field(int, pincount) __field(unsigned, lockval) __field(unsigned, flags) __field(unsigned long, caller_ip) ), TP_fast_assign( __entry->dev = bp->b_target->bt_dev; __entry->bno = xfs_buf_daddr(bp); __entry->length = bp->b_length; __entry->flags = flags; __entry->hold = bp->b_hold; __entry->pincount = atomic_read(&bp->b_pin_count); __entry->lockval = bp->b_sema.count; __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d daddr 0x%llx bbcount 0x%x hold %d pincount %d " "lock %d flags %s caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->bno, __entry->length, __entry->hold, __entry->pincount, __entry->lockval, __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), (void *)__entry->caller_ip) ) #define DEFINE_BUF_FLAGS_EVENT(name) \ DEFINE_EVENT(xfs_buf_flags_class, name, \ TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \ TP_ARGS(bp, flags, caller_ip)) DEFINE_BUF_FLAGS_EVENT(xfs_buf_find); DEFINE_BUF_FLAGS_EVENT(xfs_buf_get); DEFINE_BUF_FLAGS_EVENT(xfs_buf_read); DEFINE_BUF_FLAGS_EVENT(xfs_buf_readahead); TRACE_EVENT(xfs_buf_ioerror, TP_PROTO(struct xfs_buf *bp, int error, xfs_failaddr_t caller_ip), TP_ARGS(bp, error, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_daddr_t, bno) __field(unsigned int, length) __field(unsigned, flags) __field(int, hold) __field(int, pincount) __field(unsigned, lockval) __field(int, error) __field(xfs_failaddr_t, caller_ip) ), TP_fast_assign( 
__entry->dev = bp->b_target->bt_dev; __entry->bno = xfs_buf_daddr(bp); __entry->length = bp->b_length; __entry->hold = bp->b_hold; __entry->pincount = atomic_read(&bp->b_pin_count); __entry->lockval = bp->b_sema.count; __entry->error = error; __entry->flags = bp->b_flags; __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d daddr 0x%llx bbcount 0x%x hold %d pincount %d " "lock %d error %d flags %s caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->bno, __entry->length, __entry->hold, __entry->pincount, __entry->lockval, __entry->error, __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), (void *)__entry->caller_ip) ); DECLARE_EVENT_CLASS(xfs_buf_item_class, TP_PROTO(struct xfs_buf_log_item *bip), TP_ARGS(bip), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_daddr_t, buf_bno) __field(unsigned int, buf_len) __field(int, buf_hold) __field(int, buf_pincount) __field(int, buf_lockval) __field(unsigned, buf_flags) __field(unsigned, bli_recur) __field(int, bli_refcount) __field(unsigned, bli_flags) __field(unsigned long, li_flags) ), TP_fast_assign( __entry->dev = bip->bli_buf->b_target->bt_dev; __entry->bli_flags = bip->bli_flags; __entry->bli_recur = bip->bli_recur; __entry->bli_refcount = atomic_read(&bip->bli_refcount); __entry->buf_bno = xfs_buf_daddr(bip->bli_buf); __entry->buf_len = bip->bli_buf->b_length; __entry->buf_flags = bip->bli_buf->b_flags; __entry->buf_hold = bip->bli_buf->b_hold; __entry->buf_pincount = atomic_read(&bip->bli_buf->b_pin_count); __entry->buf_lockval = bip->bli_buf->b_sema.count; __entry->li_flags = bip->bli_item.li_flags; ), TP_printk("dev %d:%d daddr 0x%llx bbcount 0x%x hold %d pincount %d " "lock %d flags %s recur %d refcount %d bliflags %s " "liflags %s", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->buf_bno, __entry->buf_len, __entry->buf_hold, __entry->buf_pincount, __entry->buf_lockval, __print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS), __entry->bli_recur, __entry->bli_refcount, __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS), __print_flags(__entry->li_flags, "|", XFS_LI_FLAGS)) ) #define DEFINE_BUF_ITEM_EVENT(name) \ DEFINE_EVENT(xfs_buf_item_class, name, \ TP_PROTO(struct xfs_buf_log_item *bip), \ TP_ARGS(bip)) DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size); DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_ordered); DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale); DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format); DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format_stale); DEFINE_BUF_ITEM_EVENT(xfs_buf_item_ordered); DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin); DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin); DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale); DEFINE_BUF_ITEM_EVENT(xfs_buf_item_release); DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed); DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push); DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf); DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur); DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb); DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb_recur); DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf); DEFINE_BUF_ITEM_EVENT(xfs_trans_read_buf_recur); DEFINE_BUF_ITEM_EVENT(xfs_trans_log_buf); DEFINE_BUF_ITEM_EVENT(xfs_trans_brelse); DEFINE_BUF_ITEM_EVENT(xfs_trans_bdetach); DEFINE_BUF_ITEM_EVENT(xfs_trans_bjoin); DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold); DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release); DEFINE_BUF_ITEM_EVENT(xfs_trans_binval); DECLARE_EVENT_CLASS(xfs_filestream_class, TP_PROTO(const struct xfs_perag *pag, xfs_ino_t ino), TP_ARGS(pag, ino), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) 
__field(xfs_agnumber_t, agno) __field(int, streams) ), TP_fast_assign( __entry->dev = pag_mount(pag)->m_super->s_dev; __entry->ino = ino; __entry->agno = pag_agno(pag); __entry->streams = atomic_read(&pag->pagf_fstrms); ), TP_printk("dev %d:%d ino 0x%llx agno 0x%x streams %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->agno, __entry->streams) ) #define DEFINE_FILESTREAM_EVENT(name) \ DEFINE_EVENT(xfs_filestream_class, name, \ TP_PROTO(const struct xfs_perag *pag, xfs_ino_t ino), \ TP_ARGS(pag, ino)) DEFINE_FILESTREAM_EVENT(xfs_filestream_free); DEFINE_FILESTREAM_EVENT(xfs_filestream_lookup); DEFINE_FILESTREAM_EVENT(xfs_filestream_scan); TRACE_EVENT(xfs_filestream_pick, TP_PROTO(const struct xfs_perag *pag, xfs_ino_t ino), TP_ARGS(pag, ino), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(xfs_agnumber_t, agno) __field(int, streams) __field(xfs_extlen_t, free) ), TP_fast_assign( __entry->dev = pag_mount(pag)->m_super->s_dev; __entry->ino = ino; __entry->agno = pag_agno(pag); __entry->streams = atomic_read(&pag->pagf_fstrms); __entry->free = pag->pagf_freeblks; ), TP_printk("dev %d:%d ino 0x%llx agno 0x%x streams %d free %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->agno, __entry->streams, __entry->free) ); DECLARE_EVENT_CLASS(xfs_lock_class, TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, unsigned long caller_ip), TP_ARGS(ip, lock_flags, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(int, lock_flags) __field(unsigned long, caller_ip) ), TP_fast_assign( __entry->dev = VFS_I(ip)->i_sb->s_dev; __entry->ino = ip->i_ino; __entry->lock_flags = lock_flags; __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d ino 0x%llx flags %s caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS), (void *)__entry->caller_ip) ) #define DEFINE_LOCK_EVENT(name) \ DEFINE_EVENT(xfs_lock_class, name, \ TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \ unsigned long caller_ip), \ TP_ARGS(ip, lock_flags, caller_ip)) DEFINE_LOCK_EVENT(xfs_ilock); DEFINE_LOCK_EVENT(xfs_ilock_nowait); DEFINE_LOCK_EVENT(xfs_ilock_demote); DEFINE_LOCK_EVENT(xfs_iunlock); DECLARE_EVENT_CLASS(xfs_inode_class, TP_PROTO(struct xfs_inode *ip), TP_ARGS(ip), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(unsigned long, iflags) ), TP_fast_assign( __entry->dev = VFS_I(ip)->i_sb->s_dev; __entry->ino = ip->i_ino; __entry->iflags = ip->i_flags; ), TP_printk("dev %d:%d ino 0x%llx iflags 0x%lx", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->iflags) ) #define DEFINE_INODE_EVENT(name) \ DEFINE_EVENT(xfs_inode_class, name, \ TP_PROTO(struct xfs_inode *ip), \ TP_ARGS(ip)) DEFINE_INODE_EVENT(xfs_iget_skip); DEFINE_INODE_EVENT(xfs_iget_recycle); DEFINE_INODE_EVENT(xfs_iget_recycle_fail); DEFINE_INODE_EVENT(xfs_iget_hit); DEFINE_INODE_EVENT(xfs_iget_miss); DEFINE_INODE_EVENT(xfs_getattr); DEFINE_INODE_EVENT(xfs_setattr); DEFINE_INODE_EVENT(xfs_readlink); DEFINE_INODE_EVENT(xfs_inactive_symlink); DEFINE_INODE_EVENT(xfs_alloc_file_space); DEFINE_INODE_EVENT(xfs_free_file_space); DEFINE_INODE_EVENT(xfs_zero_file_space); DEFINE_INODE_EVENT(xfs_collapse_file_space); DEFINE_INODE_EVENT(xfs_insert_file_space); DEFINE_INODE_EVENT(xfs_readdir); #ifdef CONFIG_XFS_POSIX_ACL DEFINE_INODE_EVENT(xfs_get_acl); #endif DEFINE_INODE_EVENT(xfs_vm_bmap); DEFINE_INODE_EVENT(xfs_file_ioctl); DEFINE_INODE_EVENT(xfs_file_compat_ioctl); 
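/*
 * Illustrative sketch (not part of the header): each DEFINE_EVENT() above
 * generates a trace_<name>() helper taking the class prototype, so a lock
 * class user looks roughly like the following; exact call sites live in the
 * .c files and may differ.
 *
 *	void xfs_ilock(struct xfs_inode *ip, uint lock_flags)
 *	{
 *		trace_xfs_ilock(ip, lock_flags, _RET_IP_);
 *		...
 *	}
 *
 * The caller_ip argument is conventionally _RET_IP_, so that TP_printk()'s
 * %pS resolves to the function that took the lock rather than to the
 * tracepoint itself.
 */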
DEFINE_INODE_EVENT(xfs_ioctl_setattr); DEFINE_INODE_EVENT(xfs_dir_fsync); DEFINE_INODE_EVENT(xfs_file_fsync); DEFINE_INODE_EVENT(xfs_destroy_inode); DEFINE_INODE_EVENT(xfs_update_time); DEFINE_INODE_EVENT(xfs_dquot_dqalloc); DEFINE_INODE_EVENT(xfs_dquot_dqdetach); DEFINE_INODE_EVENT(xfs_inode_set_eofblocks_tag); DEFINE_INODE_EVENT(xfs_inode_clear_eofblocks_tag); DEFINE_INODE_EVENT(xfs_inode_free_eofblocks_invalid); DEFINE_INODE_EVENT(xfs_inode_set_cowblocks_tag); DEFINE_INODE_EVENT(xfs_inode_clear_cowblocks_tag); DEFINE_INODE_EVENT(xfs_inode_free_cowblocks_invalid); DEFINE_INODE_EVENT(xfs_inode_set_reclaimable); DEFINE_INODE_EVENT(xfs_inode_reclaiming); DEFINE_INODE_EVENT(xfs_inode_set_need_inactive); DEFINE_INODE_EVENT(xfs_inode_inactivating); /* * ftrace's __print_symbolic requires that all enum values be wrapped in the * TRACE_DEFINE_ENUM macro so that the enum value can be encoded in the ftrace * ring buffer. Somehow this was only worth mentioning in the ftrace sample * code. */ TRACE_DEFINE_ENUM(XFS_REFC_DOMAIN_SHARED); TRACE_DEFINE_ENUM(XFS_REFC_DOMAIN_COW); DECLARE_EVENT_CLASS(xfs_fault_class, TP_PROTO(struct xfs_inode *ip, unsigned int order), TP_ARGS(ip, order), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(unsigned int, order) ), TP_fast_assign( __entry->dev = VFS_I(ip)->i_sb->s_dev; __entry->ino = ip->i_ino; __entry->order = order; ), TP_printk("dev %d:%d ino 0x%llx order %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->order) ) #define DEFINE_FAULT_EVENT(name) \ DEFINE_EVENT(xfs_fault_class, name, \ TP_PROTO(struct xfs_inode *ip, unsigned int order), \ TP_ARGS(ip, order)) DEFINE_FAULT_EVENT(xfs_read_fault); DEFINE_FAULT_EVENT(xfs_write_fault); DECLARE_EVENT_CLASS(xfs_iref_class, TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), TP_ARGS(ip, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(int, count) __field(int, pincount) __field(unsigned long, caller_ip) ), TP_fast_assign( __entry->dev = VFS_I(ip)->i_sb->s_dev; __entry->ino = ip->i_ino; __entry->count = atomic_read(&VFS_I(ip)->i_count); __entry->pincount = atomic_read(&ip->i_pincount); __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d ino 0x%llx count %d pincount %d caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->count, __entry->pincount, (char *)__entry->caller_ip) ) TRACE_EVENT(xfs_iomap_prealloc_size, TP_PROTO(struct xfs_inode *ip, xfs_fsblock_t blocks, int shift, unsigned int writeio_blocks), TP_ARGS(ip, blocks, shift, writeio_blocks), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(xfs_fsblock_t, blocks) __field(int, shift) __field(unsigned int, writeio_blocks) ), TP_fast_assign( __entry->dev = VFS_I(ip)->i_sb->s_dev; __entry->ino = ip->i_ino; __entry->blocks = blocks; __entry->shift = shift; __entry->writeio_blocks = writeio_blocks; ), TP_printk("dev %d:%d ino 0x%llx prealloc blocks %llu shift %d " "m_allocsize_blocks %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->blocks, __entry->shift, __entry->writeio_blocks) ) TRACE_EVENT(xfs_irec_merge_pre, TP_PROTO(const struct xfs_perag *pag, const struct xfs_inobt_rec_incore *rec, const struct xfs_inobt_rec_incore *nrec), TP_ARGS(pag, rec, nrec), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_agnumber_t, agno) __field(xfs_agino_t, agino) __field(uint16_t, holemask) __field(xfs_agino_t, nagino) __field(uint16_t, nholemask) ), TP_fast_assign( __entry->dev = pag_mount(pag)->m_super->s_dev; __entry->agno = 
pag_agno(pag); __entry->agino = rec->ir_startino; __entry->holemask = rec->ir_holemask; __entry->nagino = nrec->ir_startino; __entry->nholemask = nrec->ir_holemask; ), TP_printk("dev %d:%d agno 0x%x agino 0x%x holemask 0x%x new_agino 0x%x new_holemask 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno, __entry->agino, __entry->holemask, __entry->nagino, __entry->nholemask) ) TRACE_EVENT(xfs_irec_merge_post, TP_PROTO(const struct xfs_perag *pag, const struct xfs_inobt_rec_incore *nrec), TP_ARGS(pag, nrec), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_agnumber_t, agno) __field(xfs_agino_t, agino) __field(uint16_t, holemask) ), TP_fast_assign( __entry->dev = pag_mount(pag)->m_super->s_dev; __entry->agno = pag_agno(pag); __entry->agino = nrec->ir_startino; __entry->holemask = nrec->ir_holemask; ), TP_printk("dev %d:%d agno 0x%x agino 0x%x holemask 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno, __entry->agino, __entry->holemask) ) #define DEFINE_IREF_EVENT(name) \ DEFINE_EVENT(xfs_iref_class, name, \ TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \ TP_ARGS(ip, caller_ip)) DEFINE_IREF_EVENT(xfs_irele); DEFINE_IREF_EVENT(xfs_inode_pin); DEFINE_IREF_EVENT(xfs_inode_unpin); DEFINE_IREF_EVENT(xfs_inode_unpin_nowait); DECLARE_EVENT_CLASS(xfs_namespace_class, TP_PROTO(struct xfs_inode *dp, const struct xfs_name *name), TP_ARGS(dp, name), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, dp_ino) __field(int, namelen) __dynamic_array(char, name, name->len) ), TP_fast_assign( __entry->dev = VFS_I(dp)->i_sb->s_dev; __entry->dp_ino = dp->i_ino; __entry->namelen = name->len; memcpy(__get_str(name), name->name, name->len); ), TP_printk("dev %d:%d dp ino 0x%llx name %.*s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->dp_ino, __entry->namelen, __get_str(name)) ) #define DEFINE_NAMESPACE_EVENT(name) \ DEFINE_EVENT(xfs_namespace_class, name, \ TP_PROTO(struct xfs_inode *dp, const struct xfs_name *name), \ TP_ARGS(dp, name)) DEFINE_NAMESPACE_EVENT(xfs_remove); DEFINE_NAMESPACE_EVENT(xfs_link); DEFINE_NAMESPACE_EVENT(xfs_lookup); DEFINE_NAMESPACE_EVENT(xfs_create); DEFINE_NAMESPACE_EVENT(xfs_symlink); TRACE_EVENT(xfs_rename, TP_PROTO(struct xfs_inode *src_dp, struct xfs_inode *target_dp, struct xfs_name *src_name, struct xfs_name *target_name), TP_ARGS(src_dp, target_dp, src_name, target_name), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, src_dp_ino) __field(xfs_ino_t, target_dp_ino) __field(int, src_namelen) __field(int, target_namelen) __dynamic_array(char, src_name, src_name->len) __dynamic_array(char, target_name, target_name->len) ), TP_fast_assign( __entry->dev = VFS_I(src_dp)->i_sb->s_dev; __entry->src_dp_ino = src_dp->i_ino; __entry->target_dp_ino = target_dp->i_ino; __entry->src_namelen = src_name->len; __entry->target_namelen = target_name->len; memcpy(__get_str(src_name), src_name->name, src_name->len); memcpy(__get_str(target_name), target_name->name, target_name->len); ), TP_printk("dev %d:%d src dp ino 0x%llx target dp ino 0x%llx" " src name %.*s target name %.*s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->src_dp_ino, __entry->target_dp_ino, __entry->src_namelen, __get_str(src_name), __entry->target_namelen, __get_str(target_name)) ) DECLARE_EVENT_CLASS(xfs_dquot_class, TP_PROTO(struct xfs_dquot *dqp), TP_ARGS(dqp), TP_STRUCT__entry( __field(dev_t, dev) __field(u32, id) __field(xfs_dqtype_t, type) __field(unsigned, flags) __field(unsigned, nrefs) __field(unsigned long long, res_bcount) __field(unsigned long long, 
res_rtbcount) __field(unsigned long long, res_icount) __field(unsigned long long, bcount) __field(unsigned long long, rtbcount) __field(unsigned long long, icount) __field(unsigned long long, blk_hardlimit) __field(unsigned long long, blk_softlimit) __field(unsigned long long, rtb_hardlimit) __field(unsigned long long, rtb_softlimit) __field(unsigned long long, ino_hardlimit) __field(unsigned long long, ino_softlimit) ), TP_fast_assign( __entry->dev = dqp->q_mount->m_super->s_dev; __entry->id = dqp->q_id; __entry->type = dqp->q_type; __entry->flags = dqp->q_flags; __entry->nrefs = dqp->q_nrefs; __entry->res_bcount = dqp->q_blk.reserved; __entry->res_rtbcount = dqp->q_rtb.reserved; __entry->res_icount = dqp->q_ino.reserved; __entry->bcount = dqp->q_blk.count; __entry->rtbcount = dqp->q_rtb.count; __entry->icount = dqp->q_ino.count; __entry->blk_hardlimit = dqp->q_blk.hardlimit; __entry->blk_softlimit = dqp->q_blk.softlimit; __entry->rtb_hardlimit = dqp->q_rtb.hardlimit; __entry->rtb_softlimit = dqp->q_rtb.softlimit; __entry->ino_hardlimit = dqp->q_ino.hardlimit; __entry->ino_softlimit = dqp->q_ino.softlimit; ), TP_printk("dev %d:%d id 0x%x type %s flags %s nrefs %u " "res_bc 0x%llx res_rtbc 0x%llx res_ic 0x%llx " "bcnt 0x%llx bhardlimit 0x%llx bsoftlimit 0x%llx " "rtbcnt 0x%llx rtbhardlimit 0x%llx rtbsoftlimit 0x%llx " "icnt 0x%llx ihardlimit 0x%llx isoftlimit 0x%llx]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->id, __print_flags(__entry->type, "|", XFS_DQTYPE_STRINGS), __print_flags(__entry->flags, "|", XFS_DQFLAG_STRINGS), __entry->nrefs, __entry->res_bcount, __entry->res_rtbcount, __entry->res_icount, __entry->bcount, __entry->blk_hardlimit, __entry->blk_softlimit, __entry->rtbcount, __entry->rtb_hardlimit, __entry->rtb_softlimit, __entry->icount, __entry->ino_hardlimit, __entry->ino_softlimit) ) #define DEFINE_DQUOT_EVENT(name) \ DEFINE_EVENT(xfs_dquot_class, name, \ TP_PROTO(struct xfs_dquot *dqp), \ TP_ARGS(dqp)) DEFINE_DQUOT_EVENT(xfs_dqadjust); DEFINE_DQUOT_EVENT(xfs_dqreclaim_want); DEFINE_DQUOT_EVENT(xfs_dqreclaim_dirty); DEFINE_DQUOT_EVENT(xfs_dqreclaim_busy); DEFINE_DQUOT_EVENT(xfs_dqreclaim_done); DEFINE_DQUOT_EVENT(xfs_dqattach_found); DEFINE_DQUOT_EVENT(xfs_dqattach_get); DEFINE_DQUOT_EVENT(xfs_dqalloc); DEFINE_DQUOT_EVENT(xfs_dqtobp_read); DEFINE_DQUOT_EVENT(xfs_dqread); DEFINE_DQUOT_EVENT(xfs_dqread_fail); DEFINE_DQUOT_EVENT(xfs_dqget_hit); DEFINE_DQUOT_EVENT(xfs_dqget_miss); DEFINE_DQUOT_EVENT(xfs_dqget_freeing); DEFINE_DQUOT_EVENT(xfs_dqget_dup); DEFINE_DQUOT_EVENT(xfs_dqput); DEFINE_DQUOT_EVENT(xfs_dqput_free); DEFINE_DQUOT_EVENT(xfs_dqrele); DEFINE_DQUOT_EVENT(xfs_dqflush); DEFINE_DQUOT_EVENT(xfs_dqflush_force); DEFINE_DQUOT_EVENT(xfs_dqflush_done); DEFINE_DQUOT_EVENT(xfs_trans_apply_dquot_deltas_before); DEFINE_DQUOT_EVENT(xfs_trans_apply_dquot_deltas_after); TRACE_EVENT(xfs_trans_mod_dquot, TP_PROTO(struct xfs_trans *tp, struct xfs_dquot *dqp, unsigned int field, int64_t delta), TP_ARGS(tp, dqp, field, delta), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_dqtype_t, type) __field(unsigned int, flags) __field(unsigned int, dqid) __field(unsigned int, field) __field(int64_t, delta) ), TP_fast_assign( __entry->dev = tp->t_mountp->m_super->s_dev; __entry->type = dqp->q_type; __entry->flags = dqp->q_flags; __entry->dqid = dqp->q_id; __entry->field = field; __entry->delta = delta; ), TP_printk("dev %d:%d dquot id 0x%x type %s flags %s field %s delta %lld", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->dqid, __print_flags(__entry->type, "|", 
XFS_DQTYPE_STRINGS), __print_flags(__entry->flags, "|", XFS_DQFLAG_STRINGS), __print_flags(__entry->field, "|", XFS_QMOPT_FLAGS), __entry->delta) ); DECLARE_EVENT_CLASS(xfs_dqtrx_class, TP_PROTO(struct xfs_dqtrx *qtrx), TP_ARGS(qtrx), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_dqtype_t, type) __field(unsigned int, flags) __field(u32, dqid) __field(uint64_t, blk_res) __field(int64_t, bcount_delta) __field(int64_t, delbcnt_delta) __field(uint64_t, rtblk_res) __field(uint64_t, rtblk_res_used) __field(int64_t, rtbcount_delta) __field(int64_t, delrtb_delta) __field(uint64_t, ino_res) __field(uint64_t, ino_res_used) __field(int64_t, icount_delta) ), TP_fast_assign( __entry->dev = qtrx->qt_dquot->q_mount->m_super->s_dev; __entry->type = qtrx->qt_dquot->q_type; __entry->flags = qtrx->qt_dquot->q_flags; __entry->dqid = qtrx->qt_dquot->q_id; __entry->blk_res = qtrx->qt_blk_res; __entry->bcount_delta = qtrx->qt_bcount_delta; __entry->delbcnt_delta = qtrx->qt_delbcnt_delta; __entry->rtblk_res = qtrx->qt_rtblk_res; __entry->rtblk_res_used = qtrx->qt_rtblk_res_used; __entry->rtbcount_delta = qtrx->qt_rtbcount_delta; __entry->delrtb_delta = qtrx->qt_delrtb_delta; __entry->ino_res = qtrx->qt_ino_res; __entry->ino_res_used = qtrx->qt_ino_res_used; __entry->icount_delta = qtrx->qt_icount_delta; ), TP_printk("dev %d:%d dquot id 0x%x type %s flags %s " "blk_res %llu bcount_delta %lld delbcnt_delta %lld " "rtblk_res %llu rtblk_res_used %llu rtbcount_delta %lld delrtb_delta %lld " "ino_res %llu ino_res_used %llu icount_delta %lld", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->dqid, __print_flags(__entry->type, "|", XFS_DQTYPE_STRINGS), __print_flags(__entry->flags, "|", XFS_DQFLAG_STRINGS), __entry->blk_res, __entry->bcount_delta, __entry->delbcnt_delta, __entry->rtblk_res, __entry->rtblk_res_used, __entry->rtbcount_delta, __entry->delrtb_delta, __entry->ino_res, __entry->ino_res_used, __entry->icount_delta) ) #define DEFINE_DQTRX_EVENT(name) \ DEFINE_EVENT(xfs_dqtrx_class, name, \ TP_PROTO(struct xfs_dqtrx *qtrx), \ TP_ARGS(qtrx)) DEFINE_DQTRX_EVENT(xfs_trans_apply_dquot_deltas); DEFINE_DQTRX_EVENT(xfs_trans_mod_dquot_before); DEFINE_DQTRX_EVENT(xfs_trans_mod_dquot_after); DECLARE_EVENT_CLASS(xfs_loggrant_class, TP_PROTO(struct xlog *log, struct xlog_ticket *tic), TP_ARGS(log, tic), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned long, tic) __field(char, ocnt) __field(char, cnt) __field(int, curr_res) __field(int, unit_res) __field(unsigned int, flags) __field(int, reserveq) __field(int, writeq) __field(uint64_t, grant_reserve_bytes) __field(uint64_t, grant_write_bytes) __field(uint64_t, tail_space) __field(int, curr_cycle) __field(int, curr_block) __field(xfs_lsn_t, tail_lsn) ), TP_fast_assign( __entry->dev = log->l_mp->m_super->s_dev; __entry->tic = (unsigned long)tic; __entry->ocnt = tic->t_ocnt; __entry->cnt = tic->t_cnt; __entry->curr_res = tic->t_curr_res; __entry->unit_res = tic->t_unit_res; __entry->flags = tic->t_flags; __entry->reserveq = list_empty(&log->l_reserve_head.waiters); __entry->writeq = list_empty(&log->l_write_head.waiters); __entry->tail_space = READ_ONCE(log->l_tail_space); __entry->grant_reserve_bytes = __entry->tail_space + atomic64_read(&log->l_reserve_head.grant); __entry->grant_write_bytes = __entry->tail_space + atomic64_read(&log->l_write_head.grant); __entry->curr_cycle = log->l_curr_cycle; __entry->curr_block = log->l_curr_block; __entry->tail_lsn = atomic64_read(&log->l_tail_lsn); ), TP_printk("dev %d:%d tic 0x%lx t_ocnt %u t_cnt %u t_curr_res %u " 
"t_unit_res %u t_flags %s reserveq %s writeq %s " "tail space %llu grant_reserve_bytes %llu " "grant_write_bytes %llu curr_cycle %d curr_block %d " "tail_cycle %d tail_block %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tic, __entry->ocnt, __entry->cnt, __entry->curr_res, __entry->unit_res, __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS), __entry->reserveq ? "empty" : "active", __entry->writeq ? "empty" : "active", __entry->tail_space, __entry->grant_reserve_bytes, __entry->grant_write_bytes, __entry->curr_cycle, __entry->curr_block, CYCLE_LSN(__entry->tail_lsn), BLOCK_LSN(__entry->tail_lsn) ) ) #define DEFINE_LOGGRANT_EVENT(name) \ DEFINE_EVENT(xfs_loggrant_class, name, \ TP_PROTO(struct xlog *log, struct xlog_ticket *tic), \ TP_ARGS(log, tic)) DEFINE_LOGGRANT_EVENT(xfs_log_umount_write); DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep); DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake); DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up); DEFINE_LOGGRANT_EVENT(xfs_log_reserve); DEFINE_LOGGRANT_EVENT(xfs_log_reserve_exit); DEFINE_LOGGRANT_EVENT(xfs_log_regrant); DEFINE_LOGGRANT_EVENT(xfs_log_regrant_exit); DEFINE_LOGGRANT_EVENT(xfs_log_ticket_regrant); DEFINE_LOGGRANT_EVENT(xfs_log_ticket_regrant_exit); DEFINE_LOGGRANT_EVENT(xfs_log_ticket_regrant_sub); DEFINE_LOGGRANT_EVENT(xfs_log_ticket_ungrant); DEFINE_LOGGRANT_EVENT(xfs_log_ticket_ungrant_sub); DEFINE_LOGGRANT_EVENT(xfs_log_ticket_ungrant_exit); DEFINE_LOGGRANT_EVENT(xfs_log_cil_wait); DEFINE_LOGGRANT_EVENT(xfs_log_cil_return); DECLARE_EVENT_CLASS(xfs_log_item_class, TP_PROTO(struct xfs_log_item *lip), TP_ARGS(lip), TP_STRUCT__entry( __field(dev_t, dev) __field(void *, lip) __field(uint, type) __field(unsigned long, flags) __field(xfs_lsn_t, lsn) ), TP_fast_assign( __entry->dev = lip->li_log->l_mp->m_super->s_dev; __entry->lip = lip; __entry->type = lip->li_type; __entry->flags = lip->li_flags; __entry->lsn = lip->li_lsn; ), TP_printk("dev %d:%d lip %p lsn %d/%d type %s flags %s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->lip, CYCLE_LSN(__entry->lsn), BLOCK_LSN(__entry->lsn), __print_symbolic(__entry->type, XFS_LI_TYPE_DESC), __print_flags(__entry->flags, "|", XFS_LI_FLAGS)) ) TRACE_EVENT(xfs_log_force, TP_PROTO(struct xfs_mount *mp, xfs_lsn_t lsn, unsigned long caller_ip), TP_ARGS(mp, lsn, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_lsn_t, lsn) __field(unsigned long, caller_ip) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->lsn = lsn; __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d lsn 0x%llx caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->lsn, (void *)__entry->caller_ip) ) #define DEFINE_LOG_ITEM_EVENT(name) \ DEFINE_EVENT(xfs_log_item_class, name, \ TP_PROTO(struct xfs_log_item *lip), \ TP_ARGS(lip)) DEFINE_LOG_ITEM_EVENT(xfs_ail_push); DEFINE_LOG_ITEM_EVENT(xfs_ail_pinned); DEFINE_LOG_ITEM_EVENT(xfs_ail_locked); DEFINE_LOG_ITEM_EVENT(xfs_ail_flushing); DEFINE_LOG_ITEM_EVENT(xfs_cil_whiteout_mark); DEFINE_LOG_ITEM_EVENT(xfs_cil_whiteout_skip); DEFINE_LOG_ITEM_EVENT(xfs_cil_whiteout_unpin); DECLARE_EVENT_CLASS(xfs_ail_class, TP_PROTO(struct xfs_log_item *lip, xfs_lsn_t old_lsn, xfs_lsn_t new_lsn), TP_ARGS(lip, old_lsn, new_lsn), TP_STRUCT__entry( __field(dev_t, dev) __field(void *, lip) __field(uint, type) __field(unsigned long, flags) __field(xfs_lsn_t, old_lsn) __field(xfs_lsn_t, new_lsn) ), TP_fast_assign( __entry->dev = lip->li_log->l_mp->m_super->s_dev; __entry->lip = lip; __entry->type = lip->li_type; __entry->flags = lip->li_flags; __entry->old_lsn = old_lsn; 
__entry->new_lsn = new_lsn; ), TP_printk("dev %d:%d lip %p old lsn %d/%d new lsn %d/%d type %s flags %s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->lip, CYCLE_LSN(__entry->old_lsn), BLOCK_LSN(__entry->old_lsn), CYCLE_LSN(__entry->new_lsn), BLOCK_LSN(__entry->new_lsn), __print_symbolic(__entry->type, XFS_LI_TYPE_DESC), __print_flags(__entry->flags, "|", XFS_LI_FLAGS)) ) #define DEFINE_AIL_EVENT(name) \ DEFINE_EVENT(xfs_ail_class, name, \ TP_PROTO(struct xfs_log_item *lip, xfs_lsn_t old_lsn, xfs_lsn_t new_lsn), \ TP_ARGS(lip, old_lsn, new_lsn)) DEFINE_AIL_EVENT(xfs_ail_insert); DEFINE_AIL_EVENT(xfs_ail_move); DEFINE_AIL_EVENT(xfs_ail_delete); TRACE_EVENT(xfs_log_assign_tail_lsn, TP_PROTO(struct xlog *log, xfs_lsn_t new_lsn), TP_ARGS(log, new_lsn), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_lsn_t, new_lsn) __field(xfs_lsn_t, old_lsn) __field(xfs_lsn_t, head_lsn) ), TP_fast_assign( __entry->dev = log->l_mp->m_super->s_dev; __entry->new_lsn = new_lsn; __entry->old_lsn = atomic64_read(&log->l_tail_lsn); __entry->head_lsn = log->l_ailp->ail_head_lsn; ), TP_printk("dev %d:%d new tail lsn %d/%d, old lsn %d/%d, head lsn %d/%d", MAJOR(__entry->dev), MINOR(__entry->dev), CYCLE_LSN(__entry->new_lsn), BLOCK_LSN(__entry->new_lsn), CYCLE_LSN(__entry->old_lsn), BLOCK_LSN(__entry->old_lsn), CYCLE_LSN(__entry->head_lsn), BLOCK_LSN(__entry->head_lsn)) ) DECLARE_EVENT_CLASS(xfs_file_class, TP_PROTO(struct kiocb *iocb, struct iov_iter *iter), TP_ARGS(iocb, iter), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(xfs_fsize_t, size) __field(loff_t, offset) __field(size_t, count) ), TP_fast_assign( __entry->dev = file_inode(iocb->ki_filp)->i_sb->s_dev; __entry->ino = XFS_I(file_inode(iocb->ki_filp))->i_ino; __entry->size = XFS_I(file_inode(iocb->ki_filp))->i_disk_size; __entry->offset = iocb->ki_pos; __entry->count = iov_iter_count(iter); ), TP_printk("dev %d:%d ino 0x%llx disize 0x%llx pos 0x%llx bytecount 0x%zx", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->size, __entry->offset, __entry->count) ) #define DEFINE_RW_EVENT(name) \ DEFINE_EVENT(xfs_file_class, name, \ TP_PROTO(struct kiocb *iocb, struct iov_iter *iter), \ TP_ARGS(iocb, iter)) DEFINE_RW_EVENT(xfs_file_buffered_read); DEFINE_RW_EVENT(xfs_file_direct_read); DEFINE_RW_EVENT(xfs_file_dax_read); DEFINE_RW_EVENT(xfs_file_buffered_write); DEFINE_RW_EVENT(xfs_file_direct_write); DEFINE_RW_EVENT(xfs_file_dax_write); DEFINE_RW_EVENT(xfs_reflink_bounce_dio_write); DECLARE_EVENT_CLASS(xfs_imap_class, TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, int whichfork, struct xfs_bmbt_irec *irec), TP_ARGS(ip, offset, count, whichfork, irec), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(loff_t, size) __field(loff_t, offset) __field(size_t, count) __field(int, whichfork) __field(xfs_fileoff_t, startoff) __field(xfs_fsblock_t, startblock) __field(xfs_filblks_t, blockcount) ), TP_fast_assign( __entry->dev = VFS_I(ip)->i_sb->s_dev; __entry->ino = ip->i_ino; __entry->size = ip->i_disk_size; __entry->offset = offset; __entry->count = count; __entry->whichfork = whichfork; __entry->startoff = irec ? irec->br_startoff : 0; __entry->startblock = irec ? irec->br_startblock : 0; __entry->blockcount = irec ? 
irec->br_blockcount : 0; ), TP_printk("dev %d:%d ino 0x%llx disize 0x%llx pos 0x%llx bytecount 0x%zx " "fork %s startoff 0x%llx startblock 0x%llx fsbcount 0x%llx", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->size, __entry->offset, __entry->count, __print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS), __entry->startoff, (int64_t)__entry->startblock, __entry->blockcount) ) #define DEFINE_IMAP_EVENT(name) \ DEFINE_EVENT(xfs_imap_class, name, \ TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \ int whichfork, struct xfs_bmbt_irec *irec), \ TP_ARGS(ip, offset, count, whichfork, irec)) DEFINE_IMAP_EVENT(xfs_map_blocks_found); DEFINE_IMAP_EVENT(xfs_map_blocks_alloc); DEFINE_IMAP_EVENT(xfs_iomap_alloc); DEFINE_IMAP_EVENT(xfs_iomap_found); DECLARE_EVENT_CLASS(xfs_simple_io_class, TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), TP_ARGS(ip, offset, count), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(loff_t, isize) __field(loff_t, disize) __field(loff_t, offset) __field(size_t, count) ), TP_fast_assign( __entry->dev = VFS_I(ip)->i_sb->s_dev; __entry->ino = ip->i_ino; __entry->isize = VFS_I(ip)->i_size; __entry->disize = ip->i_disk_size; __entry->offset = offset; __entry->count = count; ), TP_printk("dev %d:%d ino 0x%llx isize 0x%llx disize 0x%llx " "pos 0x%llx bytecount 0x%zx", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->isize, __entry->disize, __entry->offset, __entry->count) ); #define DEFINE_SIMPLE_IO_EVENT(name) \ DEFINE_EVENT(xfs_simple_io_class, name, \ TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), \ TP_ARGS(ip, offset, count)) DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc); DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert); DEFINE_SIMPLE_IO_EVENT(xfs_setfilesize); DEFINE_SIMPLE_IO_EVENT(xfs_zero_eof); DEFINE_SIMPLE_IO_EVENT(xfs_end_io_direct_write); DEFINE_SIMPLE_IO_EVENT(xfs_end_io_direct_write_unwritten); DEFINE_SIMPLE_IO_EVENT(xfs_end_io_direct_write_append); DEFINE_SIMPLE_IO_EVENT(xfs_file_splice_read); DEFINE_SIMPLE_IO_EVENT(xfs_zoned_map_blocks); DECLARE_EVENT_CLASS(xfs_itrunc_class, TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), TP_ARGS(ip, new_size), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(xfs_fsize_t, size) __field(xfs_fsize_t, new_size) ), TP_fast_assign( __entry->dev = VFS_I(ip)->i_sb->s_dev; __entry->ino = ip->i_ino; __entry->size = ip->i_disk_size; __entry->new_size = new_size; ), TP_printk("dev %d:%d ino 0x%llx disize 0x%llx new_size 0x%llx", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->size, __entry->new_size) ) #define DEFINE_ITRUNC_EVENT(name) \ DEFINE_EVENT(xfs_itrunc_class, name, \ TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \ TP_ARGS(ip, new_size)) DEFINE_ITRUNC_EVENT(xfs_itruncate_extents_start); DEFINE_ITRUNC_EVENT(xfs_itruncate_extents_end); TRACE_EVENT(xfs_pagecache_inval, TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish), TP_ARGS(ip, start, finish), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(xfs_fsize_t, size) __field(xfs_off_t, start) __field(xfs_off_t, finish) ), TP_fast_assign( __entry->dev = VFS_I(ip)->i_sb->s_dev; __entry->ino = ip->i_ino; __entry->size = ip->i_disk_size; __entry->start = start; __entry->finish = finish; ), TP_printk("dev %d:%d ino 0x%llx disize 0x%llx start 0x%llx finish 0x%llx", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->size, __entry->start, __entry->finish) ); TRACE_EVENT(xfs_bunmap, TP_PROTO(struct 
xfs_inode *ip, xfs_fileoff_t fileoff, xfs_filblks_t len, int flags, unsigned long caller_ip), TP_ARGS(ip, fileoff, len, flags, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(xfs_fsize_t, size) __field(xfs_fileoff_t, fileoff) __field(xfs_filblks_t, len) __field(unsigned long, caller_ip) __field(int, flags) ), TP_fast_assign( __entry->dev = VFS_I(ip)->i_sb->s_dev; __entry->ino = ip->i_ino; __entry->size = ip->i_disk_size; __entry->fileoff = fileoff; __entry->len = len; __entry->caller_ip = caller_ip; __entry->flags = flags; ), TP_printk("dev %d:%d ino 0x%llx disize 0x%llx fileoff 0x%llx fsbcount 0x%llx " "flags %s caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->size, __entry->fileoff, __entry->len, __print_flags(__entry->flags, "|", XFS_BMAPI_FLAGS), (void *)__entry->caller_ip) ); DECLARE_EVENT_CLASS(xfs_extent_busy_class, TP_PROTO(const struct xfs_group *xg, xfs_agblock_t agbno, xfs_extlen_t len), TP_ARGS(xg, agbno, len), TP_STRUCT__entry( __field(dev_t, dev) __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(xfs_agblock_t, agbno) __field(xfs_extlen_t, len) ), TP_fast_assign( __entry->dev = xg->xg_mount->m_super->s_dev; __entry->type = xg->xg_type; __entry->agno = xg->xg_gno; __entry->agbno = agbno; __entry->len = len; ), TP_printk("dev %d:%d %sno 0x%x %sbno 0x%x fsbcount 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agbno, __entry->len) ); #define DEFINE_BUSY_EVENT(name) \ DEFINE_EVENT(xfs_extent_busy_class, name, \ TP_PROTO(const struct xfs_group *xg, xfs_agblock_t agbno, \ xfs_extlen_t len), \ TP_ARGS(xg, agbno, len)) DEFINE_BUSY_EVENT(xfs_extent_busy); DEFINE_BUSY_EVENT(xfs_extent_busy_force); DEFINE_BUSY_EVENT(xfs_extent_busy_reuse); DEFINE_BUSY_EVENT(xfs_extent_busy_clear); TRACE_EVENT(xfs_extent_busy_trim, TP_PROTO(const struct xfs_group *xg, xfs_agblock_t agbno, xfs_extlen_t len, xfs_agblock_t tbno, xfs_extlen_t tlen), TP_ARGS(xg, agbno, len, tbno, tlen), TP_STRUCT__entry( __field(dev_t, dev) __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(xfs_agblock_t, agbno) __field(xfs_extlen_t, len) __field(xfs_agblock_t, tbno) __field(xfs_extlen_t, tlen) ), TP_fast_assign( __entry->dev = xg->xg_mount->m_super->s_dev; __entry->type = xg->xg_type; __entry->agno = xg->xg_gno; __entry->agbno = agbno; __entry->len = len; __entry->tbno = tbno; __entry->tlen = tlen; ), TP_printk("dev %d:%d %sno 0x%x %sbno 0x%x fsbcount 0x%x found_agbno 0x%x found_fsbcount 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agbno, __entry->len, __entry->tbno, __entry->tlen) ); #ifdef CONFIG_XFS_RT TRACE_EVENT(xfs_rtalloc_extent_busy, TP_PROTO(struct xfs_rtgroup *rtg, xfs_rtxnum_t start, xfs_rtxlen_t minlen, xfs_rtxlen_t maxlen, xfs_rtxlen_t len, xfs_rtxlen_t prod, xfs_rtxnum_t rtx, unsigned busy_gen), TP_ARGS(rtg, start, minlen, maxlen, len, prod, rtx, busy_gen), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_rgnumber_t, rgno) __field(xfs_rtxnum_t, start) __field(xfs_rtxlen_t, minlen) __field(xfs_rtxlen_t, maxlen) __field(xfs_rtxlen_t, mod) __field(xfs_rtxlen_t, prod) __field(xfs_rtxlen_t, len) __field(xfs_rtxnum_t, rtx) __field(unsigned, busy_gen) ), TP_fast_assign( __entry->dev = rtg_mount(rtg)->m_super->s_dev; __entry->rgno = rtg_rgno(rtg); __entry->start = 
start; __entry->minlen = minlen; __entry->maxlen = maxlen; __entry->prod = prod; __entry->len = len; __entry->rtx = rtx; __entry->busy_gen = busy_gen; ), TP_printk("dev %d:%d rgno 0x%x startrtx 0x%llx minlen %u maxlen %u " "prod %u len %u rtx 0%llx busy_gen 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rgno, __entry->start, __entry->minlen, __entry->maxlen, __entry->prod, __entry->len, __entry->rtx, __entry->busy_gen) ) TRACE_EVENT(xfs_rtalloc_extent_busy_trim, TP_PROTO(struct xfs_rtgroup *rtg, xfs_rtxnum_t old_rtx, xfs_rtxlen_t old_len, xfs_rtxnum_t new_rtx, xfs_rtxlen_t new_len), TP_ARGS(rtg, old_rtx, old_len, new_rtx, new_len), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_rgnumber_t, rgno) __field(xfs_rtxnum_t, old_rtx) __field(xfs_rtxnum_t, new_rtx) __field(xfs_rtxlen_t, old_len) __field(xfs_rtxlen_t, new_len) ), TP_fast_assign( __entry->dev = rtg_mount(rtg)->m_super->s_dev; __entry->rgno = rtg_rgno(rtg); __entry->old_rtx = old_rtx; __entry->old_len = old_len; __entry->new_rtx = new_rtx; __entry->new_len = new_len; ), TP_printk("dev %d:%d rgno 0x%x rtx 0x%llx rtxcount 0x%x -> rtx 0x%llx rtxcount 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rgno, __entry->old_rtx, __entry->old_len, __entry->new_rtx, __entry->new_len) ); #endif /* CONFIG_XFS_RT */ DECLARE_EVENT_CLASS(xfs_agf_class, TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags, unsigned long caller_ip), TP_ARGS(mp, agf, flags, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_agnumber_t, agno) __field(int, flags) __field(__u32, length) __field(__u32, bno_root) __field(__u32, cnt_root) __field(__u32, bno_level) __field(__u32, cnt_level) __field(__u32, flfirst) __field(__u32, fllast) __field(__u32, flcount) __field(__u32, freeblks) __field(__u32, longest) __field(unsigned long, caller_ip) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->agno = be32_to_cpu(agf->agf_seqno), __entry->flags = flags; __entry->length = be32_to_cpu(agf->agf_length), __entry->bno_root = be32_to_cpu(agf->agf_bno_root), __entry->cnt_root = be32_to_cpu(agf->agf_cnt_root), __entry->bno_level = be32_to_cpu(agf->agf_bno_level), __entry->cnt_level = be32_to_cpu(agf->agf_cnt_level), __entry->flfirst = be32_to_cpu(agf->agf_flfirst), __entry->fllast = be32_to_cpu(agf->agf_fllast), __entry->flcount = be32_to_cpu(agf->agf_flcount), __entry->freeblks = be32_to_cpu(agf->agf_freeblks), __entry->longest = be32_to_cpu(agf->agf_longest); __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d agno 0x%x flags %s length %u roots b %u c %u " "levels b %u c %u flfirst %u fllast %u flcount %u " "freeblks %u longest %u caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno, __print_flags(__entry->flags, "|", XFS_AGF_FLAGS), __entry->length, __entry->bno_root, __entry->cnt_root, __entry->bno_level, __entry->cnt_level, __entry->flfirst, __entry->fllast, __entry->flcount, __entry->freeblks, __entry->longest, (void *)__entry->caller_ip) ); #define DEFINE_AGF_EVENT(name) \ DEFINE_EVENT(xfs_agf_class, name, \ TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags, \ unsigned long caller_ip), \ TP_ARGS(mp, agf, flags, caller_ip)) DEFINE_AGF_EVENT(xfs_agf); DEFINE_AGF_EVENT(xfs_agfl_reset); TRACE_EVENT(xfs_free_extent, TP_PROTO(const struct xfs_perag *pag, xfs_agblock_t agbno, xfs_extlen_t len, enum xfs_ag_resv_type resv, int haveleft, int haveright), TP_ARGS(pag, agbno, len, resv, haveleft, haveright), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_agnumber_t, agno) __field(xfs_agblock_t, agbno) 
__field(xfs_extlen_t, len) __field(int, resv) __field(int, haveleft) __field(int, haveright) ), TP_fast_assign( __entry->dev = pag_mount(pag)->m_super->s_dev; __entry->agno = pag_agno(pag); __entry->agbno = agbno; __entry->len = len; __entry->resv = resv; __entry->haveleft = haveleft; __entry->haveright = haveright; ), TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x resv %d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno, __entry->agbno, __entry->len, __entry->resv, __entry->haveleft ? (__entry->haveright ? "both" : "left") : (__entry->haveright ? "right" : "none")) ); DECLARE_EVENT_CLASS(xfs_alloc_class, TP_PROTO(struct xfs_alloc_arg *args), TP_ARGS(args), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_agnumber_t, agno) __field(xfs_agblock_t, agbno) __field(xfs_extlen_t, minlen) __field(xfs_extlen_t, maxlen) __field(xfs_extlen_t, mod) __field(xfs_extlen_t, prod) __field(xfs_extlen_t, minleft) __field(xfs_extlen_t, total) __field(xfs_extlen_t, alignment) __field(xfs_extlen_t, minalignslop) __field(xfs_extlen_t, len) __field(char, wasdel) __field(char, wasfromfl) __field(int, resv) __field(int, datatype) __field(xfs_agnumber_t, highest_agno) ), TP_fast_assign( __entry->dev = args->mp->m_super->s_dev; __entry->agno = args->agno; __entry->agbno = args->agbno; __entry->minlen = args->minlen; __entry->maxlen = args->maxlen; __entry->mod = args->mod; __entry->prod = args->prod; __entry->minleft = args->minleft; __entry->total = args->total; __entry->alignment = args->alignment; __entry->minalignslop = args->minalignslop; __entry->len = args->len; __entry->wasdel = args->wasdel; __entry->wasfromfl = args->wasfromfl; __entry->resv = args->resv; __entry->datatype = args->datatype; __entry->highest_agno = args->tp->t_highest_agno; ), TP_printk("dev %d:%d agno 0x%x agbno 0x%x minlen %u maxlen %u mod %u " "prod %u minleft %u total %u alignment %u minalignslop %u " "len %u wasdel %d wasfromfl %d resv %d " "datatype 0x%x highest_agno 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno, __entry->agbno, __entry->minlen, __entry->maxlen, __entry->mod, __entry->prod, __entry->minleft, __entry->total, __entry->alignment, __entry->minalignslop, __entry->len, __entry->wasdel, __entry->wasfromfl, __entry->resv, __entry->datatype, __entry->highest_agno) ) #define DEFINE_ALLOC_EVENT(name) \ DEFINE_EVENT(xfs_alloc_class, name, \ TP_PROTO(struct xfs_alloc_arg *args), \ TP_ARGS(args)) DEFINE_ALLOC_EVENT(xfs_alloc_exact_done); DEFINE_ALLOC_EVENT(xfs_alloc_exact_notfound); DEFINE_ALLOC_EVENT(xfs_alloc_exact_error); DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft); DEFINE_ALLOC_EVENT(xfs_alloc_near_first); DEFINE_ALLOC_EVENT(xfs_alloc_cur); DEFINE_ALLOC_EVENT(xfs_alloc_cur_right); DEFINE_ALLOC_EVENT(xfs_alloc_cur_left); DEFINE_ALLOC_EVENT(xfs_alloc_cur_lookup); DEFINE_ALLOC_EVENT(xfs_alloc_cur_lookup_done); DEFINE_ALLOC_EVENT(xfs_alloc_near_error); DEFINE_ALLOC_EVENT(xfs_alloc_near_noentry); DEFINE_ALLOC_EVENT(xfs_alloc_near_busy); DEFINE_ALLOC_EVENT(xfs_alloc_size_neither); DEFINE_ALLOC_EVENT(xfs_alloc_size_noentry); DEFINE_ALLOC_EVENT(xfs_alloc_size_nominleft); DEFINE_ALLOC_EVENT(xfs_alloc_size_done); DEFINE_ALLOC_EVENT(xfs_alloc_size_error); DEFINE_ALLOC_EVENT(xfs_alloc_size_busy); DEFINE_ALLOC_EVENT(xfs_alloc_small_freelist); DEFINE_ALLOC_EVENT(xfs_alloc_small_notenough); DEFINE_ALLOC_EVENT(xfs_alloc_small_done); DEFINE_ALLOC_EVENT(xfs_alloc_small_error); DEFINE_ALLOC_EVENT(xfs_alloc_vextent_badargs); DEFINE_ALLOC_EVENT(xfs_alloc_vextent_skip_deadlock); 
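/*
 * Illustrative sketch (not part of the header): the allocator events above
 * all share xfs_alloc_class and take only the xfs_alloc_arg cursor, so a
 * typical call site just passes the args structure it is operating on,
 * e.g.:
 *
 *	trace_xfs_alloc_exact_done(args);
 *
 * Every field logged by the class is snapshotted from *args at the moment
 * the event fires, which is why the same class can be reused at many
 * different allocator decision points without extra arguments.
 */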
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_nofix); DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp); DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed); DEFINE_ALLOC_EVENT(xfs_alloc_vextent_allfailed); DEFINE_ALLOC_EVENT(xfs_alloc_vextent_this_ag); DEFINE_ALLOC_EVENT(xfs_alloc_vextent_start_ag); DEFINE_ALLOC_EVENT(xfs_alloc_vextent_first_ag); DEFINE_ALLOC_EVENT(xfs_alloc_vextent_exact_bno); DEFINE_ALLOC_EVENT(xfs_alloc_vextent_near_bno); DEFINE_ALLOC_EVENT(xfs_alloc_vextent_finish); TRACE_EVENT(xfs_alloc_cur_check, TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t bno, xfs_extlen_t len, xfs_extlen_t diff, bool new), TP_ARGS(cur, bno, len, diff, new), TP_STRUCT__entry( __field(dev_t, dev) __string(name, cur->bc_ops->name) __field(xfs_agblock_t, bno) __field(xfs_extlen_t, len) __field(xfs_extlen_t, diff) __field(bool, new) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; __assign_str(name); __entry->bno = bno; __entry->len = len; __entry->diff = diff; __entry->new = new; ), TP_printk("dev %d:%d %sbt agbno 0x%x fsbcount 0x%x diff 0x%x new %d", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(name), __entry->bno, __entry->len, __entry->diff, __entry->new) ) DECLARE_EVENT_CLASS(xfs_da_class, TP_PROTO(struct xfs_da_args *args), TP_ARGS(args), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __dynamic_array(char, name, args->namelen) __field(int, namelen) __field(xfs_dahash_t, hashval) __field(xfs_ino_t, inumber) __field(uint32_t, op_flags) __field(xfs_ino_t, owner) ), TP_fast_assign( __entry->dev = VFS_I(args->dp)->i_sb->s_dev; __entry->ino = args->dp->i_ino; if (args->namelen) memcpy(__get_str(name), args->name, args->namelen); __entry->namelen = args->namelen; __entry->hashval = args->hashval; __entry->inumber = args->inumber; __entry->op_flags = args->op_flags; __entry->owner = args->owner; ), TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d hashval 0x%x " "inumber 0x%llx op_flags %s owner 0x%llx", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->namelen, __entry->namelen ? 
__get_str(name) : NULL, __entry->namelen, __entry->hashval, __entry->inumber, __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS), __entry->owner) ) #define DEFINE_DIR2_EVENT(name) \ DEFINE_EVENT(xfs_da_class, name, \ TP_PROTO(struct xfs_da_args *args), \ TP_ARGS(args)) DEFINE_DIR2_EVENT(xfs_dir2_sf_addname); DEFINE_DIR2_EVENT(xfs_dir2_sf_create); DEFINE_DIR2_EVENT(xfs_dir2_sf_lookup); DEFINE_DIR2_EVENT(xfs_dir2_sf_replace); DEFINE_DIR2_EVENT(xfs_dir2_sf_removename); DEFINE_DIR2_EVENT(xfs_dir2_sf_toino4); DEFINE_DIR2_EVENT(xfs_dir2_sf_toino8); DEFINE_DIR2_EVENT(xfs_dir2_sf_to_block); DEFINE_DIR2_EVENT(xfs_dir2_block_addname); DEFINE_DIR2_EVENT(xfs_dir2_block_lookup); DEFINE_DIR2_EVENT(xfs_dir2_block_replace); DEFINE_DIR2_EVENT(xfs_dir2_block_removename); DEFINE_DIR2_EVENT(xfs_dir2_block_to_sf); DEFINE_DIR2_EVENT(xfs_dir2_block_to_leaf); DEFINE_DIR2_EVENT(xfs_dir2_leaf_addname); DEFINE_DIR2_EVENT(xfs_dir2_leaf_lookup); DEFINE_DIR2_EVENT(xfs_dir2_leaf_replace); DEFINE_DIR2_EVENT(xfs_dir2_leaf_removename); DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_block); DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_node); DEFINE_DIR2_EVENT(xfs_dir2_node_addname); DEFINE_DIR2_EVENT(xfs_dir2_node_lookup); DEFINE_DIR2_EVENT(xfs_dir2_node_replace); DEFINE_DIR2_EVENT(xfs_dir2_node_removename); DEFINE_DIR2_EVENT(xfs_dir2_node_to_leaf); DECLARE_EVENT_CLASS(xfs_attr_class, TP_PROTO(struct xfs_da_args *args), TP_ARGS(args), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __dynamic_array(char, name, args->namelen) __field(int, namelen) __field(int, valuelen) __field(xfs_dahash_t, hashval) __field(unsigned int, attr_filter) __field(uint32_t, op_flags) ), TP_fast_assign( __entry->dev = VFS_I(args->dp)->i_sb->s_dev; __entry->ino = args->dp->i_ino; if (args->namelen) memcpy(__get_str(name), args->name, args->namelen); __entry->namelen = args->namelen; __entry->valuelen = args->valuelen; __entry->hashval = args->hashval; __entry->attr_filter = args->attr_filter; __entry->op_flags = args->op_flags; ), TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d valuelen %d " "hashval 0x%x filter %s op_flags %s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->namelen, __entry->namelen ? 
__get_str(name) : NULL, __entry->namelen, __entry->valuelen, __entry->hashval, __print_flags(__entry->attr_filter, "|", XFS_ATTR_FILTER_FLAGS), __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS)) ) #define DEFINE_ATTR_EVENT(name) \ DEFINE_EVENT(xfs_attr_class, name, \ TP_PROTO(struct xfs_da_args *args), \ TP_ARGS(args)) DEFINE_ATTR_EVENT(xfs_attr_sf_add); DEFINE_ATTR_EVENT(xfs_attr_sf_addname); DEFINE_ATTR_EVENT(xfs_attr_sf_create); DEFINE_ATTR_EVENT(xfs_attr_sf_lookup); DEFINE_ATTR_EVENT(xfs_attr_sf_remove); DEFINE_ATTR_EVENT(xfs_attr_sf_to_leaf); DEFINE_ATTR_EVENT(xfs_attr_leaf_add); DEFINE_ATTR_EVENT(xfs_attr_leaf_add_old); DEFINE_ATTR_EVENT(xfs_attr_leaf_add_new); DEFINE_ATTR_EVENT(xfs_attr_leaf_add_work); DEFINE_ATTR_EVENT(xfs_attr_leaf_create); DEFINE_ATTR_EVENT(xfs_attr_leaf_compact); DEFINE_ATTR_EVENT(xfs_attr_leaf_get); DEFINE_ATTR_EVENT(xfs_attr_leaf_lookup); DEFINE_ATTR_EVENT(xfs_attr_leaf_replace); DEFINE_ATTR_EVENT(xfs_attr_leaf_remove); DEFINE_ATTR_EVENT(xfs_attr_leaf_removename); DEFINE_ATTR_EVENT(xfs_attr_leaf_split); DEFINE_ATTR_EVENT(xfs_attr_leaf_split_before); DEFINE_ATTR_EVENT(xfs_attr_leaf_split_after); DEFINE_ATTR_EVENT(xfs_attr_leaf_clearflag); DEFINE_ATTR_EVENT(xfs_attr_leaf_setflag); DEFINE_ATTR_EVENT(xfs_attr_leaf_flipflags); DEFINE_ATTR_EVENT(xfs_attr_leaf_to_sf); DEFINE_ATTR_EVENT(xfs_attr_leaf_to_node); DEFINE_ATTR_EVENT(xfs_attr_leaf_rebalance); DEFINE_ATTR_EVENT(xfs_attr_leaf_unbalance); DEFINE_ATTR_EVENT(xfs_attr_leaf_toosmall); DEFINE_ATTR_EVENT(xfs_attr_node_addname); DEFINE_ATTR_EVENT(xfs_attr_node_get); DEFINE_ATTR_EVENT(xfs_attr_node_replace); DEFINE_ATTR_EVENT(xfs_attr_node_removename); DEFINE_ATTR_EVENT(xfs_attr_fillstate); DEFINE_ATTR_EVENT(xfs_attr_refillstate); DEFINE_ATTR_EVENT(xfs_attr_rmtval_get); DEFINE_ATTR_EVENT(xfs_attr_rmtval_set); #define DEFINE_DA_EVENT(name) \ DEFINE_EVENT(xfs_da_class, name, \ TP_PROTO(struct xfs_da_args *args), \ TP_ARGS(args)) DEFINE_DA_EVENT(xfs_da_split); DEFINE_DA_EVENT(xfs_da_join); DEFINE_DA_EVENT(xfs_da_link_before); DEFINE_DA_EVENT(xfs_da_link_after); DEFINE_DA_EVENT(xfs_da_unlink_back); DEFINE_DA_EVENT(xfs_da_unlink_forward); DEFINE_DA_EVENT(xfs_da_root_split); DEFINE_DA_EVENT(xfs_da_root_join); DEFINE_DA_EVENT(xfs_da_node_add); DEFINE_DA_EVENT(xfs_da_node_create); DEFINE_DA_EVENT(xfs_da_node_split); DEFINE_DA_EVENT(xfs_da_node_remove); DEFINE_DA_EVENT(xfs_da_node_rebalance); DEFINE_DA_EVENT(xfs_da_node_unbalance); DEFINE_DA_EVENT(xfs_da_node_toosmall); DEFINE_DA_EVENT(xfs_da_swap_lastblock); DEFINE_DA_EVENT(xfs_da_grow_inode); DEFINE_DA_EVENT(xfs_da_shrink_inode); DEFINE_DA_EVENT(xfs_da_fixhashpath); DEFINE_DA_EVENT(xfs_da_path_shift); DECLARE_EVENT_CLASS(xfs_dir2_space_class, TP_PROTO(struct xfs_da_args *args, int idx), TP_ARGS(args, idx), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(uint32_t, op_flags) __field(int, idx) ), TP_fast_assign( __entry->dev = VFS_I(args->dp)->i_sb->s_dev; __entry->ino = args->dp->i_ino; __entry->op_flags = args->op_flags; __entry->idx = idx; ), TP_printk("dev %d:%d ino 0x%llx op_flags %s index %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS), __entry->idx) ) #define DEFINE_DIR2_SPACE_EVENT(name) \ DEFINE_EVENT(xfs_dir2_space_class, name, \ TP_PROTO(struct xfs_da_args *args, int idx), \ TP_ARGS(args, idx)) DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_add); DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_remove); DEFINE_DIR2_SPACE_EVENT(xfs_dir2_grow_inode); 
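/*
 * Note on the name buffers above (illustrative, not part of the header):
 * xfs_da_class and xfs_attr_class copy the name via __dynamic_array() plus
 * memcpy() rather than __string()/__assign_str(), because xfs_da_args names
 * are length-counted and not guaranteed to be NUL-terminated.  TP_printk()
 * therefore prints them with a bounded "%.*s" and the recorded namelen:
 *
 *	__dynamic_array(char, name, args->namelen)
 *	memcpy(__get_str(name), args->name, args->namelen);
 *	"... name %.*s ...", __entry->namelen, __get_str(name)
 */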
DEFINE_DIR2_SPACE_EVENT(xfs_dir2_shrink_inode); TRACE_EVENT(xfs_dir2_leafn_moveents, TP_PROTO(struct xfs_da_args *args, int src_idx, int dst_idx, int count), TP_ARGS(args, src_idx, dst_idx, count), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(uint32_t, op_flags) __field(int, src_idx) __field(int, dst_idx) __field(int, count) ), TP_fast_assign( __entry->dev = VFS_I(args->dp)->i_sb->s_dev; __entry->ino = args->dp->i_ino; __entry->op_flags = args->op_flags; __entry->src_idx = src_idx; __entry->dst_idx = dst_idx; __entry->count = count; ), TP_printk("dev %d:%d ino 0x%llx op_flags %s " "src_idx %d dst_idx %d count %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS), __entry->src_idx, __entry->dst_idx, __entry->count) ); #define XFS_SWAPEXT_INODES \ { 0, "target" }, \ { 1, "temp" } TRACE_DEFINE_ENUM(XFS_DINODE_FMT_DEV); TRACE_DEFINE_ENUM(XFS_DINODE_FMT_LOCAL); TRACE_DEFINE_ENUM(XFS_DINODE_FMT_EXTENTS); TRACE_DEFINE_ENUM(XFS_DINODE_FMT_BTREE); TRACE_DEFINE_ENUM(XFS_DINODE_FMT_UUID); TRACE_DEFINE_ENUM(XFS_DINODE_FMT_META_BTREE); DECLARE_EVENT_CLASS(xfs_swap_extent_class, TP_PROTO(struct xfs_inode *ip, int which), TP_ARGS(ip, which), TP_STRUCT__entry( __field(dev_t, dev) __field(int, which) __field(xfs_ino_t, ino) __field(int, format) __field(xfs_extnum_t, nex) __field(int, broot_size) __field(int, fork_off) ), TP_fast_assign( __entry->dev = VFS_I(ip)->i_sb->s_dev; __entry->which = which; __entry->ino = ip->i_ino; __entry->format = ip->i_df.if_format; __entry->nex = ip->i_df.if_nextents; __entry->broot_size = ip->i_df.if_broot_bytes; __entry->fork_off = xfs_inode_fork_boff(ip); ), TP_printk("dev %d:%d ino 0x%llx (%s), %s format, num_extents %llu, " "broot size %d, forkoff 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __print_symbolic(__entry->which, XFS_SWAPEXT_INODES), __print_symbolic(__entry->format, XFS_INODE_FORMAT_STR), __entry->nex, __entry->broot_size, __entry->fork_off) ) #define DEFINE_SWAPEXT_EVENT(name) \ DEFINE_EVENT(xfs_swap_extent_class, name, \ TP_PROTO(struct xfs_inode *ip, int which), \ TP_ARGS(ip, which)) DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before); DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after); TRACE_EVENT(xfs_log_recover, TP_PROTO(struct xlog *log, xfs_daddr_t headblk, xfs_daddr_t tailblk), TP_ARGS(log, headblk, tailblk), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_daddr_t, headblk) __field(xfs_daddr_t, tailblk) ), TP_fast_assign( __entry->dev = log->l_mp->m_super->s_dev; __entry->headblk = headblk; __entry->tailblk = tailblk; ), TP_printk("dev %d:%d headblk 0x%llx tailblk 0x%llx", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->headblk, __entry->tailblk) ) TRACE_EVENT(xfs_log_recover_record, TP_PROTO(struct xlog *log, struct xlog_rec_header *rhead, int pass), TP_ARGS(log, rhead, pass), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_lsn_t, lsn) __field(int, len) __field(int, num_logops) __field(int, pass) ), TP_fast_assign( __entry->dev = log->l_mp->m_super->s_dev; __entry->lsn = be64_to_cpu(rhead->h_lsn); __entry->len = be32_to_cpu(rhead->h_len); __entry->num_logops = be32_to_cpu(rhead->h_num_logops); __entry->pass = pass; ), TP_printk("dev %d:%d lsn 0x%llx len 0x%x num_logops 0x%x pass %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->lsn, __entry->len, __entry->num_logops, __entry->pass) ) DECLARE_EVENT_CLASS(xfs_log_recover_item_class, TP_PROTO(struct xlog *log, struct xlog_recover *trans, struct xlog_recover_item *item, int pass), TP_ARGS(log, trans, 
item, pass), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned long, item) __field(xlog_tid_t, tid) __field(xfs_lsn_t, lsn) __field(int, type) __field(int, pass) __field(int, count) __field(int, total) ), TP_fast_assign( __entry->dev = log->l_mp->m_super->s_dev; __entry->item = (unsigned long)item; __entry->tid = trans->r_log_tid; __entry->lsn = trans->r_lsn; __entry->type = ITEM_TYPE(item); __entry->pass = pass; __entry->count = item->ri_cnt; __entry->total = item->ri_total; ), TP_printk("dev %d:%d tid 0x%x lsn 0x%llx, pass %d, item %p, " "item type %s item region count/total %d/%d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, __entry->lsn, __entry->pass, (void *)__entry->item, __print_symbolic(__entry->type, XFS_LI_TYPE_DESC), __entry->count, __entry->total) ) #define DEFINE_LOG_RECOVER_ITEM(name) \ DEFINE_EVENT(xfs_log_recover_item_class, name, \ TP_PROTO(struct xlog *log, struct xlog_recover *trans, \ struct xlog_recover_item *item, int pass), \ TP_ARGS(log, trans, item, pass)) DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add); DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_add_cont); DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_head); DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_tail); DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover); DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class, TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f), TP_ARGS(log, buf_f), TP_STRUCT__entry( __field(dev_t, dev) __field(int64_t, blkno) __field(unsigned short, len) __field(unsigned short, flags) __field(unsigned short, size) __field(unsigned int, map_size) ), TP_fast_assign( __entry->dev = log->l_mp->m_super->s_dev; __entry->blkno = buf_f->blf_blkno; __entry->len = buf_f->blf_len; __entry->flags = buf_f->blf_flags; __entry->size = buf_f->blf_size; __entry->map_size = buf_f->blf_map_size; ), TP_printk("dev %d:%d daddr 0x%llx, bbcount 0x%x, flags 0x%x, size %d, " "map_size %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->blkno, __entry->len, __entry->flags, __entry->size, __entry->map_size) ) #define DEFINE_LOG_RECOVER_BUF_ITEM(name) \ DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \ TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f), \ TP_ARGS(log, buf_f)) DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel); DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel); DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_add); DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_cancel_ref_inc); DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_recover); DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_skip); DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_inode_buf); DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_reg_buf); DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf); DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class, TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f), TP_ARGS(log, in_f), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(unsigned short, size) __field(int, fields) __field(unsigned short, asize) __field(unsigned short, dsize) __field(int64_t, blkno) __field(int, len) __field(int, boffset) ), TP_fast_assign( __entry->dev = log->l_mp->m_super->s_dev; __entry->ino = in_f->ilf_ino; __entry->size = in_f->ilf_size; __entry->fields = in_f->ilf_fields; __entry->asize = in_f->ilf_asize; __entry->dsize = in_f->ilf_dsize; __entry->blkno = in_f->ilf_blkno; __entry->len = in_f->ilf_len; __entry->boffset = in_f->ilf_boffset; ), TP_printk("dev %d:%d ino 0x%llx, size %u, fields 0x%x, asize %d, " 
"dsize %d, daddr 0x%llx, bbcount 0x%x, boffset %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->size, __entry->fields, __entry->asize, __entry->dsize, __entry->blkno, __entry->len, __entry->boffset) ) #define DEFINE_LOG_RECOVER_INO_ITEM(name) \ DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \ TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f), \ TP_ARGS(log, in_f)) DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover); DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_cancel); DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_skip); DECLARE_EVENT_CLASS(xfs_log_recover_icreate_item_class, TP_PROTO(struct xlog *log, struct xfs_icreate_log *in_f), TP_ARGS(log, in_f), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_agnumber_t, agno) __field(xfs_agblock_t, agbno) __field(unsigned int, count) __field(unsigned int, isize) __field(xfs_agblock_t, length) __field(unsigned int, gen) ), TP_fast_assign( __entry->dev = log->l_mp->m_super->s_dev; __entry->agno = be32_to_cpu(in_f->icl_ag); __entry->agbno = be32_to_cpu(in_f->icl_agbno); __entry->count = be32_to_cpu(in_f->icl_count); __entry->isize = be32_to_cpu(in_f->icl_isize); __entry->length = be32_to_cpu(in_f->icl_length); __entry->gen = be32_to_cpu(in_f->icl_gen); ), TP_printk("dev %d:%d agno 0x%x agbno 0x%x fsbcount 0x%x ireccount %u isize %u gen 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno, __entry->agbno, __entry->length, __entry->count, __entry->isize, __entry->gen) ) #define DEFINE_LOG_RECOVER_ICREATE_ITEM(name) \ DEFINE_EVENT(xfs_log_recover_icreate_item_class, name, \ TP_PROTO(struct xlog *log, struct xfs_icreate_log *in_f), \ TP_ARGS(log, in_f)) DEFINE_LOG_RECOVER_ICREATE_ITEM(xfs_log_recover_icreate_cancel); DEFINE_LOG_RECOVER_ICREATE_ITEM(xfs_log_recover_icreate_recover); DECLARE_EVENT_CLASS(xfs_discard_class, TP_PROTO(const struct xfs_group *xg, xfs_agblock_t agbno, xfs_extlen_t len), TP_ARGS(xg, agbno, len), TP_STRUCT__entry( __field(dev_t, dev) __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(xfs_agblock_t, agbno) __field(xfs_extlen_t, len) ), TP_fast_assign( __entry->dev = xg->xg_mount->m_super->s_dev; __entry->type = xg->xg_type; __entry->agno = xg->xg_gno; __entry->agbno = agbno; __entry->len = len; ), TP_printk("dev %d:%d %sno 0x%x gbno 0x%x fsbcount 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __entry->agbno, __entry->len) ) #define DEFINE_DISCARD_EVENT(name) \ DEFINE_EVENT(xfs_discard_class, name, \ TP_PROTO(const struct xfs_group *xg, xfs_agblock_t agbno, \ xfs_extlen_t len), \ TP_ARGS(xg, agbno, len)) DEFINE_DISCARD_EVENT(xfs_discard_extent); DEFINE_DISCARD_EVENT(xfs_discard_toosmall); DEFINE_DISCARD_EVENT(xfs_discard_exclude); DEFINE_DISCARD_EVENT(xfs_discard_busy); DECLARE_EVENT_CLASS(xfs_rtdiscard_class, TP_PROTO(struct xfs_mount *mp, xfs_rtblock_t rtbno, xfs_rtblock_t len), TP_ARGS(mp, rtbno, len), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_rtblock_t, rtbno) __field(xfs_rtblock_t, len) ), TP_fast_assign( __entry->dev = mp->m_rtdev_targp->bt_dev; __entry->rtbno = rtbno; __entry->len = len; ), TP_printk("dev %d:%d rtbno 0x%llx rtbcount 0x%llx", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rtbno, __entry->len) ) #define DEFINE_RTDISCARD_EVENT(name) \ DEFINE_EVENT(xfs_rtdiscard_class, name, \ TP_PROTO(struct xfs_mount *mp, \ xfs_rtblock_t rtbno, xfs_rtblock_t len), \ TP_ARGS(mp, rtbno, len)) DEFINE_RTDISCARD_EVENT(xfs_discard_rtextent); 
DEFINE_RTDISCARD_EVENT(xfs_discard_rttoosmall); DEFINE_RTDISCARD_EVENT(xfs_discard_rtrelax); DECLARE_EVENT_CLASS(xfs_btree_cur_class, TP_PROTO(struct xfs_btree_cur *cur, int level, struct xfs_buf *bp), TP_ARGS(cur, level, bp), TP_STRUCT__entry( __field(dev_t, dev) __string(name, cur->bc_ops->name) __field(int, level) __field(int, nlevels) __field(int, ptr) __field(xfs_daddr_t, daddr) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; __assign_str(name); __entry->level = level; __entry->nlevels = cur->bc_nlevels; __entry->ptr = cur->bc_levels[level].ptr; __entry->daddr = bp ? xfs_buf_daddr(bp) : -1; ), TP_printk("dev %d:%d %sbt level %d/%d ptr %d daddr 0x%llx", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(name), __entry->level, __entry->nlevels, __entry->ptr, (unsigned long long)__entry->daddr) ) #define DEFINE_BTREE_CUR_EVENT(name) \ DEFINE_EVENT(xfs_btree_cur_class, name, \ TP_PROTO(struct xfs_btree_cur *cur, int level, struct xfs_buf *bp), \ TP_ARGS(cur, level, bp)) DEFINE_BTREE_CUR_EVENT(xfs_btree_updkeys); DEFINE_BTREE_CUR_EVENT(xfs_btree_overlapped_query_range); TRACE_EVENT(xfs_btree_alloc_block, TP_PROTO(struct xfs_btree_cur *cur, union xfs_btree_ptr *ptr, int stat, int error), TP_ARGS(cur, ptr, stat, error), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_agnumber_t, agno) __field(xfs_ino_t, ino) __string(name, cur->bc_ops->name) __field(int, error) __field(xfs_agblock_t, agbno) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; switch (cur->bc_ops->type) { case XFS_BTREE_TYPE_INODE: __entry->agno = 0; __entry->ino = cur->bc_ino.ip->i_ino; break; case XFS_BTREE_TYPE_AG: __entry->agno = cur->bc_group->xg_gno; __entry->ino = 0; break; case XFS_BTREE_TYPE_MEM: __entry->agno = 0; __entry->ino = 0; break; } __assign_str(name); __entry->error = error; if (!error && stat) { if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { xfs_fsblock_t fsb = be64_to_cpu(ptr->l); __entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsb); __entry->agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsb); } else { __entry->agbno = be32_to_cpu(ptr->s); } } else { __entry->agbno = NULLAGBLOCK; } ), TP_printk("dev %d:%d %sbt agno 0x%x ino 0x%llx agbno 0x%x error %d", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(name), __entry->agno, __entry->ino, __entry->agbno, __entry->error) ); TRACE_EVENT(xfs_btree_free_block, TP_PROTO(struct xfs_btree_cur *cur, struct xfs_buf *bp), TP_ARGS(cur, bp), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_agnumber_t, agno) __field(xfs_ino_t, ino) __string(name, cur->bc_ops->name) __field(xfs_agblock_t, agbno) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; __entry->agno = xfs_daddr_to_agno(cur->bc_mp, xfs_buf_daddr(bp)); if (cur->bc_ops->type == XFS_BTREE_TYPE_INODE) __entry->ino = cur->bc_ino.ip->i_ino; else __entry->ino = 0; __assign_str(name); __entry->agbno = xfs_daddr_to_agbno(cur->bc_mp, xfs_buf_daddr(bp)); ), TP_printk("dev %d:%d %sbt agno 0x%x ino 0x%llx agbno 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(name), __entry->agno, __entry->ino, __entry->agbno) ); /* deferred ops */ struct xfs_defer_pending; DECLARE_EVENT_CLASS(xfs_defer_class, TP_PROTO(struct xfs_trans *tp, unsigned long caller_ip), TP_ARGS(tp, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(struct xfs_trans *, tp) __field(char, committed) __field(unsigned long, caller_ip) ), TP_fast_assign( __entry->dev = tp->t_mountp->m_super->s_dev; __entry->tp = tp; __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d tp %p caller %pS", MAJOR(__entry->dev), 
MINOR(__entry->dev), __entry->tp, (char *)__entry->caller_ip) ) #define DEFINE_DEFER_EVENT(name) \ DEFINE_EVENT(xfs_defer_class, name, \ TP_PROTO(struct xfs_trans *tp, unsigned long caller_ip), \ TP_ARGS(tp, caller_ip)) DECLARE_EVENT_CLASS(xfs_defer_error_class, TP_PROTO(struct xfs_trans *tp, int error), TP_ARGS(tp, error), TP_STRUCT__entry( __field(dev_t, dev) __field(struct xfs_trans *, tp) __field(char, committed) __field(int, error) ), TP_fast_assign( __entry->dev = tp->t_mountp->m_super->s_dev; __entry->tp = tp; __entry->error = error; ), TP_printk("dev %d:%d tp %p err %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tp, __entry->error) ) #define DEFINE_DEFER_ERROR_EVENT(name) \ DEFINE_EVENT(xfs_defer_error_class, name, \ TP_PROTO(struct xfs_trans *tp, int error), \ TP_ARGS(tp, error)) DECLARE_EVENT_CLASS(xfs_defer_pending_class, TP_PROTO(struct xfs_mount *mp, struct xfs_defer_pending *dfp), TP_ARGS(mp, dfp), TP_STRUCT__entry( __field(dev_t, dev) __string(name, dfp->dfp_ops->name) __field(void *, intent) __field(unsigned int, flags) __field(char, committed) __field(int, nr) ), TP_fast_assign( __entry->dev = mp ? mp->m_super->s_dev : 0; __assign_str(name); __entry->intent = dfp->dfp_intent; __entry->flags = dfp->dfp_flags; __entry->committed = dfp->dfp_done != NULL; __entry->nr = dfp->dfp_count; ), TP_printk("dev %d:%d optype %s intent %p flags %s committed %d nr %d", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(name), __entry->intent, __print_flags(__entry->flags, "|", XFS_DEFER_PENDING_STRINGS), __entry->committed, __entry->nr) ) #define DEFINE_DEFER_PENDING_EVENT(name) \ DEFINE_EVENT(xfs_defer_pending_class, name, \ TP_PROTO(struct xfs_mount *mp, struct xfs_defer_pending *dfp), \ TP_ARGS(mp, dfp)) DEFINE_DEFER_EVENT(xfs_defer_cancel); DEFINE_DEFER_EVENT(xfs_defer_trans_roll); DEFINE_DEFER_EVENT(xfs_defer_trans_abort); DEFINE_DEFER_EVENT(xfs_defer_finish); DEFINE_DEFER_EVENT(xfs_defer_finish_done); DEFINE_DEFER_ERROR_EVENT(xfs_defer_trans_roll_error); DEFINE_DEFER_ERROR_EVENT(xfs_defer_finish_error); DEFINE_DEFER_PENDING_EVENT(xfs_defer_create_intent); DEFINE_DEFER_PENDING_EVENT(xfs_defer_cancel_list); DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_finish); DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_abort); DEFINE_DEFER_PENDING_EVENT(xfs_defer_relog_intent); DEFINE_DEFER_PENDING_EVENT(xfs_defer_isolate_paused); DEFINE_DEFER_PENDING_EVENT(xfs_defer_item_pause); DEFINE_DEFER_PENDING_EVENT(xfs_defer_item_unpause); DECLARE_EVENT_CLASS(xfs_free_extent_deferred_class, TP_PROTO(struct xfs_mount *mp, struct xfs_extent_free_item *free), TP_ARGS(mp, free), TP_STRUCT__entry( __field(dev_t, dev) __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(xfs_agblock_t, agbno) __field(xfs_extlen_t, len) __field(unsigned int, flags) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->type = free->xefi_group->xg_type; __entry->agno = free->xefi_group->xg_gno; __entry->agbno = xfs_fsb_to_gbno(mp, free->xefi_startblock, free->xefi_group->xg_type); __entry->len = free->xefi_blockcount; __entry->flags = free->xefi_flags; ), TP_printk("dev %d:%d %sno 0x%x gbno 0x%x fsbcount 0x%x flags 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __entry->agbno, __entry->len, __entry->flags) ); #define DEFINE_FREE_EXTENT_DEFERRED_EVENT(name) \ DEFINE_EVENT(xfs_free_extent_deferred_class, name, \ TP_PROTO(struct xfs_mount *mp, struct xfs_extent_free_item *free), \ TP_ARGS(mp, free)) 
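/*
 * Each DEFINE_EVENT() instantiation below reuses the
 * xfs_free_extent_deferred_class record layout and format string; the
 * tracepoint machinery then generates one trace_<name>() helper per event
 * with the class's TP_PROTO signature.  A minimal usage sketch, assuming a
 * caller that already holds the mount and the extent-free work item
 * (variable names here are illustrative only):
 *
 *	trace_xfs_extent_free_defer(mp, xefi);
 *
 * The call collapses to a static-branch no-op while the event is disabled.
 */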
DEFINE_FREE_EXTENT_DEFERRED_EVENT(xfs_agfl_free_deferred); DEFINE_FREE_EXTENT_DEFERRED_EVENT(xfs_extent_free_defer); DEFINE_FREE_EXTENT_DEFERRED_EVENT(xfs_extent_free_deferred); DECLARE_EVENT_CLASS(xfs_defer_pending_item_class, TP_PROTO(struct xfs_mount *mp, struct xfs_defer_pending *dfp, void *item), TP_ARGS(mp, dfp, item), TP_STRUCT__entry( __field(dev_t, dev) __string(name, dfp->dfp_ops->name) __field(void *, intent) __field(void *, item) __field(char, committed) __field(unsigned int, flags) __field(int, nr) ), TP_fast_assign( __entry->dev = mp ? mp->m_super->s_dev : 0; __assign_str(name); __entry->intent = dfp->dfp_intent; __entry->item = item; __entry->committed = dfp->dfp_done != NULL; __entry->flags = dfp->dfp_flags; __entry->nr = dfp->dfp_count; ), TP_printk("dev %d:%d optype %s intent %p item %p flags %s committed %d nr %d", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(name), __entry->intent, __entry->item, __print_flags(__entry->flags, "|", XFS_DEFER_PENDING_STRINGS), __entry->committed, __entry->nr) ) #define DEFINE_DEFER_PENDING_ITEM_EVENT(name) \ DEFINE_EVENT(xfs_defer_pending_item_class, name, \ TP_PROTO(struct xfs_mount *mp, struct xfs_defer_pending *dfp, \ void *item), \ TP_ARGS(mp, dfp, item)) DEFINE_DEFER_PENDING_ITEM_EVENT(xfs_defer_add_item); DEFINE_DEFER_PENDING_ITEM_EVENT(xfs_defer_cancel_item); DEFINE_DEFER_PENDING_ITEM_EVENT(xfs_defer_finish_item); /* rmap tracepoints */ DECLARE_EVENT_CLASS(xfs_rmap_class, TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t gbno, xfs_extlen_t len, bool unwritten, const struct xfs_owner_info *oinfo), TP_ARGS(cur, gbno, len, unwritten, oinfo), TP_STRUCT__entry( __field(dev_t, dev) __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(xfs_agblock_t, gbno) __field(xfs_extlen_t, len) __field(uint64_t, owner) __field(uint64_t, offset) __field(unsigned long, flags) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; __entry->type = cur->bc_group->xg_type; __entry->agno = cur->bc_group->xg_gno; __entry->gbno = gbno; __entry->len = len; __entry->owner = oinfo->oi_owner; __entry->offset = oinfo->oi_offset; __entry->flags = oinfo->oi_flags; if (unwritten) __entry->flags |= XFS_RMAP_UNWRITTEN; ), TP_printk("dev %d:%d %sno 0x%x gbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%lx", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __entry->gbno, __entry->len, __entry->owner, __entry->offset, __entry->flags) ); #define DEFINE_RMAP_EVENT(name) \ DEFINE_EVENT(xfs_rmap_class, name, \ TP_PROTO(struct xfs_btree_cur *cur, \ xfs_agblock_t gbno, xfs_extlen_t len, bool unwritten, \ const struct xfs_owner_info *oinfo), \ TP_ARGS(cur, gbno, len, unwritten, oinfo)) /* btree cursor error/%ip tracepoint class */ DECLARE_EVENT_CLASS(xfs_btree_error_class, TP_PROTO(struct xfs_btree_cur *cur, int error, unsigned long caller_ip), TP_ARGS(cur, error, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_agnumber_t, agno) __field(xfs_ino_t, ino) __field(int, error) __field(unsigned long, caller_ip) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; switch (cur->bc_ops->type) { case XFS_BTREE_TYPE_INODE: __entry->agno = 0; __entry->ino = cur->bc_ino.ip->i_ino; break; case XFS_BTREE_TYPE_AG: __entry->agno = cur->bc_group->xg_gno; __entry->ino = 0; break; case XFS_BTREE_TYPE_MEM: __entry->agno = 0; __entry->ino = 0; break; } __entry->error = error; __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d agno 0x%x ino 0x%llx error %d caller %pS", 
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno, __entry->ino, __entry->error, (char *)__entry->caller_ip) ); #define DEFINE_BTREE_ERROR_EVENT(name) \ DEFINE_EVENT(xfs_btree_error_class, name, \ TP_PROTO(struct xfs_btree_cur *cur, int error, \ unsigned long caller_ip), \ TP_ARGS(cur, error, caller_ip)) DEFINE_RMAP_EVENT(xfs_rmap_unmap); DEFINE_RMAP_EVENT(xfs_rmap_unmap_done); DEFINE_BTREE_ERROR_EVENT(xfs_rmap_unmap_error); DEFINE_RMAP_EVENT(xfs_rmap_map); DEFINE_RMAP_EVENT(xfs_rmap_map_done); DEFINE_BTREE_ERROR_EVENT(xfs_rmap_map_error); DEFINE_RMAP_EVENT(xfs_rmap_convert); DEFINE_RMAP_EVENT(xfs_rmap_convert_done); DEFINE_BTREE_ERROR_EVENT(xfs_rmap_convert_error); TRACE_EVENT(xfs_rmap_convert_state, TP_PROTO(struct xfs_btree_cur *cur, int state, unsigned long caller_ip), TP_ARGS(cur, state, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(int, state) __field(unsigned long, caller_ip) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; __entry->type = cur->bc_group->xg_type; __entry->agno = cur->bc_group->xg_gno; __entry->state = state; __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d %sno 0x%x state %d caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __entry->state, (char *)__entry->caller_ip) ); DECLARE_EVENT_CLASS(xfs_rmapbt_class, TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t gbno, xfs_extlen_t len, uint64_t owner, uint64_t offset, unsigned int flags), TP_ARGS(cur, gbno, len, owner, offset, flags), TP_STRUCT__entry( __field(dev_t, dev) __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(xfs_agblock_t, gbno) __field(xfs_extlen_t, len) __field(uint64_t, owner) __field(uint64_t, offset) __field(unsigned int, flags) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; __entry->type = cur->bc_group->xg_type; __entry->agno = cur->bc_group->xg_gno; __entry->gbno = gbno; __entry->len = len; __entry->owner = owner; __entry->offset = offset; __entry->flags = flags; ), TP_printk("dev %d:%d %sno 0x%x gbno 0x%x fsbcount 0x%x owner 0x%llx fileoff 0x%llx flags 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __entry->gbno, __entry->len, __entry->owner, __entry->offset, __entry->flags) ); #define DEFINE_RMAPBT_EVENT(name) \ DEFINE_EVENT(xfs_rmapbt_class, name, \ TP_PROTO(struct xfs_btree_cur *cur, \ xfs_agblock_t gbno, xfs_extlen_t len, \ uint64_t owner, uint64_t offset, unsigned int flags), \ TP_ARGS(cur, gbno, len, owner, offset, flags)) TRACE_DEFINE_ENUM(XFS_RMAP_MAP); TRACE_DEFINE_ENUM(XFS_RMAP_MAP_SHARED); TRACE_DEFINE_ENUM(XFS_RMAP_UNMAP); TRACE_DEFINE_ENUM(XFS_RMAP_UNMAP_SHARED); TRACE_DEFINE_ENUM(XFS_RMAP_CONVERT); TRACE_DEFINE_ENUM(XFS_RMAP_CONVERT_SHARED); TRACE_DEFINE_ENUM(XFS_RMAP_ALLOC); TRACE_DEFINE_ENUM(XFS_RMAP_FREE); DECLARE_EVENT_CLASS(xfs_rmap_deferred_class, TP_PROTO(struct xfs_mount *mp, struct xfs_rmap_intent *ri), TP_ARGS(mp, ri), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned long long, owner) __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(xfs_agblock_t, gbno) __field(int, whichfork) __field(xfs_fileoff_t, l_loff) __field(xfs_filblks_t, l_len) __field(xfs_exntst_t, l_state) __field(int, op) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->type = ri->ri_group->xg_type; __entry->agno = ri->ri_group->xg_gno; __entry->gbno = xfs_fsb_to_gbno(mp, ri->ri_bmap.br_startblock, 
ri->ri_group->xg_type); __entry->owner = ri->ri_owner; __entry->whichfork = ri->ri_whichfork; __entry->l_loff = ri->ri_bmap.br_startoff; __entry->l_len = ri->ri_bmap.br_blockcount; __entry->l_state = ri->ri_bmap.br_state; __entry->op = ri->ri_type; ), TP_printk("dev %d:%d op %s %sno 0x%x gbno 0x%x owner 0x%llx %s fileoff 0x%llx fsbcount 0x%llx state %d", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->op, XFS_RMAP_INTENT_STRINGS), __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __entry->gbno, __entry->owner, __print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS), __entry->l_loff, __entry->l_len, __entry->l_state) ); #define DEFINE_RMAP_DEFERRED_EVENT(name) \ DEFINE_EVENT(xfs_rmap_deferred_class, name, \ TP_PROTO(struct xfs_mount *mp, struct xfs_rmap_intent *ri), \ TP_ARGS(mp, ri)) DEFINE_RMAP_DEFERRED_EVENT(xfs_rmap_defer); DEFINE_RMAP_DEFERRED_EVENT(xfs_rmap_deferred); DEFINE_RMAPBT_EVENT(xfs_rmap_update); DEFINE_RMAPBT_EVENT(xfs_rmap_insert); DEFINE_RMAPBT_EVENT(xfs_rmap_delete); DEFINE_BTREE_ERROR_EVENT(xfs_rmap_insert_error); DEFINE_BTREE_ERROR_EVENT(xfs_rmap_delete_error); DEFINE_BTREE_ERROR_EVENT(xfs_rmap_update_error); DEFINE_RMAPBT_EVENT(xfs_rmap_find_left_neighbor_candidate); DEFINE_RMAPBT_EVENT(xfs_rmap_find_left_neighbor_query); DEFINE_RMAPBT_EVENT(xfs_rmap_lookup_le_range_candidate); DEFINE_RMAPBT_EVENT(xfs_rmap_lookup_le_range); DEFINE_RMAPBT_EVENT(xfs_rmap_lookup_le_range_result); DEFINE_RMAPBT_EVENT(xfs_rmap_find_right_neighbor_result); DEFINE_RMAPBT_EVENT(xfs_rmap_find_left_neighbor_result); /* deferred bmbt updates */ TRACE_DEFINE_ENUM(XFS_BMAP_MAP); TRACE_DEFINE_ENUM(XFS_BMAP_UNMAP); DECLARE_EVENT_CLASS(xfs_bmap_deferred_class, TP_PROTO(struct xfs_bmap_intent *bi), TP_ARGS(bi), TP_STRUCT__entry( __field(dev_t, dev) __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(xfs_ino_t, ino) __field(unsigned long long, gbno) __field(int, whichfork) __field(xfs_fileoff_t, l_loff) __field(xfs_filblks_t, l_len) __field(xfs_exntst_t, l_state) __field(int, op) ), TP_fast_assign( struct xfs_inode *ip = bi->bi_owner; struct xfs_mount *mp = ip->i_mount; __entry->dev = mp->m_super->s_dev; __entry->type = bi->bi_group->xg_type; __entry->agno = bi->bi_group->xg_gno; if (bi->bi_group->xg_type == XG_TYPE_RTG && !xfs_has_rtgroups(mp)) { /* * Legacy rt filesystems do not have allocation groups * ondisk. We emulate this incore with one gigantic * rtgroup whose size can exceed a 32-bit block number. * For this tracepoint, we report group 0 and a 64-bit * group block number. 
*/ __entry->gbno = bi->bi_bmap.br_startblock; } else { __entry->gbno = xfs_fsb_to_gbno(mp, bi->bi_bmap.br_startblock, bi->bi_group->xg_type); } __entry->ino = ip->i_ino; __entry->whichfork = bi->bi_whichfork; __entry->l_loff = bi->bi_bmap.br_startoff; __entry->l_len = bi->bi_bmap.br_blockcount; __entry->l_state = bi->bi_bmap.br_state; __entry->op = bi->bi_type; ), TP_printk("dev %d:%d op %s ino 0x%llx %sno 0x%x gbno 0x%llx %s fileoff 0x%llx fsbcount 0x%llx state %d", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->op, XFS_BMAP_INTENT_STRINGS), __entry->ino, __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __entry->gbno, __print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS), __entry->l_loff, __entry->l_len, __entry->l_state) ); #define DEFINE_BMAP_DEFERRED_EVENT(name) \ DEFINE_EVENT(xfs_bmap_deferred_class, name, \ TP_PROTO(struct xfs_bmap_intent *bi), \ TP_ARGS(bi)) DEFINE_BMAP_DEFERRED_EVENT(xfs_bmap_defer); DEFINE_BMAP_DEFERRED_EVENT(xfs_bmap_deferred); /* per-AG reservation */ DECLARE_EVENT_CLASS(xfs_ag_resv_class, TP_PROTO(struct xfs_perag *pag, enum xfs_ag_resv_type resv, xfs_extlen_t len), TP_ARGS(pag, resv, len), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_agnumber_t, agno) __field(int, resv) __field(xfs_extlen_t, freeblks) __field(xfs_extlen_t, flcount) __field(xfs_extlen_t, reserved) __field(xfs_extlen_t, asked) __field(xfs_extlen_t, len) ), TP_fast_assign( struct xfs_ag_resv *r = xfs_perag_resv(pag, resv); __entry->dev = pag_mount(pag)->m_super->s_dev; __entry->agno = pag_agno(pag); __entry->resv = resv; __entry->freeblks = pag->pagf_freeblks; __entry->flcount = pag->pagf_flcount; __entry->reserved = r ? r->ar_reserved : 0; __entry->asked = r ? r->ar_asked : 0; __entry->len = len; ), TP_printk("dev %d:%d agno 0x%x resv %d freeblks %u flcount %u " "resv %u ask %u len %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno, __entry->resv, __entry->freeblks, __entry->flcount, __entry->reserved, __entry->asked, __entry->len) ) #define DEFINE_AG_RESV_EVENT(name) \ DEFINE_EVENT(xfs_ag_resv_class, name, \ TP_PROTO(struct xfs_perag *pag, enum xfs_ag_resv_type type, \ xfs_extlen_t len), \ TP_ARGS(pag, type, len)) /* per-AG reservation tracepoints */ DEFINE_AG_RESV_EVENT(xfs_ag_resv_init); DEFINE_AG_RESV_EVENT(xfs_ag_resv_free); DEFINE_AG_RESV_EVENT(xfs_ag_resv_alloc_extent); DEFINE_AG_RESV_EVENT(xfs_ag_resv_free_extent); DEFINE_AG_RESV_EVENT(xfs_ag_resv_critical); DEFINE_AG_RESV_EVENT(xfs_ag_resv_needed); TRACE_EVENT(xfs_ag_resv_init_error, TP_PROTO(const struct xfs_perag *pag, int error, unsigned long caller_ip), TP_ARGS(pag, error, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_agnumber_t, agno) __field(int, error) __field(unsigned long, caller_ip) ), TP_fast_assign( __entry->dev = pag_mount(pag)->m_super->s_dev; __entry->agno = pag_agno(pag); __entry->error = error; __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d agno 0x%x error %d caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno, __entry->error, (char *)__entry->caller_ip) ); /* refcount tracepoint classes */ DECLARE_EVENT_CLASS(xfs_refcount_class, TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t gbno, xfs_extlen_t len), TP_ARGS(cur, gbno, len), TP_STRUCT__entry( __field(dev_t, dev) __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(xfs_agblock_t, gbno) __field(xfs_extlen_t, len) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; __entry->type = cur->bc_group->xg_type; __entry->agno = cur->bc_group->xg_gno; 
__entry->gbno = gbno; __entry->len = len; ), TP_printk("dev %d:%d %sno 0x%x gbno 0x%x fsbcount 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __entry->gbno, __entry->len) ); #define DEFINE_REFCOUNT_EVENT(name) \ DEFINE_EVENT(xfs_refcount_class, name, \ TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t gbno, \ xfs_extlen_t len), \ TP_ARGS(cur, gbno, len)) TRACE_DEFINE_ENUM(XFS_LOOKUP_EQi); TRACE_DEFINE_ENUM(XFS_LOOKUP_LEi); TRACE_DEFINE_ENUM(XFS_LOOKUP_GEi); TRACE_EVENT(xfs_refcount_lookup, TP_PROTO(struct xfs_btree_cur *cur, xfs_agblock_t gbno, xfs_lookup_t dir), TP_ARGS(cur, gbno, dir), TP_STRUCT__entry( __field(dev_t, dev) __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(xfs_agblock_t, gbno) __field(xfs_lookup_t, dir) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; __entry->type = cur->bc_group->xg_type; __entry->agno = cur->bc_group->xg_gno; __entry->gbno = gbno; __entry->dir = dir; ), TP_printk("dev %d:%d %sno 0x%x gbno 0x%x cmp %s(%d)", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __entry->gbno, __print_symbolic(__entry->dir, XFS_AG_BTREE_CMP_FORMAT_STR), __entry->dir) ) /* single-rcext tracepoint class */ DECLARE_EVENT_CLASS(xfs_refcount_extent_class, TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *irec), TP_ARGS(cur, irec), TP_STRUCT__entry( __field(dev_t, dev) __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(enum xfs_refc_domain, domain) __field(xfs_agblock_t, startblock) __field(xfs_extlen_t, blockcount) __field(xfs_nlink_t, refcount) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; __entry->type = cur->bc_group->xg_type; __entry->agno = cur->bc_group->xg_gno; __entry->domain = irec->rc_domain; __entry->startblock = irec->rc_startblock; __entry->blockcount = irec->rc_blockcount; __entry->refcount = irec->rc_refcount; ), TP_printk("dev %d:%d %sno 0x%x dom %s gbno 0x%x fsbcount 0x%x refcount %u", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __print_symbolic(__entry->domain, XFS_REFC_DOMAIN_STRINGS), __entry->startblock, __entry->blockcount, __entry->refcount) ) #define DEFINE_REFCOUNT_EXTENT_EVENT(name) \ DEFINE_EVENT(xfs_refcount_extent_class, name, \ TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *irec), \ TP_ARGS(cur, irec)) /* single-rcext and an agbno tracepoint class */ DECLARE_EVENT_CLASS(xfs_refcount_extent_at_class, TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *irec, xfs_agblock_t gbno), TP_ARGS(cur, irec, gbno), TP_STRUCT__entry( __field(dev_t, dev) __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(enum xfs_refc_domain, domain) __field(xfs_agblock_t, startblock) __field(xfs_extlen_t, blockcount) __field(xfs_nlink_t, refcount) __field(xfs_agblock_t, gbno) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; __entry->type = cur->bc_group->xg_type; __entry->agno = cur->bc_group->xg_gno; __entry->domain = irec->rc_domain; __entry->startblock = irec->rc_startblock; __entry->blockcount = irec->rc_blockcount; __entry->refcount = irec->rc_refcount; __entry->gbno = gbno; ), TP_printk("dev %d:%d %sno 0x%x dom %s gbno 0x%x fsbcount 0x%x refcount %u @ gbno 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __print_symbolic(__entry->domain, XFS_REFC_DOMAIN_STRINGS), __entry->startblock, 
__entry->blockcount, __entry->refcount, __entry->gbno) ) #define DEFINE_REFCOUNT_EXTENT_AT_EVENT(name) \ DEFINE_EVENT(xfs_refcount_extent_at_class, name, \ TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *irec, \ xfs_agblock_t gbno), \ TP_ARGS(cur, irec, gbno)) /* double-rcext tracepoint class */ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_class, TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1, struct xfs_refcount_irec *i2), TP_ARGS(cur, i1, i2), TP_STRUCT__entry( __field(dev_t, dev) __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(enum xfs_refc_domain, i1_domain) __field(xfs_agblock_t, i1_startblock) __field(xfs_extlen_t, i1_blockcount) __field(xfs_nlink_t, i1_refcount) __field(enum xfs_refc_domain, i2_domain) __field(xfs_agblock_t, i2_startblock) __field(xfs_extlen_t, i2_blockcount) __field(xfs_nlink_t, i2_refcount) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; __entry->type = cur->bc_group->xg_type; __entry->agno = cur->bc_group->xg_gno; __entry->i1_domain = i1->rc_domain; __entry->i1_startblock = i1->rc_startblock; __entry->i1_blockcount = i1->rc_blockcount; __entry->i1_refcount = i1->rc_refcount; __entry->i2_domain = i2->rc_domain; __entry->i2_startblock = i2->rc_startblock; __entry->i2_blockcount = i2->rc_blockcount; __entry->i2_refcount = i2->rc_refcount; ), TP_printk("dev %d:%d %sno 0x%x dom %s gbno 0x%x fsbcount 0x%x refcount %u -- " "dom %s gbno 0x%x fsbcount 0x%x refcount %u", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __print_symbolic(__entry->i1_domain, XFS_REFC_DOMAIN_STRINGS), __entry->i1_startblock, __entry->i1_blockcount, __entry->i1_refcount, __print_symbolic(__entry->i2_domain, XFS_REFC_DOMAIN_STRINGS), __entry->i2_startblock, __entry->i2_blockcount, __entry->i2_refcount) ) #define DEFINE_REFCOUNT_DOUBLE_EXTENT_EVENT(name) \ DEFINE_EVENT(xfs_refcount_double_extent_class, name, \ TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1, \ struct xfs_refcount_irec *i2), \ TP_ARGS(cur, i1, i2)) /* double-rcext and an agbno tracepoint class */ DECLARE_EVENT_CLASS(xfs_refcount_double_extent_at_class, TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1, struct xfs_refcount_irec *i2, xfs_agblock_t gbno), TP_ARGS(cur, i1, i2, gbno), TP_STRUCT__entry( __field(dev_t, dev) __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(enum xfs_refc_domain, i1_domain) __field(xfs_agblock_t, i1_startblock) __field(xfs_extlen_t, i1_blockcount) __field(xfs_nlink_t, i1_refcount) __field(enum xfs_refc_domain, i2_domain) __field(xfs_agblock_t, i2_startblock) __field(xfs_extlen_t, i2_blockcount) __field(xfs_nlink_t, i2_refcount) __field(xfs_agblock_t, gbno) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; __entry->type = cur->bc_group->xg_type; __entry->agno = cur->bc_group->xg_gno; __entry->i1_domain = i1->rc_domain; __entry->i1_startblock = i1->rc_startblock; __entry->i1_blockcount = i1->rc_blockcount; __entry->i1_refcount = i1->rc_refcount; __entry->i2_domain = i2->rc_domain; __entry->i2_startblock = i2->rc_startblock; __entry->i2_blockcount = i2->rc_blockcount; __entry->i2_refcount = i2->rc_refcount; __entry->gbno = gbno; ), TP_printk("dev %d:%d %sno 0x%x dom %s gbno 0x%x fsbcount 0x%x refcount %u -- " "dom %s gbno 0x%x fsbcount 0x%x refcount %u @ gbno 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __print_symbolic(__entry->i1_domain, 
XFS_REFC_DOMAIN_STRINGS), __entry->i1_startblock, __entry->i1_blockcount, __entry->i1_refcount, __print_symbolic(__entry->i2_domain, XFS_REFC_DOMAIN_STRINGS), __entry->i2_startblock, __entry->i2_blockcount, __entry->i2_refcount, __entry->gbno) ) #define DEFINE_REFCOUNT_DOUBLE_EXTENT_AT_EVENT(name) \ DEFINE_EVENT(xfs_refcount_double_extent_at_class, name, \ TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1, \ struct xfs_refcount_irec *i2, xfs_agblock_t gbno), \ TP_ARGS(cur, i1, i2, gbno)) /* triple-rcext tracepoint class */ DECLARE_EVENT_CLASS(xfs_refcount_triple_extent_class, TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1, struct xfs_refcount_irec *i2, struct xfs_refcount_irec *i3), TP_ARGS(cur, i1, i2, i3), TP_STRUCT__entry( __field(dev_t, dev) __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(enum xfs_refc_domain, i1_domain) __field(xfs_agblock_t, i1_startblock) __field(xfs_extlen_t, i1_blockcount) __field(xfs_nlink_t, i1_refcount) __field(enum xfs_refc_domain, i2_domain) __field(xfs_agblock_t, i2_startblock) __field(xfs_extlen_t, i2_blockcount) __field(xfs_nlink_t, i2_refcount) __field(enum xfs_refc_domain, i3_domain) __field(xfs_agblock_t, i3_startblock) __field(xfs_extlen_t, i3_blockcount) __field(xfs_nlink_t, i3_refcount) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; __entry->type = cur->bc_group->xg_type; __entry->agno = cur->bc_group->xg_gno; __entry->i1_domain = i1->rc_domain; __entry->i1_startblock = i1->rc_startblock; __entry->i1_blockcount = i1->rc_blockcount; __entry->i1_refcount = i1->rc_refcount; __entry->i2_domain = i2->rc_domain; __entry->i2_startblock = i2->rc_startblock; __entry->i2_blockcount = i2->rc_blockcount; __entry->i2_refcount = i2->rc_refcount; __entry->i3_domain = i3->rc_domain; __entry->i3_startblock = i3->rc_startblock; __entry->i3_blockcount = i3->rc_blockcount; __entry->i3_refcount = i3->rc_refcount; ), TP_printk("dev %d:%d %sno 0x%x dom %s gbno 0x%x fsbcount 0x%x refcount %u -- " "dom %s gbno 0x%x fsbcount 0x%x refcount %u -- " "dom %s gbno 0x%x fsbcount 0x%x refcount %u", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __print_symbolic(__entry->i1_domain, XFS_REFC_DOMAIN_STRINGS), __entry->i1_startblock, __entry->i1_blockcount, __entry->i1_refcount, __print_symbolic(__entry->i2_domain, XFS_REFC_DOMAIN_STRINGS), __entry->i2_startblock, __entry->i2_blockcount, __entry->i2_refcount, __print_symbolic(__entry->i3_domain, XFS_REFC_DOMAIN_STRINGS), __entry->i3_startblock, __entry->i3_blockcount, __entry->i3_refcount) ); #define DEFINE_REFCOUNT_TRIPLE_EXTENT_EVENT(name) \ DEFINE_EVENT(xfs_refcount_triple_extent_class, name, \ TP_PROTO(struct xfs_btree_cur *cur, struct xfs_refcount_irec *i1, \ struct xfs_refcount_irec *i2, struct xfs_refcount_irec *i3), \ TP_ARGS(cur, i1, i2, i3)) /* refcount btree tracepoints */ DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_get); DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_update); DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_insert); DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_delete); DEFINE_BTREE_ERROR_EVENT(xfs_refcount_insert_error); DEFINE_BTREE_ERROR_EVENT(xfs_refcount_delete_error); DEFINE_BTREE_ERROR_EVENT(xfs_refcount_update_error); /* refcount adjustment tracepoints */ DEFINE_REFCOUNT_EVENT(xfs_refcount_increase); DEFINE_REFCOUNT_EVENT(xfs_refcount_decrease); DEFINE_REFCOUNT_EVENT(xfs_refcount_cow_increase); DEFINE_REFCOUNT_EVENT(xfs_refcount_cow_decrease); 
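/*
 * The adjustment events above are paired with the split/merge events that
 * follow: while a refcount update walks the records covering the target
 * range, it may split a record at a range boundary or merge it with a
 * neighbour, logging the record(s) touched at each step.  A minimal sketch
 * of a single-record event, assuming a refcount btree cursor and an incore
 * record (names are illustrative only):
 *
 *	trace_xfs_refcount_modify_extent(cur, &irec);
 */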
DEFINE_REFCOUNT_TRIPLE_EXTENT_EVENT(xfs_refcount_merge_center_extents); DEFINE_REFCOUNT_EXTENT_EVENT(xfs_refcount_modify_extent); DEFINE_REFCOUNT_EXTENT_AT_EVENT(xfs_refcount_split_extent); DEFINE_REFCOUNT_DOUBLE_EXTENT_EVENT(xfs_refcount_merge_left_extent); DEFINE_REFCOUNT_DOUBLE_EXTENT_EVENT(xfs_refcount_merge_right_extent); DEFINE_REFCOUNT_DOUBLE_EXTENT_AT_EVENT(xfs_refcount_find_left_extent); DEFINE_REFCOUNT_DOUBLE_EXTENT_AT_EVENT(xfs_refcount_find_right_extent); DEFINE_BTREE_ERROR_EVENT(xfs_refcount_adjust_error); DEFINE_BTREE_ERROR_EVENT(xfs_refcount_adjust_cow_error); DEFINE_BTREE_ERROR_EVENT(xfs_refcount_merge_center_extents_error); DEFINE_BTREE_ERROR_EVENT(xfs_refcount_modify_extent_error); DEFINE_BTREE_ERROR_EVENT(xfs_refcount_split_extent_error); DEFINE_BTREE_ERROR_EVENT(xfs_refcount_merge_left_extent_error); DEFINE_BTREE_ERROR_EVENT(xfs_refcount_merge_right_extent_error); DEFINE_BTREE_ERROR_EVENT(xfs_refcount_find_left_extent_error); DEFINE_BTREE_ERROR_EVENT(xfs_refcount_find_right_extent_error); /* reflink helpers */ DEFINE_REFCOUNT_EVENT(xfs_refcount_find_shared); DEFINE_REFCOUNT_EVENT(xfs_refcount_find_shared_result); DEFINE_BTREE_ERROR_EVENT(xfs_refcount_find_shared_error); TRACE_DEFINE_ENUM(XFS_REFCOUNT_INCREASE); TRACE_DEFINE_ENUM(XFS_REFCOUNT_DECREASE); TRACE_DEFINE_ENUM(XFS_REFCOUNT_ALLOC_COW); TRACE_DEFINE_ENUM(XFS_REFCOUNT_FREE_COW); DECLARE_EVENT_CLASS(xfs_refcount_deferred_class, TP_PROTO(struct xfs_mount *mp, struct xfs_refcount_intent *refc), TP_ARGS(mp, refc), TP_STRUCT__entry( __field(dev_t, dev) __field(enum xfs_group_type, type) __field(xfs_agnumber_t, agno) __field(int, op) __field(xfs_agblock_t, gbno) __field(xfs_extlen_t, len) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->type = refc->ri_group->xg_type; __entry->agno = refc->ri_group->xg_gno; __entry->op = refc->ri_type; __entry->gbno = xfs_fsb_to_gbno(mp, refc->ri_startblock, refc->ri_group->xg_type); __entry->len = refc->ri_blockcount; ), TP_printk("dev %d:%d op %s %sno 0x%x gbno 0x%x fsbcount 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->op, XFS_REFCOUNT_INTENT_STRINGS), __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->agno, __entry->gbno, __entry->len) ); #define DEFINE_REFCOUNT_DEFERRED_EVENT(name) \ DEFINE_EVENT(xfs_refcount_deferred_class, name, \ TP_PROTO(struct xfs_mount *mp, struct xfs_refcount_intent *refc), \ TP_ARGS(mp, refc)) DEFINE_REFCOUNT_DEFERRED_EVENT(xfs_refcount_defer); DEFINE_REFCOUNT_DEFERRED_EVENT(xfs_refcount_deferred); DEFINE_REFCOUNT_DEFERRED_EVENT(xfs_refcount_finish_one_leftover); /* simple inode-based error/%ip tracepoint class */ DECLARE_EVENT_CLASS(xfs_inode_error_class, TP_PROTO(struct xfs_inode *ip, int error, unsigned long caller_ip), TP_ARGS(ip, error, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(int, error) __field(unsigned long, caller_ip) ), TP_fast_assign( __entry->dev = VFS_I(ip)->i_sb->s_dev; __entry->ino = ip->i_ino; __entry->error = error; __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d ino 0x%llx error %d caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->error, (char *)__entry->caller_ip) ); #define DEFINE_INODE_ERROR_EVENT(name) \ DEFINE_EVENT(xfs_inode_error_class, name, \ TP_PROTO(struct xfs_inode *ip, int error, \ unsigned long caller_ip), \ TP_ARGS(ip, error, caller_ip)) /* reflink tracepoint classes */ /* two-file io tracepoint class */ DECLARE_EVENT_CLASS(xfs_double_io_class, TP_PROTO(struct xfs_inode *src, xfs_off_t soffset, 
xfs_off_t len, struct xfs_inode *dest, xfs_off_t doffset), TP_ARGS(src, soffset, len, dest, doffset), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, src_ino) __field(loff_t, src_isize) __field(loff_t, src_disize) __field(loff_t, src_offset) __field(long long, len) __field(xfs_ino_t, dest_ino) __field(loff_t, dest_isize) __field(loff_t, dest_disize) __field(loff_t, dest_offset) ), TP_fast_assign( __entry->dev = VFS_I(src)->i_sb->s_dev; __entry->src_ino = src->i_ino; __entry->src_isize = VFS_I(src)->i_size; __entry->src_disize = src->i_disk_size; __entry->src_offset = soffset; __entry->len = len; __entry->dest_ino = dest->i_ino; __entry->dest_isize = VFS_I(dest)->i_size; __entry->dest_disize = dest->i_disk_size; __entry->dest_offset = doffset; ), TP_printk("dev %d:%d bytecount 0x%llx " "ino 0x%llx isize 0x%llx disize 0x%llx pos 0x%llx -> " "ino 0x%llx isize 0x%llx disize 0x%llx pos 0x%llx", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->len, __entry->src_ino, __entry->src_isize, __entry->src_disize, __entry->src_offset, __entry->dest_ino, __entry->dest_isize, __entry->dest_disize, __entry->dest_offset) ) #define DEFINE_DOUBLE_IO_EVENT(name) \ DEFINE_EVENT(xfs_double_io_class, name, \ TP_PROTO(struct xfs_inode *src, xfs_off_t soffset, xfs_off_t len, \ struct xfs_inode *dest, xfs_off_t doffset), \ TP_ARGS(src, soffset, len, dest, doffset)) /* inode/irec events */ DECLARE_EVENT_CLASS(xfs_inode_irec_class, TP_PROTO(struct xfs_inode *ip, struct xfs_bmbt_irec *irec), TP_ARGS(ip, irec), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(xfs_fileoff_t, lblk) __field(xfs_extlen_t, len) __field(xfs_fsblock_t, pblk) __field(int, state) ), TP_fast_assign( __entry->dev = VFS_I(ip)->i_sb->s_dev; __entry->ino = ip->i_ino; __entry->lblk = irec->br_startoff; __entry->len = irec->br_blockcount; __entry->pblk = irec->br_startblock; __entry->state = irec->br_state; ), TP_printk("dev %d:%d ino 0x%llx fileoff 0x%llx fsbcount 0x%x startblock 0x%llx st %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->lblk, __entry->len, __entry->pblk, __entry->state) ); #define DEFINE_INODE_IREC_EVENT(name) \ DEFINE_EVENT(xfs_inode_irec_class, name, \ TP_PROTO(struct xfs_inode *ip, struct xfs_bmbt_irec *irec), \ TP_ARGS(ip, irec)) /* inode iomap invalidation events */ DECLARE_EVENT_CLASS(xfs_wb_invalid_class, TP_PROTO(struct xfs_inode *ip, const struct iomap *iomap, unsigned int wpcseq, int whichfork), TP_ARGS(ip, iomap, wpcseq, whichfork), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(u64, addr) __field(loff_t, pos) __field(u64, len) __field(u16, type) __field(u16, flags) __field(u32, wpcseq) __field(u32, forkseq) ), TP_fast_assign( __entry->dev = VFS_I(ip)->i_sb->s_dev; __entry->ino = ip->i_ino; __entry->addr = iomap->addr; __entry->pos = iomap->offset; __entry->len = iomap->length; __entry->type = iomap->type; __entry->flags = iomap->flags; __entry->wpcseq = wpcseq; __entry->forkseq = READ_ONCE(xfs_ifork_ptr(ip, whichfork)->if_seq); ), TP_printk("dev %d:%d ino 0x%llx pos 0x%llx addr 0x%llx bytecount 0x%llx type 0x%x flags 0x%x wpcseq 0x%x forkseq 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->pos, __entry->addr, __entry->len, __entry->type, __entry->flags, __entry->wpcseq, __entry->forkseq) ); #define DEFINE_WB_INVALID_EVENT(name) \ DEFINE_EVENT(xfs_wb_invalid_class, name, \ TP_PROTO(struct xfs_inode *ip, const struct iomap *iomap, unsigned int wpcseq, int whichfork), \ TP_ARGS(ip, iomap, wpcseq, whichfork)) 
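/*
 * The writeback-invalidation events defined below fire when a cached
 * writeback mapping is found to be stale: the sequence number sampled when
 * the iomap was built (wpcseq) no longer matches the fork's current
 * sequence count, so the mapping has to be rebuilt before writeback can
 * continue.  A minimal invocation sketch derived from the TP_PROTO above,
 * with illustrative local names:
 *
 *	trace_xfs_wb_data_iomap_invalid(ip, &iomap, cached_seq, XFS_DATA_FORK);
 */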
DEFINE_WB_INVALID_EVENT(xfs_wb_cow_iomap_invalid); DEFINE_WB_INVALID_EVENT(xfs_wb_data_iomap_invalid); DECLARE_EVENT_CLASS(xfs_iomap_invalid_class, TP_PROTO(struct xfs_inode *ip, const struct iomap *iomap), TP_ARGS(ip, iomap), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(u64, addr) __field(loff_t, pos) __field(u64, len) __field(u64, validity_cookie) __field(u64, inodeseq) __field(u16, type) __field(u16, flags) ), TP_fast_assign( __entry->dev = VFS_I(ip)->i_sb->s_dev; __entry->ino = ip->i_ino; __entry->addr = iomap->addr; __entry->pos = iomap->offset; __entry->len = iomap->length; __entry->validity_cookie = iomap->validity_cookie; __entry->type = iomap->type; __entry->flags = iomap->flags; __entry->inodeseq = xfs_iomap_inode_sequence(ip, iomap->flags); ), TP_printk("dev %d:%d ino 0x%llx pos 0x%llx addr 0x%llx bytecount 0x%llx type 0x%x flags 0x%x validity_cookie 0x%llx inodeseq 0x%llx", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->pos, __entry->addr, __entry->len, __entry->type, __entry->flags, __entry->validity_cookie, __entry->inodeseq) ); #define DEFINE_IOMAP_INVALID_EVENT(name) \ DEFINE_EVENT(xfs_iomap_invalid_class, name, \ TP_PROTO(struct xfs_inode *ip, const struct iomap *iomap), \ TP_ARGS(ip, iomap)) DEFINE_IOMAP_INVALID_EVENT(xfs_iomap_invalid); /* refcount/reflink tracepoint definitions */ /* reflink tracepoints */ DEFINE_INODE_EVENT(xfs_reflink_set_inode_flag); DEFINE_INODE_EVENT(xfs_reflink_unset_inode_flag); DEFINE_ITRUNC_EVENT(xfs_reflink_update_inode_size); TRACE_EVENT(xfs_reflink_remap_blocks, TP_PROTO(struct xfs_inode *src, xfs_fileoff_t soffset, xfs_filblks_t len, struct xfs_inode *dest, xfs_fileoff_t doffset), TP_ARGS(src, soffset, len, dest, doffset), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, src_ino) __field(xfs_fileoff_t, src_lblk) __field(xfs_filblks_t, len) __field(xfs_ino_t, dest_ino) __field(xfs_fileoff_t, dest_lblk) ), TP_fast_assign( __entry->dev = VFS_I(src)->i_sb->s_dev; __entry->src_ino = src->i_ino; __entry->src_lblk = soffset; __entry->len = len; __entry->dest_ino = dest->i_ino; __entry->dest_lblk = doffset; ), TP_printk("dev %d:%d fsbcount 0x%llx " "ino 0x%llx fileoff 0x%llx -> ino 0x%llx fileoff 0x%llx", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->len, __entry->src_ino, __entry->src_lblk, __entry->dest_ino, __entry->dest_lblk) ); DEFINE_DOUBLE_IO_EVENT(xfs_reflink_remap_range); DEFINE_INODE_ERROR_EVENT(xfs_reflink_remap_range_error); DEFINE_INODE_ERROR_EVENT(xfs_reflink_set_inode_flag_error); DEFINE_INODE_ERROR_EVENT(xfs_reflink_update_inode_size_error); DEFINE_INODE_ERROR_EVENT(xfs_reflink_remap_blocks_error); DEFINE_INODE_ERROR_EVENT(xfs_reflink_remap_extent_error); DEFINE_INODE_IREC_EVENT(xfs_reflink_remap_extent_src); DEFINE_INODE_IREC_EVENT(xfs_reflink_remap_extent_dest); /* dedupe tracepoints */ DEFINE_DOUBLE_IO_EVENT(xfs_reflink_compare_extents); DEFINE_INODE_ERROR_EVENT(xfs_reflink_compare_extents_error); /* ioctl tracepoints */ TRACE_EVENT(xfs_ioctl_clone, TP_PROTO(struct inode *src, struct inode *dest), TP_ARGS(src, dest), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned long, src_ino) __field(loff_t, src_isize) __field(unsigned long, dest_ino) __field(loff_t, dest_isize) ), TP_fast_assign( __entry->dev = src->i_sb->s_dev; __entry->src_ino = src->i_ino; __entry->src_isize = i_size_read(src); __entry->dest_ino = dest->i_ino; __entry->dest_isize = i_size_read(dest); ), TP_printk("dev %d:%d ino 0x%lx isize 0x%llx -> ino 0x%lx isize 0x%llx", MAJOR(__entry->dev), 
MINOR(__entry->dev), __entry->src_ino, __entry->src_isize, __entry->dest_ino, __entry->dest_isize) ); /* unshare tracepoints */ DEFINE_SIMPLE_IO_EVENT(xfs_reflink_unshare); DEFINE_INODE_ERROR_EVENT(xfs_reflink_unshare_error); /* copy on write */ DEFINE_INODE_IREC_EVENT(xfs_reflink_trim_around_shared); DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_found); DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_enospc); DEFINE_INODE_IREC_EVENT(xfs_reflink_convert_cow); DEFINE_SIMPLE_IO_EVENT(xfs_reflink_cancel_cow_range); DEFINE_SIMPLE_IO_EVENT(xfs_reflink_end_cow); DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap_from); DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap_to); DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_remap_skip); DEFINE_INODE_ERROR_EVENT(xfs_reflink_cancel_cow_range_error); DEFINE_INODE_ERROR_EVENT(xfs_reflink_end_cow_error); DEFINE_INODE_IREC_EVENT(xfs_reflink_cancel_cow); /* rmap swapext tracepoints */ DEFINE_INODE_IREC_EVENT(xfs_swap_extent_rmap_remap); DEFINE_INODE_IREC_EVENT(xfs_swap_extent_rmap_remap_piece); DEFINE_INODE_ERROR_EVENT(xfs_swap_extent_rmap_error); /* fsmap traces */ TRACE_EVENT(xfs_fsmap_mapping, TP_PROTO(struct xfs_mount *mp, u32 keydev, xfs_agnumber_t agno, const struct xfs_fsmap_irec *frec), TP_ARGS(mp, keydev, agno, frec), TP_STRUCT__entry( __field(dev_t, dev) __field(dev_t, keydev) __field(xfs_agnumber_t, agno) __field(xfs_agblock_t, agbno) __field(xfs_daddr_t, start_daddr) __field(xfs_daddr_t, len_daddr) __field(uint64_t, owner) __field(uint64_t, offset) __field(unsigned int, flags) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->keydev = new_decode_dev(keydev); __entry->agno = agno; __entry->agbno = frec->rec_key; __entry->start_daddr = frec->start_daddr; __entry->len_daddr = frec->len_daddr; __entry->owner = frec->owner; __entry->offset = frec->offset; __entry->flags = frec->rm_flags; ), TP_printk("dev %d:%d keydev %d:%d agno 0x%x gbno 0x%x start_daddr 0x%llx len_daddr 0x%llx owner 0x%llx fileoff 0x%llx flags 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->keydev), MINOR(__entry->keydev), __entry->agno, __entry->agbno, __entry->start_daddr, __entry->len_daddr, __entry->owner, __entry->offset, __entry->flags) ); DECLARE_EVENT_CLASS(xfs_fsmap_group_key_class, TP_PROTO(struct xfs_mount *mp, u32 keydev, xfs_agnumber_t agno, const struct xfs_rmap_irec *rmap), TP_ARGS(mp, keydev, agno, rmap), TP_STRUCT__entry( __field(dev_t, dev) __field(dev_t, keydev) __field(xfs_agnumber_t, agno) __field(xfs_agblock_t, agbno) __field(uint64_t, owner) __field(uint64_t, offset) __field(unsigned int, flags) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->keydev = new_decode_dev(keydev); __entry->agno = agno; __entry->agbno = rmap->rm_startblock; __entry->owner = rmap->rm_owner; __entry->offset = rmap->rm_offset; __entry->flags = rmap->rm_flags; ), TP_printk("dev %d:%d keydev %d:%d agno 0x%x startblock 0x%x owner 0x%llx fileoff 0x%llx flags 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->keydev), MINOR(__entry->keydev), __entry->agno, __entry->agbno, __entry->owner, __entry->offset, __entry->flags) ) #define DEFINE_FSMAP_GROUP_KEY_EVENT(name) \ DEFINE_EVENT(xfs_fsmap_group_key_class, name, \ TP_PROTO(struct xfs_mount *mp, u32 keydev, xfs_agnumber_t agno, \ const struct xfs_rmap_irec *rmap), \ TP_ARGS(mp, keydev, agno, rmap)) DEFINE_FSMAP_GROUP_KEY_EVENT(xfs_fsmap_low_group_key); DEFINE_FSMAP_GROUP_KEY_EVENT(xfs_fsmap_high_group_key); DECLARE_EVENT_CLASS(xfs_fsmap_linear_key_class, TP_PROTO(struct xfs_mount *mp, u32 keydev, xfs_fsblock_t bno), 
TP_ARGS(mp, keydev, bno), TP_STRUCT__entry( __field(dev_t, dev) __field(dev_t, keydev) __field(xfs_fsblock_t, bno) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->keydev = new_decode_dev(keydev); __entry->bno = bno; ), TP_printk("dev %d:%d keydev %d:%d bno 0x%llx", MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->keydev), MINOR(__entry->keydev), __entry->bno) ) #define DEFINE_FSMAP_LINEAR_KEY_EVENT(name) \ DEFINE_EVENT(xfs_fsmap_linear_key_class, name, \ TP_PROTO(struct xfs_mount *mp, u32 keydev, uint64_t bno), \ TP_ARGS(mp, keydev, bno)) DEFINE_FSMAP_LINEAR_KEY_EVENT(xfs_fsmap_low_linear_key); DEFINE_FSMAP_LINEAR_KEY_EVENT(xfs_fsmap_high_linear_key); DECLARE_EVENT_CLASS(xfs_getfsmap_class, TP_PROTO(struct xfs_mount *mp, struct xfs_fsmap *fsmap), TP_ARGS(mp, fsmap), TP_STRUCT__entry( __field(dev_t, dev) __field(dev_t, keydev) __field(xfs_daddr_t, block) __field(xfs_daddr_t, len) __field(uint64_t, owner) __field(uint64_t, offset) __field(uint64_t, flags) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->keydev = new_decode_dev(fsmap->fmr_device); __entry->block = fsmap->fmr_physical; __entry->len = fsmap->fmr_length; __entry->owner = fsmap->fmr_owner; __entry->offset = fsmap->fmr_offset; __entry->flags = fsmap->fmr_flags; ), TP_printk("dev %d:%d keydev %d:%d daddr 0x%llx bbcount 0x%llx owner 0x%llx fileoff_daddr 0x%llx flags 0x%llx", MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->keydev), MINOR(__entry->keydev), __entry->block, __entry->len, __entry->owner, __entry->offset, __entry->flags) ) #define DEFINE_GETFSMAP_EVENT(name) \ DEFINE_EVENT(xfs_getfsmap_class, name, \ TP_PROTO(struct xfs_mount *mp, struct xfs_fsmap *fsmap), \ TP_ARGS(mp, fsmap)) DEFINE_GETFSMAP_EVENT(xfs_getfsmap_low_key); DEFINE_GETFSMAP_EVENT(xfs_getfsmap_high_key); DEFINE_GETFSMAP_EVENT(xfs_getfsmap_mapping); DECLARE_EVENT_CLASS(xfs_trans_resv_class, TP_PROTO(struct xfs_mount *mp, unsigned int type, struct xfs_trans_res *res), TP_ARGS(mp, type, res), TP_STRUCT__entry( __field(dev_t, dev) __field(int, type) __field(uint, logres) __field(int, logcount) __field(int, logflags) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->type = type; __entry->logres = res->tr_logres; __entry->logcount = res->tr_logcount; __entry->logflags = res->tr_logflags; ), TP_printk("dev %d:%d type %d logres %u logcount %d flags 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->type, __entry->logres, __entry->logcount, __entry->logflags) ) #define DEFINE_TRANS_RESV_EVENT(name) \ DEFINE_EVENT(xfs_trans_resv_class, name, \ TP_PROTO(struct xfs_mount *mp, unsigned int type, \ struct xfs_trans_res *res), \ TP_ARGS(mp, type, res)) DEFINE_TRANS_RESV_EVENT(xfs_trans_resv_calc); DEFINE_TRANS_RESV_EVENT(xfs_trans_resv_calc_minlogsize); TRACE_EVENT(xfs_log_get_max_trans_res, TP_PROTO(struct xfs_mount *mp, const struct xfs_trans_res *res), TP_ARGS(mp, res), TP_STRUCT__entry( __field(dev_t, dev) __field(uint, logres) __field(int, logcount) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->logres = res->tr_logres; __entry->logcount = res->tr_logcount; ), TP_printk("dev %d:%d logres %u logcount %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->logres, __entry->logcount) ); DECLARE_EVENT_CLASS(xfs_trans_class, TP_PROTO(struct xfs_trans *tp, unsigned long caller_ip), TP_ARGS(tp, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(uint32_t, tid) __field(uint32_t, flags) __field(unsigned long, caller_ip) ), TP_fast_assign( __entry->dev = tp->t_mountp->m_super->s_dev; 
__entry->tid = 0; if (tp->t_ticket) __entry->tid = tp->t_ticket->t_tid; __entry->flags = tp->t_flags; __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d trans %x flags 0x%x caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, __entry->flags, (char *)__entry->caller_ip) ) #define DEFINE_TRANS_EVENT(name) \ DEFINE_EVENT(xfs_trans_class, name, \ TP_PROTO(struct xfs_trans *tp, unsigned long caller_ip), \ TP_ARGS(tp, caller_ip)) DEFINE_TRANS_EVENT(xfs_trans_alloc); DEFINE_TRANS_EVENT(xfs_trans_cancel); DEFINE_TRANS_EVENT(xfs_trans_commit); DEFINE_TRANS_EVENT(xfs_trans_dup); DEFINE_TRANS_EVENT(xfs_trans_free); DEFINE_TRANS_EVENT(xfs_trans_roll); DEFINE_TRANS_EVENT(xfs_trans_add_item); DEFINE_TRANS_EVENT(xfs_trans_commit_items); DEFINE_TRANS_EVENT(xfs_trans_free_items); TRACE_EVENT(xfs_iunlink_update_bucket, TP_PROTO(const struct xfs_perag *pag, unsigned int bucket, xfs_agino_t old_ptr, xfs_agino_t new_ptr), TP_ARGS(pag, bucket, old_ptr, new_ptr), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_agnumber_t, agno) __field(unsigned int, bucket) __field(xfs_agino_t, old_ptr) __field(xfs_agino_t, new_ptr) ), TP_fast_assign( __entry->dev = pag_mount(pag)->m_super->s_dev; __entry->agno = pag_agno(pag); __entry->bucket = bucket; __entry->old_ptr = old_ptr; __entry->new_ptr = new_ptr; ), TP_printk("dev %d:%d agno 0x%x bucket %u old 0x%x new 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno, __entry->bucket, __entry->old_ptr, __entry->new_ptr) ); TRACE_EVENT(xfs_iunlink_update_dinode, TP_PROTO(const struct xfs_iunlink_item *iup, xfs_agino_t old_ptr), TP_ARGS(iup, old_ptr), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_agnumber_t, agno) __field(xfs_agino_t, agino) __field(xfs_agino_t, old_ptr) __field(xfs_agino_t, new_ptr) ), TP_fast_assign( __entry->dev = pag_mount(iup->pag)->m_super->s_dev; __entry->agno = pag_agno(iup->pag); __entry->agino = XFS_INO_TO_AGINO(iup->ip->i_mount, iup->ip->i_ino); __entry->old_ptr = old_ptr; __entry->new_ptr = iup->next_agino; ), TP_printk("dev %d:%d agno 0x%x agino 0x%x old 0x%x new 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno, __entry->agino, __entry->old_ptr, __entry->new_ptr) ); TRACE_EVENT(xfs_iunlink_reload_next, TP_PROTO(struct xfs_inode *ip), TP_ARGS(ip), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_agnumber_t, agno) __field(xfs_agino_t, agino) __field(xfs_agino_t, prev_agino) __field(xfs_agino_t, next_agino) ), TP_fast_assign( __entry->dev = ip->i_mount->m_super->s_dev; __entry->agno = XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino); __entry->agino = XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino); __entry->prev_agino = ip->i_prev_unlinked; __entry->next_agino = ip->i_next_unlinked; ), TP_printk("dev %d:%d agno 0x%x agino 0x%x prev_unlinked 0x%x next_unlinked 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno, __entry->agino, __entry->prev_agino, __entry->next_agino) ); TRACE_EVENT(xfs_inode_reload_unlinked_bucket, TP_PROTO(struct xfs_inode *ip), TP_ARGS(ip), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_agnumber_t, agno) __field(xfs_agino_t, agino) ), TP_fast_assign( __entry->dev = ip->i_mount->m_super->s_dev; __entry->agno = XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino); __entry->agino = XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino); ), TP_printk("dev %d:%d agno 0x%x agino 0x%x bucket %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno, __entry->agino, __entry->agino % XFS_AGI_UNLINKED_BUCKETS) ); DECLARE_EVENT_CLASS(xfs_ag_inode_class, TP_PROTO(struct xfs_inode *ip), TP_ARGS(ip), TP_STRUCT__entry( 
__field(dev_t, dev) __field(xfs_agnumber_t, agno) __field(xfs_agino_t, agino) ), TP_fast_assign( __entry->dev = VFS_I(ip)->i_sb->s_dev; __entry->agno = XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino); __entry->agino = XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino); ), TP_printk("dev %d:%d agno 0x%x agino 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno, __entry->agino) ) #define DEFINE_AGINODE_EVENT(name) \ DEFINE_EVENT(xfs_ag_inode_class, name, \ TP_PROTO(struct xfs_inode *ip), \ TP_ARGS(ip)) DEFINE_AGINODE_EVENT(xfs_iunlink); DEFINE_AGINODE_EVENT(xfs_iunlink_remove); DECLARE_EVENT_CLASS(xfs_fs_corrupt_class, TP_PROTO(struct xfs_mount *mp, unsigned int flags), TP_ARGS(mp, flags), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned int, flags) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->flags = flags; ), TP_printk("dev %d:%d flags 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->flags) ); #define DEFINE_FS_CORRUPT_EVENT(name) \ DEFINE_EVENT(xfs_fs_corrupt_class, name, \ TP_PROTO(struct xfs_mount *mp, unsigned int flags), \ TP_ARGS(mp, flags)) DEFINE_FS_CORRUPT_EVENT(xfs_fs_mark_sick); DEFINE_FS_CORRUPT_EVENT(xfs_fs_mark_corrupt); DEFINE_FS_CORRUPT_EVENT(xfs_fs_mark_healthy); DEFINE_FS_CORRUPT_EVENT(xfs_fs_unfixed_corruption); DECLARE_EVENT_CLASS(xfs_group_corrupt_class, TP_PROTO(const struct xfs_group *xg, unsigned int flags), TP_ARGS(xg, flags), TP_STRUCT__entry( __field(dev_t, dev) __field(enum xfs_group_type, type) __field(uint32_t, index) __field(unsigned int, flags) ), TP_fast_assign( __entry->dev = xg->xg_mount->m_super->s_dev; __entry->type = xg->xg_type; __entry->index = xg->xg_gno; __entry->flags = flags; ), TP_printk("dev %d:%d %sno 0x%x flags 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->index, __entry->flags) ); #define DEFINE_GROUP_CORRUPT_EVENT(name) \ DEFINE_EVENT(xfs_group_corrupt_class, name, \ TP_PROTO(const struct xfs_group *xg, unsigned int flags), \ TP_ARGS(xg, flags)) DEFINE_GROUP_CORRUPT_EVENT(xfs_group_mark_sick); DEFINE_GROUP_CORRUPT_EVENT(xfs_group_mark_corrupt); DEFINE_GROUP_CORRUPT_EVENT(xfs_group_mark_healthy); DEFINE_GROUP_CORRUPT_EVENT(xfs_group_unfixed_corruption); DECLARE_EVENT_CLASS(xfs_inode_corrupt_class, TP_PROTO(struct xfs_inode *ip, unsigned int flags), TP_ARGS(ip, flags), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(unsigned int, flags) ), TP_fast_assign( __entry->dev = ip->i_mount->m_super->s_dev; __entry->ino = ip->i_ino; __entry->flags = flags; ), TP_printk("dev %d:%d ino 0x%llx flags 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->flags) ); #define DEFINE_INODE_CORRUPT_EVENT(name) \ DEFINE_EVENT(xfs_inode_corrupt_class, name, \ TP_PROTO(struct xfs_inode *ip, unsigned int flags), \ TP_ARGS(ip, flags)) DEFINE_INODE_CORRUPT_EVENT(xfs_inode_mark_sick); DEFINE_INODE_CORRUPT_EVENT(xfs_inode_mark_corrupt); DEFINE_INODE_CORRUPT_EVENT(xfs_inode_mark_healthy); DEFINE_INODE_CORRUPT_EVENT(xfs_inode_unfixed_corruption); TRACE_EVENT(xfs_iwalk_ag_rec, TP_PROTO(const struct xfs_perag *pag, \ struct xfs_inobt_rec_incore *irec), TP_ARGS(pag, irec), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_agnumber_t, agno) __field(xfs_agino_t, startino) __field(uint64_t, freemask) ), TP_fast_assign( __entry->dev = pag_mount(pag)->m_super->s_dev; __entry->agno = pag_agno(pag); __entry->startino = irec->ir_startino; __entry->freemask = irec->ir_free; ), TP_printk("dev %d:%d agno 0x%x startino 0x%x freemask 0x%llx", 
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->agno, __entry->startino, __entry->freemask) ) TRACE_EVENT(xfs_pwork_init, TP_PROTO(struct xfs_mount *mp, unsigned int nr_threads, pid_t pid), TP_ARGS(mp, nr_threads, pid), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned int, nr_threads) __field(pid_t, pid) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->nr_threads = nr_threads; __entry->pid = pid; ), TP_printk("dev %d:%d nr_threads %u pid %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->nr_threads, __entry->pid) ) TRACE_EVENT(xfs_check_new_dalign, TP_PROTO(struct xfs_mount *mp, int new_dalign, xfs_ino_t calc_rootino), TP_ARGS(mp, new_dalign, calc_rootino), TP_STRUCT__entry( __field(dev_t, dev) __field(int, new_dalign) __field(xfs_ino_t, sb_rootino) __field(xfs_ino_t, calc_rootino) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->new_dalign = new_dalign; __entry->sb_rootino = mp->m_sb.sb_rootino; __entry->calc_rootino = calc_rootino; ), TP_printk("dev %d:%d new_dalign %d sb_rootino 0x%llx calc_rootino 0x%llx", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->new_dalign, __entry->sb_rootino, __entry->calc_rootino) ) TRACE_EVENT(xfs_btree_commit_afakeroot, TP_PROTO(struct xfs_btree_cur *cur), TP_ARGS(cur), TP_STRUCT__entry( __field(dev_t, dev) __string(name, cur->bc_ops->name) __field(xfs_agnumber_t, agno) __field(xfs_agblock_t, agbno) __field(unsigned int, levels) __field(unsigned int, blocks) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; __assign_str(name); __entry->agno = cur->bc_group->xg_gno; __entry->agbno = cur->bc_ag.afake->af_root; __entry->levels = cur->bc_ag.afake->af_levels; __entry->blocks = cur->bc_ag.afake->af_blocks; ), TP_printk("dev %d:%d %sbt agno 0x%x levels %u blocks %u root %u", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(name), __entry->agno, __entry->levels, __entry->blocks, __entry->agbno) ) TRACE_EVENT(xfs_btree_commit_ifakeroot, TP_PROTO(struct xfs_btree_cur *cur), TP_ARGS(cur), TP_STRUCT__entry( __field(dev_t, dev) __string(name, cur->bc_ops->name) __field(xfs_agnumber_t, agno) __field(xfs_agino_t, agino) __field(unsigned int, levels) __field(unsigned int, blocks) __field(int, whichfork) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; __assign_str(name); __entry->agno = XFS_INO_TO_AGNO(cur->bc_mp, cur->bc_ino.ip->i_ino); __entry->agino = XFS_INO_TO_AGINO(cur->bc_mp, cur->bc_ino.ip->i_ino); __entry->levels = cur->bc_ino.ifake->if_levels; __entry->blocks = cur->bc_ino.ifake->if_blocks; __entry->whichfork = cur->bc_ino.whichfork; ), TP_printk("dev %d:%d %sbt agno 0x%x agino 0x%x whichfork %s levels %u blocks %u", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(name), __entry->agno, __entry->agino, __print_symbolic(__entry->whichfork, XFS_WHICHFORK_STRINGS), __entry->levels, __entry->blocks) ) TRACE_EVENT(xfs_btree_bload_level_geometry, TP_PROTO(struct xfs_btree_cur *cur, unsigned int level, uint64_t nr_this_level, unsigned int nr_per_block, unsigned int desired_npb, uint64_t blocks, uint64_t blocks_with_extra), TP_ARGS(cur, level, nr_this_level, nr_per_block, desired_npb, blocks, blocks_with_extra), TP_STRUCT__entry( __field(dev_t, dev) __string(name, cur->bc_ops->name) __field(unsigned int, level) __field(unsigned int, nlevels) __field(uint64_t, nr_this_level) __field(unsigned int, nr_per_block) __field(unsigned int, desired_npb) __field(unsigned long long, blocks) __field(unsigned long long, blocks_with_extra) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; 
__assign_str(name); __entry->level = level; __entry->nlevels = cur->bc_nlevels; __entry->nr_this_level = nr_this_level; __entry->nr_per_block = nr_per_block; __entry->desired_npb = desired_npb; __entry->blocks = blocks; __entry->blocks_with_extra = blocks_with_extra; ), TP_printk("dev %d:%d %sbt level %u/%u nr_this_level %llu nr_per_block %u desired_npb %u blocks %llu blocks_with_extra %llu", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(name), __entry->level, __entry->nlevels, __entry->nr_this_level, __entry->nr_per_block, __entry->desired_npb, __entry->blocks, __entry->blocks_with_extra) ) TRACE_EVENT(xfs_btree_bload_block, TP_PROTO(struct xfs_btree_cur *cur, unsigned int level, uint64_t block_idx, uint64_t nr_blocks, union xfs_btree_ptr *ptr, unsigned int nr_records), TP_ARGS(cur, level, block_idx, nr_blocks, ptr, nr_records), TP_STRUCT__entry( __field(dev_t, dev) __string(name, cur->bc_ops->name) __field(unsigned int, level) __field(unsigned long long, block_idx) __field(unsigned long long, nr_blocks) __field(xfs_agnumber_t, agno) __field(xfs_agblock_t, agbno) __field(unsigned int, nr_records) ), TP_fast_assign( __entry->dev = cur->bc_mp->m_super->s_dev; __assign_str(name); __entry->level = level; __entry->block_idx = block_idx; __entry->nr_blocks = nr_blocks; if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN) { xfs_fsblock_t fsb = be64_to_cpu(ptr->l); __entry->agno = XFS_FSB_TO_AGNO(cur->bc_mp, fsb); __entry->agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsb); } else { __entry->agno = cur->bc_group->xg_gno; __entry->agbno = be32_to_cpu(ptr->s); } __entry->nr_records = nr_records; ), TP_printk("dev %d:%d %sbt level %u block %llu/%llu agno 0x%x agbno 0x%x recs %u", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(name), __entry->level, __entry->block_idx, __entry->nr_blocks, __entry->agno, __entry->agbno, __entry->nr_records) ) DECLARE_EVENT_CLASS(xfs_timestamp_range_class, TP_PROTO(struct xfs_mount *mp, time64_t min, time64_t max), TP_ARGS(mp, min, max), TP_STRUCT__entry( __field(dev_t, dev) __field(long long, min) __field(long long, max) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->min = min; __entry->max = max; ), TP_printk("dev %d:%d min %lld max %lld", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->min, __entry->max) ) #define DEFINE_TIMESTAMP_RANGE_EVENT(name) \ DEFINE_EVENT(xfs_timestamp_range_class, name, \ TP_PROTO(struct xfs_mount *mp, long long min, long long max), \ TP_ARGS(mp, min, max)) DEFINE_TIMESTAMP_RANGE_EVENT(xfs_inode_timestamp_range); DEFINE_TIMESTAMP_RANGE_EVENT(xfs_quota_expiry_range); DECLARE_EVENT_CLASS(xfs_icwalk_class, TP_PROTO(struct xfs_mount *mp, struct xfs_icwalk *icw, unsigned long caller_ip), TP_ARGS(mp, icw, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(__u32, flags) __field(uint32_t, uid) __field(uint32_t, gid) __field(prid_t, prid) __field(__u64, min_file_size) __field(long, scan_limit) __field(unsigned long, caller_ip) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->flags = icw ? icw->icw_flags : 0; __entry->uid = icw ? from_kuid(mp->m_super->s_user_ns, icw->icw_uid) : 0; __entry->gid = icw ? from_kgid(mp->m_super->s_user_ns, icw->icw_gid) : 0; __entry->prid = icw ? icw->icw_prid : 0; __entry->min_file_size = icw ? icw->icw_min_file_size : 0; __entry->scan_limit = icw ? 
icw->icw_scan_limit : 0; __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d flags 0x%x uid %u gid %u prid %u minsize %llu scan_limit %ld caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->flags, __entry->uid, __entry->gid, __entry->prid, __entry->min_file_size, __entry->scan_limit, (char *)__entry->caller_ip) ); #define DEFINE_ICWALK_EVENT(name) \ DEFINE_EVENT(xfs_icwalk_class, name, \ TP_PROTO(struct xfs_mount *mp, struct xfs_icwalk *icw, \ unsigned long caller_ip), \ TP_ARGS(mp, icw, caller_ip)) DEFINE_ICWALK_EVENT(xfs_ioc_free_eofblocks); DEFINE_ICWALK_EVENT(xfs_blockgc_free_space); TRACE_DEFINE_ENUM(XLOG_STATE_ACTIVE); TRACE_DEFINE_ENUM(XLOG_STATE_WANT_SYNC); TRACE_DEFINE_ENUM(XLOG_STATE_SYNCING); TRACE_DEFINE_ENUM(XLOG_STATE_DONE_SYNC); TRACE_DEFINE_ENUM(XLOG_STATE_CALLBACK); TRACE_DEFINE_ENUM(XLOG_STATE_DIRTY); DECLARE_EVENT_CLASS(xlog_iclog_class, TP_PROTO(struct xlog_in_core *iclog, unsigned long caller_ip), TP_ARGS(iclog, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(uint32_t, state) __field(int32_t, refcount) __field(uint32_t, offset) __field(uint32_t, flags) __field(unsigned long long, lsn) __field(unsigned long, caller_ip) ), TP_fast_assign( __entry->dev = iclog->ic_log->l_mp->m_super->s_dev; __entry->state = iclog->ic_state; __entry->refcount = atomic_read(&iclog->ic_refcnt); __entry->offset = iclog->ic_offset; __entry->flags = iclog->ic_flags; __entry->lsn = be64_to_cpu(iclog->ic_header.h_lsn); __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d state %s refcnt %d offset %u lsn 0x%llx flags %s caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->state, XLOG_STATE_STRINGS), __entry->refcount, __entry->offset, __entry->lsn, __print_flags(__entry->flags, "|", XLOG_ICL_STRINGS), (char *)__entry->caller_ip) ); #define DEFINE_ICLOG_EVENT(name) \ DEFINE_EVENT(xlog_iclog_class, name, \ TP_PROTO(struct xlog_in_core *iclog, unsigned long caller_ip), \ TP_ARGS(iclog, caller_ip)) DEFINE_ICLOG_EVENT(xlog_iclog_activate); DEFINE_ICLOG_EVENT(xlog_iclog_clean); DEFINE_ICLOG_EVENT(xlog_iclog_callback); DEFINE_ICLOG_EVENT(xlog_iclog_callbacks_start); DEFINE_ICLOG_EVENT(xlog_iclog_callbacks_done); DEFINE_ICLOG_EVENT(xlog_iclog_force); DEFINE_ICLOG_EVENT(xlog_iclog_force_lsn); DEFINE_ICLOG_EVENT(xlog_iclog_get_space); DEFINE_ICLOG_EVENT(xlog_iclog_release); DEFINE_ICLOG_EVENT(xlog_iclog_switch); DEFINE_ICLOG_EVENT(xlog_iclog_sync); DEFINE_ICLOG_EVENT(xlog_iclog_syncing); DEFINE_ICLOG_EVENT(xlog_iclog_sync_done); DEFINE_ICLOG_EVENT(xlog_iclog_want_sync); DEFINE_ICLOG_EVENT(xlog_iclog_wait_on); DEFINE_ICLOG_EVENT(xlog_iclog_write); TRACE_DEFINE_ENUM(XFS_DAS_UNINIT); TRACE_DEFINE_ENUM(XFS_DAS_SF_ADD); TRACE_DEFINE_ENUM(XFS_DAS_SF_REMOVE); TRACE_DEFINE_ENUM(XFS_DAS_LEAF_ADD); TRACE_DEFINE_ENUM(XFS_DAS_LEAF_REMOVE); TRACE_DEFINE_ENUM(XFS_DAS_NODE_ADD); TRACE_DEFINE_ENUM(XFS_DAS_NODE_REMOVE); TRACE_DEFINE_ENUM(XFS_DAS_LEAF_SET_RMT); TRACE_DEFINE_ENUM(XFS_DAS_LEAF_ALLOC_RMT); TRACE_DEFINE_ENUM(XFS_DAS_LEAF_REPLACE); TRACE_DEFINE_ENUM(XFS_DAS_LEAF_REMOVE_OLD); TRACE_DEFINE_ENUM(XFS_DAS_LEAF_REMOVE_RMT); TRACE_DEFINE_ENUM(XFS_DAS_LEAF_REMOVE_ATTR); TRACE_DEFINE_ENUM(XFS_DAS_NODE_SET_RMT); TRACE_DEFINE_ENUM(XFS_DAS_NODE_ALLOC_RMT); TRACE_DEFINE_ENUM(XFS_DAS_NODE_REPLACE); TRACE_DEFINE_ENUM(XFS_DAS_NODE_REMOVE_OLD); TRACE_DEFINE_ENUM(XFS_DAS_NODE_REMOVE_RMT); TRACE_DEFINE_ENUM(XFS_DAS_NODE_REMOVE_ATTR); TRACE_DEFINE_ENUM(XFS_DAS_DONE); DECLARE_EVENT_CLASS(xfs_das_state_class, TP_PROTO(int das, struct xfs_inode *ip), TP_ARGS(das, ip), 
TP_STRUCT__entry( __field(int, das) __field(xfs_ino_t, ino) ), TP_fast_assign( __entry->das = das; __entry->ino = ip->i_ino; ), TP_printk("state change %s ino 0x%llx", __print_symbolic(__entry->das, XFS_DAS_STRINGS), __entry->ino) ) #define DEFINE_DAS_STATE_EVENT(name) \ DEFINE_EVENT(xfs_das_state_class, name, \ TP_PROTO(int das, struct xfs_inode *ip), \ TP_ARGS(das, ip)) DEFINE_DAS_STATE_EVENT(xfs_attr_sf_addname_return); DEFINE_DAS_STATE_EVENT(xfs_attr_set_iter_return); DEFINE_DAS_STATE_EVENT(xfs_attr_leaf_addname_return); DEFINE_DAS_STATE_EVENT(xfs_attr_node_addname_return); DEFINE_DAS_STATE_EVENT(xfs_attr_remove_iter_return); DEFINE_DAS_STATE_EVENT(xfs_attr_rmtval_alloc); DEFINE_DAS_STATE_EVENT(xfs_attr_rmtval_remove_return); DEFINE_DAS_STATE_EVENT(xfs_attr_defer_add); TRACE_EVENT(xfs_force_shutdown, TP_PROTO(struct xfs_mount *mp, int ptag, int flags, const char *fname, int line_num), TP_ARGS(mp, ptag, flags, fname, line_num), TP_STRUCT__entry( __field(dev_t, dev) __field(int, ptag) __field(int, flags) __string(fname, fname) __field(int, line_num) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->ptag = ptag; __entry->flags = flags; __assign_str(fname); __entry->line_num = line_num; ), TP_printk("dev %d:%d tag %s flags %s file %s line_num %d", MAJOR(__entry->dev), MINOR(__entry->dev), __print_flags(__entry->ptag, "|", XFS_PTAG_STRINGS), __print_flags(__entry->flags, "|", XFS_SHUTDOWN_STRINGS), __get_str(fname), __entry->line_num) ); #ifdef CONFIG_XFS_DRAIN_INTENTS DECLARE_EVENT_CLASS(xfs_group_intents_class, TP_PROTO(const struct xfs_group *xg, void *caller_ip), TP_ARGS(xg, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(enum xfs_group_type, type) __field(uint32_t, index) __field(long, nr_intents) __field(void *, caller_ip) ), TP_fast_assign( __entry->dev = xg->xg_mount->m_super->s_dev; __entry->type = xg->xg_type; __entry->index = xg->xg_gno; __entry->nr_intents = atomic_read(&xg->xg_intents_drain.dr_count); __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d %sno 0x%x intents %ld caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->type, XG_TYPE_STRINGS), __entry->index, __entry->nr_intents, __entry->caller_ip) ); #define DEFINE_GROUP_INTENTS_EVENT(name) \ DEFINE_EVENT(xfs_group_intents_class, name, \ TP_PROTO(const struct xfs_group *xg, void *caller_ip), \ TP_ARGS(xg, caller_ip)) DEFINE_GROUP_INTENTS_EVENT(xfs_group_intent_hold); DEFINE_GROUP_INTENTS_EVENT(xfs_group_intent_rele); DEFINE_GROUP_INTENTS_EVENT(xfs_group_wait_intents); #endif /* CONFIG_XFS_DRAIN_INTENTS */ #ifdef CONFIG_XFS_MEMORY_BUFS TRACE_EVENT(xmbuf_create, TP_PROTO(struct xfs_buftarg *btp), TP_ARGS(btp), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned long, ino) __array(char, pathname, MAXNAMELEN) ), TP_fast_assign( char *path; struct file *file = btp->bt_file; __entry->dev = btp->bt_mount->m_super->s_dev; __entry->ino = file_inode(file)->i_ino; path = file_path(file, __entry->pathname, MAXNAMELEN); if (IS_ERR(path)) strncpy(__entry->pathname, "(unknown)", sizeof(__entry->pathname)); ), TP_printk("dev %d:%d xmino 0x%lx path '%s'", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->pathname) ); TRACE_EVENT(xmbuf_free, TP_PROTO(struct xfs_buftarg *btp), TP_ARGS(btp), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned long, ino) __field(unsigned long long, bytes) __field(loff_t, size) ), TP_fast_assign( struct file *file = btp->bt_file; struct inode *inode = file_inode(file); __entry->dev = btp->bt_mount->m_super->s_dev; __entry->size = 
i_size_read(inode); __entry->bytes = (inode->i_blocks << SECTOR_SHIFT) + inode->i_bytes; __entry->ino = inode->i_ino; ), TP_printk("dev %d:%d xmino 0x%lx mem_bytes 0x%llx isize 0x%llx", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->bytes, __entry->size) ); #endif /* CONFIG_XFS_MEMORY_BUFS */ #ifdef CONFIG_XFS_BTREE_IN_MEM TRACE_EVENT(xfbtree_init, TP_PROTO(struct xfs_mount *mp, struct xfbtree *xfbt, const struct xfs_btree_ops *ops), TP_ARGS(mp, xfbt, ops), TP_STRUCT__entry( __field(const void *, btree_ops) __field(unsigned long, xfino) __field(unsigned int, leaf_mxr) __field(unsigned int, leaf_mnr) __field(unsigned int, node_mxr) __field(unsigned int, node_mnr) __field(unsigned long long, owner) ), TP_fast_assign( __entry->btree_ops = ops; __entry->xfino = file_inode(xfbt->target->bt_file)->i_ino; __entry->leaf_mxr = xfbt->maxrecs[0]; __entry->node_mxr = xfbt->maxrecs[1]; __entry->leaf_mnr = xfbt->minrecs[0]; __entry->node_mnr = xfbt->minrecs[1]; __entry->owner = xfbt->owner; ), TP_printk("xfino 0x%lx btree_ops %pS owner 0x%llx leaf_mxr %u leaf_mnr %u node_mxr %u node_mnr %u", __entry->xfino, __entry->btree_ops, __entry->owner, __entry->leaf_mxr, __entry->leaf_mnr, __entry->node_mxr, __entry->node_mnr) ); DECLARE_EVENT_CLASS(xfbtree_buf_class, TP_PROTO(struct xfbtree *xfbt, struct xfs_buf *bp), TP_ARGS(xfbt, bp), TP_STRUCT__entry( __field(unsigned long, xfino) __field(xfs_daddr_t, bno) __field(int, nblks) __field(int, hold) __field(int, pincount) __field(unsigned int, lockval) __field(unsigned int, flags) ), TP_fast_assign( __entry->xfino = file_inode(xfbt->target->bt_file)->i_ino; __entry->bno = xfs_buf_daddr(bp); __entry->nblks = bp->b_length; __entry->hold = bp->b_hold; __entry->pincount = atomic_read(&bp->b_pin_count); __entry->lockval = bp->b_sema.count; __entry->flags = bp->b_flags; ), TP_printk("xfino 0x%lx daddr 0x%llx bbcount 0x%x hold %d pincount %d lock %d flags %s", __entry->xfino, (unsigned long long)__entry->bno, __entry->nblks, __entry->hold, __entry->pincount, __entry->lockval, __print_flags(__entry->flags, "|", XFS_BUF_FLAGS)) ) #define DEFINE_XFBTREE_BUF_EVENT(name) \ DEFINE_EVENT(xfbtree_buf_class, name, \ TP_PROTO(struct xfbtree *xfbt, struct xfs_buf *bp), \ TP_ARGS(xfbt, bp)) DEFINE_XFBTREE_BUF_EVENT(xfbtree_create_root_buf); DEFINE_XFBTREE_BUF_EVENT(xfbtree_trans_commit_buf); DEFINE_XFBTREE_BUF_EVENT(xfbtree_trans_cancel_buf); DECLARE_EVENT_CLASS(xfbtree_freesp_class, TP_PROTO(struct xfbtree *xfbt, struct xfs_btree_cur *cur, xfs_fileoff_t fileoff), TP_ARGS(xfbt, cur, fileoff), TP_STRUCT__entry( __field(unsigned long, xfino) __string(btname, cur->bc_ops->name) __field(int, nlevels) __field(xfs_fileoff_t, fileoff) ), TP_fast_assign( __entry->xfino = file_inode(xfbt->target->bt_file)->i_ino; __assign_str(btname); __entry->nlevels = cur->bc_nlevels; __entry->fileoff = fileoff; ), TP_printk("xfino 0x%lx %sbt nlevels %d fileoff 0x%llx", __entry->xfino, __get_str(btname), __entry->nlevels, (unsigned long long)__entry->fileoff) ) #define DEFINE_XFBTREE_FREESP_EVENT(name) \ DEFINE_EVENT(xfbtree_freesp_class, name, \ TP_PROTO(struct xfbtree *xfbt, struct xfs_btree_cur *cur, \ xfs_fileoff_t fileoff), \ TP_ARGS(xfbt, cur, fileoff)) DEFINE_XFBTREE_FREESP_EVENT(xfbtree_alloc_block); DEFINE_XFBTREE_FREESP_EVENT(xfbtree_free_block); #endif /* CONFIG_XFS_BTREE_IN_MEM */ /* exchmaps tracepoints */ #define XFS_EXCHMAPS_STRINGS \ { XFS_EXCHMAPS_ATTR_FORK, "ATTRFORK" }, \ { XFS_EXCHMAPS_SET_SIZES, "SETSIZES" }, \ { XFS_EXCHMAPS_INO1_WRITTEN, "INO1_WRITTEN" }, \ { 
XFS_EXCHMAPS_CLEAR_INO1_REFLINK, "CLEAR_INO1_REFLINK" }, \ { XFS_EXCHMAPS_CLEAR_INO2_REFLINK, "CLEAR_INO2_REFLINK" }, \ { __XFS_EXCHMAPS_INO2_SHORTFORM, "INO2_SF" } DEFINE_INODE_IREC_EVENT(xfs_exchmaps_mapping1_skip); DEFINE_INODE_IREC_EVENT(xfs_exchmaps_mapping1); DEFINE_INODE_IREC_EVENT(xfs_exchmaps_mapping2); DEFINE_ITRUNC_EVENT(xfs_exchmaps_update_inode_size); #define XFS_EXCHRANGE_INODES \ { 1, "file1" }, \ { 2, "file2" } DECLARE_EVENT_CLASS(xfs_exchrange_inode_class, TP_PROTO(struct xfs_inode *ip, int whichfile), TP_ARGS(ip, whichfile), TP_STRUCT__entry( __field(dev_t, dev) __field(int, whichfile) __field(xfs_ino_t, ino) __field(int, format) __field(xfs_extnum_t, nex) __field(int, broot_size) __field(int, fork_off) ), TP_fast_assign( __entry->dev = VFS_I(ip)->i_sb->s_dev; __entry->whichfile = whichfile; __entry->ino = ip->i_ino; __entry->format = ip->i_df.if_format; __entry->nex = ip->i_df.if_nextents; __entry->fork_off = xfs_inode_fork_boff(ip); ), TP_printk("dev %d:%d ino 0x%llx whichfile %s format %s num_extents %llu forkoff 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __print_symbolic(__entry->whichfile, XFS_EXCHRANGE_INODES), __print_symbolic(__entry->format, XFS_INODE_FORMAT_STR), __entry->nex, __entry->fork_off) ) #define DEFINE_EXCHRANGE_INODE_EVENT(name) \ DEFINE_EVENT(xfs_exchrange_inode_class, name, \ TP_PROTO(struct xfs_inode *ip, int whichfile), \ TP_ARGS(ip, whichfile)) DEFINE_EXCHRANGE_INODE_EVENT(xfs_exchrange_before); DEFINE_EXCHRANGE_INODE_EVENT(xfs_exchrange_after); DEFINE_INODE_ERROR_EVENT(xfs_exchrange_error); #define XFS_EXCHANGE_RANGE_FLAGS_STRS \ { XFS_EXCHANGE_RANGE_TO_EOF, "TO_EOF" }, \ { XFS_EXCHANGE_RANGE_DSYNC , "DSYNC" }, \ { XFS_EXCHANGE_RANGE_DRY_RUN, "DRY_RUN" }, \ { XFS_EXCHANGE_RANGE_FILE1_WRITTEN, "F1_WRITTEN" }, \ { __XFS_EXCHANGE_RANGE_UPD_CMTIME1, "CMTIME1" }, \ { __XFS_EXCHANGE_RANGE_UPD_CMTIME2, "CMTIME2" }, \ { __XFS_EXCHANGE_RANGE_CHECK_FRESH2, "FRESH2" } /* file exchange-range tracepoint class */ DECLARE_EVENT_CLASS(xfs_exchrange_class, TP_PROTO(const struct xfs_exchrange *fxr, struct xfs_inode *ip1, struct xfs_inode *ip2), TP_ARGS(fxr, ip1, ip2), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ip1_ino) __field(loff_t, ip1_isize) __field(loff_t, ip1_disize) __field(xfs_ino_t, ip2_ino) __field(loff_t, ip2_isize) __field(loff_t, ip2_disize) __field(loff_t, file1_offset) __field(loff_t, file2_offset) __field(unsigned long long, length) __field(unsigned long long, flags) ), TP_fast_assign( __entry->dev = VFS_I(ip1)->i_sb->s_dev; __entry->ip1_ino = ip1->i_ino; __entry->ip1_isize = VFS_I(ip1)->i_size; __entry->ip1_disize = ip1->i_disk_size; __entry->ip2_ino = ip2->i_ino; __entry->ip2_isize = VFS_I(ip2)->i_size; __entry->ip2_disize = ip2->i_disk_size; __entry->file1_offset = fxr->file1_offset; __entry->file2_offset = fxr->file2_offset; __entry->length = fxr->length; __entry->flags = fxr->flags; ), TP_printk("dev %d:%d flags %s bytecount 0x%llx " "ino1 0x%llx isize 0x%llx disize 0x%llx pos 0x%llx -> " "ino2 0x%llx isize 0x%llx disize 0x%llx pos 0x%llx", MAJOR(__entry->dev), MINOR(__entry->dev), __print_flags_u64(__entry->flags, "|", XFS_EXCHANGE_RANGE_FLAGS_STRS), __entry->length, __entry->ip1_ino, __entry->ip1_isize, __entry->ip1_disize, __entry->file1_offset, __entry->ip2_ino, __entry->ip2_isize, __entry->ip2_disize, __entry->file2_offset) ) #define DEFINE_EXCHRANGE_EVENT(name) \ DEFINE_EVENT(xfs_exchrange_class, name, \ TP_PROTO(const struct xfs_exchrange *fxr, struct xfs_inode *ip1, \ struct xfs_inode *ip2), \ 
TP_ARGS(fxr, ip1, ip2)) DEFINE_EXCHRANGE_EVENT(xfs_exchrange_prep); DEFINE_EXCHRANGE_EVENT(xfs_exchrange_flush); DEFINE_EXCHRANGE_EVENT(xfs_exchrange_mappings); TRACE_EVENT(xfs_exchrange_freshness, TP_PROTO(const struct xfs_exchrange *fxr, struct xfs_inode *ip2), TP_ARGS(fxr, ip2), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ip2_ino) __field(long long, ip2_mtime) __field(long long, ip2_ctime) __field(int, ip2_mtime_nsec) __field(int, ip2_ctime_nsec) __field(xfs_ino_t, file2_ino) __field(long long, file2_mtime) __field(long long, file2_ctime) __field(int, file2_mtime_nsec) __field(int, file2_ctime_nsec) ), TP_fast_assign( struct timespec64 ts64; struct inode *inode2 = VFS_I(ip2); __entry->dev = inode2->i_sb->s_dev; __entry->ip2_ino = ip2->i_ino; ts64 = inode_get_ctime(inode2); __entry->ip2_ctime = ts64.tv_sec; __entry->ip2_ctime_nsec = ts64.tv_nsec; ts64 = inode_get_mtime(inode2); __entry->ip2_mtime = ts64.tv_sec; __entry->ip2_mtime_nsec = ts64.tv_nsec; __entry->file2_ino = fxr->file2_ino; __entry->file2_mtime = fxr->file2_mtime.tv_sec; __entry->file2_ctime = fxr->file2_ctime.tv_sec; __entry->file2_mtime_nsec = fxr->file2_mtime.tv_nsec; __entry->file2_ctime_nsec = fxr->file2_ctime.tv_nsec; ), TP_printk("dev %d:%d " "ino 0x%llx mtime %lld:%d ctime %lld:%d -> " "file 0x%llx mtime %lld:%d ctime %lld:%d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ip2_ino, __entry->ip2_mtime, __entry->ip2_mtime_nsec, __entry->ip2_ctime, __entry->ip2_ctime_nsec, __entry->file2_ino, __entry->file2_mtime, __entry->file2_mtime_nsec, __entry->file2_ctime, __entry->file2_ctime_nsec) ); TRACE_EVENT(xfs_exchmaps_overhead, TP_PROTO(struct xfs_mount *mp, unsigned long long bmbt_blocks, unsigned long long rmapbt_blocks), TP_ARGS(mp, bmbt_blocks, rmapbt_blocks), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned long long, bmbt_blocks) __field(unsigned long long, rmapbt_blocks) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->bmbt_blocks = bmbt_blocks; __entry->rmapbt_blocks = rmapbt_blocks; ), TP_printk("dev %d:%d bmbt_blocks 0x%llx rmapbt_blocks 0x%llx", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->bmbt_blocks, __entry->rmapbt_blocks) ); DECLARE_EVENT_CLASS(xfs_exchmaps_estimate_class, TP_PROTO(const struct xfs_exchmaps_req *req), TP_ARGS(req), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino1) __field(xfs_ino_t, ino2) __field(xfs_fileoff_t, startoff1) __field(xfs_fileoff_t, startoff2) __field(xfs_filblks_t, blockcount) __field(uint64_t, flags) __field(xfs_filblks_t, ip1_bcount) __field(xfs_filblks_t, ip2_bcount) __field(xfs_filblks_t, ip1_rtbcount) __field(xfs_filblks_t, ip2_rtbcount) __field(unsigned long long, resblks) __field(unsigned long long, nr_exchanges) ), TP_fast_assign( __entry->dev = req->ip1->i_mount->m_super->s_dev; __entry->ino1 = req->ip1->i_ino; __entry->ino2 = req->ip2->i_ino; __entry->startoff1 = req->startoff1; __entry->startoff2 = req->startoff2; __entry->blockcount = req->blockcount; __entry->flags = req->flags; __entry->ip1_bcount = req->ip1_bcount; __entry->ip2_bcount = req->ip2_bcount; __entry->ip1_rtbcount = req->ip1_rtbcount; __entry->ip2_rtbcount = req->ip2_rtbcount; __entry->resblks = req->resblks; __entry->nr_exchanges = req->nr_exchanges; ), TP_printk("dev %d:%d ino1 0x%llx fileoff1 0x%llx ino2 0x%llx fileoff2 0x%llx fsbcount 0x%llx flags (%s) bcount1 0x%llx rtbcount1 0x%llx bcount2 0x%llx rtbcount2 0x%llx resblks 0x%llx nr_exchanges %llu", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino1, __entry->startoff1, __entry->ino2, 
__entry->startoff2, __entry->blockcount, __print_flags_u64(__entry->flags, "|", XFS_EXCHMAPS_STRINGS), __entry->ip1_bcount, __entry->ip1_rtbcount, __entry->ip2_bcount, __entry->ip2_rtbcount, __entry->resblks, __entry->nr_exchanges) ); #define DEFINE_EXCHMAPS_ESTIMATE_EVENT(name) \ DEFINE_EVENT(xfs_exchmaps_estimate_class, name, \ TP_PROTO(const struct xfs_exchmaps_req *req), \ TP_ARGS(req)) DEFINE_EXCHMAPS_ESTIMATE_EVENT(xfs_exchmaps_initial_estimate); DEFINE_EXCHMAPS_ESTIMATE_EVENT(xfs_exchmaps_final_estimate); DECLARE_EVENT_CLASS(xfs_exchmaps_intent_class, TP_PROTO(struct xfs_mount *mp, const struct xfs_exchmaps_intent *xmi), TP_ARGS(mp, xmi), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino1) __field(xfs_ino_t, ino2) __field(uint64_t, flags) __field(xfs_fileoff_t, startoff1) __field(xfs_fileoff_t, startoff2) __field(xfs_filblks_t, blockcount) __field(xfs_fsize_t, isize1) __field(xfs_fsize_t, isize2) __field(xfs_fsize_t, new_isize1) __field(xfs_fsize_t, new_isize2) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->ino1 = xmi->xmi_ip1->i_ino; __entry->ino2 = xmi->xmi_ip2->i_ino; __entry->flags = xmi->xmi_flags; __entry->startoff1 = xmi->xmi_startoff1; __entry->startoff2 = xmi->xmi_startoff2; __entry->blockcount = xmi->xmi_blockcount; __entry->isize1 = xmi->xmi_ip1->i_disk_size; __entry->isize2 = xmi->xmi_ip2->i_disk_size; __entry->new_isize1 = xmi->xmi_isize1; __entry->new_isize2 = xmi->xmi_isize2; ), TP_printk("dev %d:%d ino1 0x%llx fileoff1 0x%llx ino2 0x%llx fileoff2 0x%llx fsbcount 0x%llx flags (%s) isize1 0x%llx newisize1 0x%llx isize2 0x%llx newisize2 0x%llx", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino1, __entry->startoff1, __entry->ino2, __entry->startoff2, __entry->blockcount, __print_flags_u64(__entry->flags, "|", XFS_EXCHMAPS_STRINGS), __entry->isize1, __entry->new_isize1, __entry->isize2, __entry->new_isize2) ); #define DEFINE_EXCHMAPS_INTENT_EVENT(name) \ DEFINE_EVENT(xfs_exchmaps_intent_class, name, \ TP_PROTO(struct xfs_mount *mp, const struct xfs_exchmaps_intent *xmi), \ TP_ARGS(mp, xmi)) DEFINE_EXCHMAPS_INTENT_EVENT(xfs_exchmaps_defer); DEFINE_EXCHMAPS_INTENT_EVENT(xfs_exchmaps_recover); TRACE_EVENT(xfs_exchmaps_delta_nextents_step, TP_PROTO(struct xfs_mount *mp, const struct xfs_bmbt_irec *left, const struct xfs_bmbt_irec *curr, const struct xfs_bmbt_irec *new, const struct xfs_bmbt_irec *right, int delta, unsigned int state), TP_ARGS(mp, left, curr, new, right, delta, state), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_fileoff_t, loff) __field(xfs_fsblock_t, lstart) __field(xfs_filblks_t, lcount) __field(xfs_fileoff_t, coff) __field(xfs_fsblock_t, cstart) __field(xfs_filblks_t, ccount) __field(xfs_fileoff_t, noff) __field(xfs_fsblock_t, nstart) __field(xfs_filblks_t, ncount) __field(xfs_fileoff_t, roff) __field(xfs_fsblock_t, rstart) __field(xfs_filblks_t, rcount) __field(int, delta) __field(unsigned int, state) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->loff = left->br_startoff; __entry->lstart = left->br_startblock; __entry->lcount = left->br_blockcount; __entry->coff = curr->br_startoff; __entry->cstart = curr->br_startblock; __entry->ccount = curr->br_blockcount; __entry->noff = new->br_startoff; __entry->nstart = new->br_startblock; __entry->ncount = new->br_blockcount; __entry->roff = right->br_startoff; __entry->rstart = right->br_startblock; __entry->rcount = right->br_blockcount; __entry->delta = delta; __entry->state = state; ), TP_printk("dev %d:%d left 0x%llx:0x%llx:0x%llx; curr 
0x%llx:0x%llx:0x%llx <- new 0x%llx:0x%llx:0x%llx; right 0x%llx:0x%llx:0x%llx delta %d state 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->loff, __entry->lstart, __entry->lcount, __entry->coff, __entry->cstart, __entry->ccount, __entry->noff, __entry->nstart, __entry->ncount, __entry->roff, __entry->rstart, __entry->rcount, __entry->delta, __entry->state) ); TRACE_EVENT(xfs_exchmaps_delta_nextents, TP_PROTO(const struct xfs_exchmaps_req *req, int64_t d_nexts1, int64_t d_nexts2), TP_ARGS(req, d_nexts1, d_nexts2), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino1) __field(xfs_ino_t, ino2) __field(xfs_extnum_t, nexts1) __field(xfs_extnum_t, nexts2) __field(int64_t, d_nexts1) __field(int64_t, d_nexts2) ), TP_fast_assign( int whichfork = xfs_exchmaps_reqfork(req); __entry->dev = req->ip1->i_mount->m_super->s_dev; __entry->ino1 = req->ip1->i_ino; __entry->ino2 = req->ip2->i_ino; __entry->nexts1 = xfs_ifork_ptr(req->ip1, whichfork)->if_nextents; __entry->nexts2 = xfs_ifork_ptr(req->ip2, whichfork)->if_nextents; __entry->d_nexts1 = d_nexts1; __entry->d_nexts2 = d_nexts2; ), TP_printk("dev %d:%d ino1 0x%llx nexts %llu ino2 0x%llx nexts %llu delta1 %lld delta2 %lld", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino1, __entry->nexts1, __entry->ino2, __entry->nexts2, __entry->d_nexts1, __entry->d_nexts2) ); DECLARE_EVENT_CLASS(xfs_getparents_rec_class, TP_PROTO(struct xfs_inode *ip, const struct xfs_getparents *ppi, const struct xfs_attr_list_context *context, const struct xfs_getparents_rec *pptr), TP_ARGS(ip, ppi, context, pptr), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(unsigned int, firstu) __field(unsigned short, reclen) __field(unsigned int, bufsize) __field(xfs_ino_t, parent_ino) __field(unsigned int, parent_gen) __string(name, pptr->gpr_name) ), TP_fast_assign( __entry->dev = ip->i_mount->m_super->s_dev; __entry->ino = ip->i_ino; __entry->firstu = context->firstu; __entry->reclen = pptr->gpr_reclen; __entry->bufsize = ppi->gp_bufsize; __entry->parent_ino = pptr->gpr_parent.ha_fid.fid_ino; __entry->parent_gen = pptr->gpr_parent.ha_fid.fid_gen; __assign_str(name); ), TP_printk("dev %d:%d ino 0x%llx firstu %u reclen %u bufsize %u parent_ino 0x%llx parent_gen 0x%x name '%s'", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->firstu, __entry->reclen, __entry->bufsize, __entry->parent_ino, __entry->parent_gen, __get_str(name)) ) #define DEFINE_XFS_GETPARENTS_REC_EVENT(name) \ DEFINE_EVENT(xfs_getparents_rec_class, name, \ TP_PROTO(struct xfs_inode *ip, const struct xfs_getparents *ppi, \ const struct xfs_attr_list_context *context, \ const struct xfs_getparents_rec *pptr), \ TP_ARGS(ip, ppi, context, pptr)) DEFINE_XFS_GETPARENTS_REC_EVENT(xfs_getparents_put_listent); DEFINE_XFS_GETPARENTS_REC_EVENT(xfs_getparents_expand_lastrec); DECLARE_EVENT_CLASS(xfs_getparents_class, TP_PROTO(struct xfs_inode *ip, const struct xfs_getparents *ppi, const struct xfs_attrlist_cursor_kern *cur), TP_ARGS(ip, ppi, cur), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, ino) __field(unsigned short, iflags) __field(unsigned short, oflags) __field(unsigned int, bufsize) __field(unsigned int, hashval) __field(unsigned int, blkno) __field(unsigned int, offset) __field(int, initted) ), TP_fast_assign( __entry->dev = ip->i_mount->m_super->s_dev; __entry->ino = ip->i_ino; __entry->iflags = ppi->gp_iflags; __entry->oflags = ppi->gp_oflags; __entry->bufsize = ppi->gp_bufsize; __entry->hashval = cur->hashval; __entry->blkno = cur->blkno; 
__entry->offset = cur->offset; __entry->initted = cur->initted; ), TP_printk("dev %d:%d ino 0x%llx iflags 0x%x oflags 0x%x bufsize %u cur_init? %d hashval 0x%x blkno %u offset %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->iflags, __entry->oflags, __entry->bufsize, __entry->initted, __entry->hashval, __entry->blkno, __entry->offset) ) #define DEFINE_XFS_GETPARENTS_EVENT(name) \ DEFINE_EVENT(xfs_getparents_class, name, \ TP_PROTO(struct xfs_inode *ip, const struct xfs_getparents *ppi, \ const struct xfs_attrlist_cursor_kern *cur), \ TP_ARGS(ip, ppi, cur)) DEFINE_XFS_GETPARENTS_EVENT(xfs_getparents_begin); DEFINE_XFS_GETPARENTS_EVENT(xfs_getparents_end); DECLARE_EVENT_CLASS(xfs_metadir_update_class, TP_PROTO(const struct xfs_metadir_update *upd), TP_ARGS(upd), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, dp_ino) __field(xfs_ino_t, ino) __string(fname, upd->path) ), TP_fast_assign( __entry->dev = upd->dp->i_mount->m_super->s_dev; __entry->dp_ino = upd->dp->i_ino; __entry->ino = upd->ip ? upd->ip->i_ino : NULLFSINO; __assign_str(fname); ), TP_printk("dev %d:%d dp 0x%llx fname '%s' ino 0x%llx", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->dp_ino, __get_str(fname), __entry->ino) ) #define DEFINE_METADIR_UPDATE_EVENT(name) \ DEFINE_EVENT(xfs_metadir_update_class, name, \ TP_PROTO(const struct xfs_metadir_update *upd), \ TP_ARGS(upd)) DEFINE_METADIR_UPDATE_EVENT(xfs_metadir_start_create); DEFINE_METADIR_UPDATE_EVENT(xfs_metadir_start_link); DEFINE_METADIR_UPDATE_EVENT(xfs_metadir_commit); DEFINE_METADIR_UPDATE_EVENT(xfs_metadir_cancel); DEFINE_METADIR_UPDATE_EVENT(xfs_metadir_try_create); DEFINE_METADIR_UPDATE_EVENT(xfs_metadir_create); DEFINE_METADIR_UPDATE_EVENT(xfs_metadir_link); DECLARE_EVENT_CLASS(xfs_metadir_update_error_class, TP_PROTO(const struct xfs_metadir_update *upd, int error), TP_ARGS(upd, error), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, dp_ino) __field(xfs_ino_t, ino) __field(int, error) __string(fname, upd->path) ), TP_fast_assign( __entry->dev = upd->dp->i_mount->m_super->s_dev; __entry->dp_ino = upd->dp->i_ino; __entry->ino = upd->ip ? 
upd->ip->i_ino : NULLFSINO; __entry->error = error; __assign_str(fname); ), TP_printk("dev %d:%d dp 0x%llx fname '%s' ino 0x%llx error %d", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->dp_ino, __get_str(fname), __entry->ino, __entry->error) ) #define DEFINE_METADIR_UPDATE_ERROR_EVENT(name) \ DEFINE_EVENT(xfs_metadir_update_error_class, name, \ TP_PROTO(const struct xfs_metadir_update *upd, int error), \ TP_ARGS(upd, error)) DEFINE_METADIR_UPDATE_ERROR_EVENT(xfs_metadir_teardown); DECLARE_EVENT_CLASS(xfs_metadir_class, TP_PROTO(struct xfs_inode *dp, struct xfs_name *name, xfs_ino_t ino), TP_ARGS(dp, name, ino), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_ino_t, dp_ino) __field(xfs_ino_t, ino) __field(int, ftype) __field(int, namelen) __dynamic_array(char, name, name->len) ), TP_fast_assign( __entry->dev = VFS_I(dp)->i_sb->s_dev; __entry->dp_ino = dp->i_ino; __entry->ino = ino, __entry->ftype = name->type; __entry->namelen = name->len; memcpy(__get_str(name), name->name, name->len); ), TP_printk("dev %d:%d dir 0x%llx type %s name '%.*s' ino 0x%llx", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->dp_ino, __print_symbolic(__entry->ftype, XFS_DIR3_FTYPE_STR), __entry->namelen, __get_str(name), __entry->ino) ) #define DEFINE_METADIR_EVENT(name) \ DEFINE_EVENT(xfs_metadir_class, name, \ TP_PROTO(struct xfs_inode *dp, struct xfs_name *name, \ xfs_ino_t ino), \ TP_ARGS(dp, name, ino)) DEFINE_METADIR_EVENT(xfs_metadir_lookup); /* metadata inode space reservations */ DECLARE_EVENT_CLASS(xfs_metafile_resv_class, TP_PROTO(struct xfs_mount *mp, xfs_filblks_t len), TP_ARGS(mp, len), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned long long, freeblks) __field(unsigned long long, reserved) __field(unsigned long long, asked) __field(unsigned long long, used) __field(unsigned long long, len) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->freeblks = xfs_sum_freecounter_raw(mp, XC_FREE_BLOCKS); __entry->reserved = mp->m_metafile_resv_avail; __entry->asked = mp->m_metafile_resv_target; __entry->used = mp->m_metafile_resv_used; __entry->len = len; ), TP_printk("dev %d:%d freeblks %llu resv %llu ask %llu used %llu len %llu", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->freeblks, __entry->reserved, __entry->asked, __entry->used, __entry->len) ) #define DEFINE_METAFILE_RESV_EVENT(name) \ DEFINE_EVENT(xfs_metafile_resv_class, name, \ TP_PROTO(struct xfs_mount *mp, xfs_filblks_t len), \ TP_ARGS(mp, len)) DEFINE_METAFILE_RESV_EVENT(xfs_metafile_resv_init); DEFINE_METAFILE_RESV_EVENT(xfs_metafile_resv_free); DEFINE_METAFILE_RESV_EVENT(xfs_metafile_resv_alloc_space); DEFINE_METAFILE_RESV_EVENT(xfs_metafile_resv_free_space); DEFINE_METAFILE_RESV_EVENT(xfs_metafile_resv_critical); DEFINE_METAFILE_RESV_EVENT(xfs_metafile_resv_init_error); #ifdef CONFIG_XFS_RT TRACE_EVENT(xfs_growfs_check_rtgeom, TP_PROTO(const struct xfs_mount *mp, unsigned int min_logfsbs), TP_ARGS(mp, min_logfsbs), TP_STRUCT__entry( __field(dev_t, dev) __field(unsigned int, logblocks) __field(unsigned int, min_logfsbs) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->logblocks = mp->m_sb.sb_logblocks; __entry->min_logfsbs = min_logfsbs; ), TP_printk("dev %d:%d logblocks %u min_logfsbs %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->logblocks, __entry->min_logfsbs) ); #endif /* CONFIG_XFS_RT */ TRACE_DEFINE_ENUM(XC_FREE_BLOCKS); TRACE_DEFINE_ENUM(XC_FREE_RTEXTENTS); TRACE_DEFINE_ENUM(XC_FREE_RTAVAILABLE); DECLARE_EVENT_CLASS(xfs_freeblocks_resv_class, TP_PROTO(struct xfs_mount *mp, enum 
xfs_free_counter ctr, uint64_t delta, unsigned long caller_ip), TP_ARGS(mp, ctr, delta, caller_ip), TP_STRUCT__entry( __field(dev_t, dev) __field(enum xfs_free_counter, ctr) __field(uint64_t, delta) __field(uint64_t, avail) __field(uint64_t, total) __field(unsigned long, caller_ip) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; __entry->ctr = ctr; __entry->delta = delta; __entry->avail = mp->m_free[ctr].res_avail; __entry->total = mp->m_free[ctr].res_total; __entry->caller_ip = caller_ip; ), TP_printk("dev %d:%d ctr %s delta %llu avail %llu total %llu caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev), __print_symbolic(__entry->ctr, XFS_FREECOUNTER_STR), __entry->delta, __entry->avail, __entry->total, (char *)__entry->caller_ip) ) #define DEFINE_FREEBLOCKS_RESV_EVENT(name) \ DEFINE_EVENT(xfs_freeblocks_resv_class, name, \ TP_PROTO(struct xfs_mount *mp, enum xfs_free_counter ctr, \ uint64_t delta, unsigned long caller_ip), \ TP_ARGS(mp, ctr, delta, caller_ip)) DEFINE_FREEBLOCKS_RESV_EVENT(xfs_freecounter_reserved); DEFINE_FREEBLOCKS_RESV_EVENT(xfs_freecounter_enospc); #endif /* _TRACE_XFS_H */ #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . #define TRACE_INCLUDE_FILE xfs_trace #include <trace/define_trace.h> |
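/*
 * Editor's note -- illustrative sketch, not part of xfs_trace.h: each
 * TRACE_EVENT()/DEFINE_EVENT() above (for example xfs_trans_commit from the
 * xfs_trans_class) becomes a tracepoint that XFS calls via the generated
 * trace_<event>() helper and that userspace can enable through tracefs.  The
 * small program below shows one plausible way to enable and stream such an
 * event; the tracefs mount point and the events/xfs/xfs_trans_commit path are
 * assumptions for the sake of the example (root privileges required).
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define TRACEFS "/sys/kernel/tracing"	/* assumed tracefs mount point */

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	char line[4096];
	ssize_t n;
	int fd;

	/* Enable just the xfs_trans_commit event defined above. */
	if (write_str(TRACEFS "/events/xfs/xfs_trans_commit/enable", "1"))
		return 1;

	/* trace_pipe blocks until events arrive, then streams them. */
	fd = open(TRACEFS "/trace_pipe", O_RDONLY);
	if (fd < 0)
		return 1;
	while ((n = read(fd, line, sizeof(line) - 1)) > 0) {
		line[n] = '\0';
		fputs(line, stdout);	/* lines ending in the TP_printk text */
	}
	close(fd);
	return 0;
}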
// SPDX-License-Identifier: GPL-2.0-or-later /* * Glue Code for 3-way parallel assembler optimized version of Twofish * * Copyright (c) 2011 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> */ #include <asm/cpu_device_id.h> #include <crypto/algapi.h> #include <crypto/twofish.h> #include <linux/crypto.h> #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> #include "twofish.h" #include "ecb_cbc_helpers.h" EXPORT_SYMBOL_GPL(__twofish_enc_blk_3way); EXPORT_SYMBOL_GPL(twofish_dec_blk_3way); static int twofish_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { return twofish_setkey(&tfm->base, key, keylen); } static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src) { __twofish_enc_blk_3way(ctx, dst, src, false); } void twofish_dec_blk_cbc_3way(const void *ctx, u8 *dst, const u8 *src) { u8 buf[2][TF_BLOCK_SIZE]; const u8 *s = src; if (dst == src) s = memcpy(buf, src, sizeof(buf)); twofish_dec_blk_3way(ctx, dst, src); crypto_xor(dst + TF_BLOCK_SIZE, s, sizeof(buf)); } EXPORT_SYMBOL_GPL(twofish_dec_blk_cbc_3way); static int ecb_encrypt(struct skcipher_request *req) { ECB_WALK_START(req, TF_BLOCK_SIZE, -1); ECB_BLOCK(3, twofish_enc_blk_3way); ECB_BLOCK(1, twofish_enc_blk); ECB_WALK_END(); } static int ecb_decrypt(struct skcipher_request *req) { ECB_WALK_START(req, TF_BLOCK_SIZE, -1); ECB_BLOCK(3, twofish_dec_blk_3way); ECB_BLOCK(1, twofish_dec_blk); ECB_WALK_END(); } static int cbc_encrypt(struct skcipher_request *req) { CBC_WALK_START(req, TF_BLOCK_SIZE, -1); CBC_ENC_BLOCK(twofish_enc_blk); CBC_WALK_END(); } static int cbc_decrypt(struct skcipher_request *req) { CBC_WALK_START(req, TF_BLOCK_SIZE, -1); CBC_DEC_BLOCK(3, twofish_dec_blk_cbc_3way); CBC_DEC_BLOCK(1, twofish_dec_blk); CBC_WALK_END(); } static struct skcipher_alg tf_skciphers[] = { { .base.cra_name = "ecb(twofish)", .base.cra_driver_name = "ecb-twofish-3way", .base.cra_priority = 300, .base.cra_blocksize = TF_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct twofish_ctx), .base.cra_module = THIS_MODULE, .min_keysize = TF_MIN_KEY_SIZE, .max_keysize = TF_MAX_KEY_SIZE, .setkey = twofish_setkey_skcipher, .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, { .base.cra_name = "cbc(twofish)", .base.cra_driver_name = "cbc-twofish-3way", .base.cra_priority = 300, .base.cra_blocksize = TF_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct twofish_ctx), .base.cra_module = THIS_MODULE, .min_keysize = TF_MIN_KEY_SIZE, .max_keysize = TF_MAX_KEY_SIZE, .ivsize = TF_BLOCK_SIZE, .setkey = twofish_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, }, }; static bool is_blacklisted_cpu(void) { if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) return false; switch (boot_cpu_data.x86_vfm) { case INTEL_ATOM_BONNELL: case INTEL_ATOM_BONNELL_MID: case INTEL_ATOM_SALTWELL: /* * On Atom, twofish-3way is slower than original assembler * implementation.
Twofish-3way trades off some performance by * storing blocks in 64bit registers so that three blocks can * be processed in parallel. On out-of-order CPUs that parallelism * gains back more performance than was traded off. * However, Atom does not benefit from this parallelism and * should be blacklisted. */ return true; } if (boot_cpu_data.x86 == 0x0f) { /* * On Pentium 4, twofish-3way is slower than original assembler * implementation because of the excessive use of 64bit rotates and * left-shifts (which are really slow on P4) needed to store and * handle a 128bit block in two 64bit registers. */ return true; } return false; } static int force; module_param(force, int, 0); MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist"); static int __init twofish_3way_init(void) { if (!force && is_blacklisted_cpu()) { printk(KERN_INFO "twofish-x86_64-3way: performance on this CPU " "would be suboptimal: disabling " "twofish-x86_64-3way.\n"); return -ENODEV; } return crypto_register_skciphers(tf_skciphers, ARRAY_SIZE(tf_skciphers)); } static void __exit twofish_3way_fini(void) { crypto_unregister_skciphers(tf_skciphers, ARRAY_SIZE(tf_skciphers)); } module_init(twofish_3way_init); module_exit(twofish_3way_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized"); MODULE_ALIAS_CRYPTO("twofish"); MODULE_ALIAS_CRYPTO("twofish-asm");
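/*
 * Editor's note -- illustrative sketch, not part of twofish_glue_3way.c: the
 * skciphers registered above ("ecb(twofish)", "cbc(twofish)") are consumed
 * through the generic crypto API rather than by calling the glue functions
 * directly.  The minimal module below shows one plausible way to request the
 * ECB variant and encrypt a single 16-byte block.  The module name, key and
 * plaintext values are made up for the example, and error handling is kept
 * to the bare minimum.
 */
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int __init tf_demo_init(void)
{
	u8 key[32] = { 0x01 };		/* demo 256-bit key */
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 *block;
	int err;

	/* Picks the highest-priority "ecb(twofish)" provider available. */
	tfm = crypto_alloc_skcipher("ecb(twofish)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Scatterlists need lowmem buffers, so don't use the stack here. */
	block = kmalloc(16, GFP_KERNEL);
	if (!block) {
		err = -ENOMEM;
		goto out_free_tfm;
	}
	memset(block, 0xab, 16);	/* one Twofish block of demo plaintext */

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_block;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_block;
	}

	sg_init_one(&sg, block, 16);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, 16, NULL);	/* ECB: no IV */

	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	if (!err)
		pr_info("tf_demo: first ciphertext byte %#x\n", block[0]);

	skcipher_request_free(req);
out_free_block:
	kfree(block);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}

static void __exit tf_demo_exit(void)
{
}

module_init(tf_demo_init);
module_exit(tf_demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hypothetical ecb(twofish) usage sketch");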
/* * Copyright (c) 2006-2008 Intel Corporation * Copyright (c) 2007 Dave Airlie <airlied@linux.ie> * Copyright (c) 2008 Red Hat Inc. * Copyright (c) 2016 Intel Corporation * * Permission to use, copy, modify, distribute, and sell this software and its * documentation for any purpose is hereby granted without fee, provided that * the above copyright notice appear in all copies and that both that copyright * notice and this permission notice appear in supporting documentation, and * that the name of the copyright holders not be used in advertising or * publicity pertaining to distribution of the software without specific, * written prior permission. The copyright holders make no representations * about the suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. * * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THIS SOFTWARE. */ #include <drm/drm_device.h> #include <drm/drm_drv.h> #include <drm/drm_gem.h> #include <drm/drm_mode.h> #include "drm_crtc_internal.h" #include "drm_internal.h" /** * DOC: overview * * The KMS API doesn't standardize backing storage object creation and leaves it * to driver-specific ioctls. Furthermore actually creating a buffer object even * for GEM-based drivers is done through a driver-specific ioctl - GEM only has * a common userspace interface for sharing and destroying objects. While not an * issue for full-fledged graphics stacks that include device-specific userspace * components (in libdrm for instance), this limit makes DRM-based early boot * graphics unnecessarily complex. * * Dumb objects partly alleviate the problem by providing a standard API to * create dumb buffers suitable for scanout, which can then be used to create * KMS frame buffers. * * To support dumb objects drivers must implement the &drm_driver.dumb_create * and &drm_driver.dumb_map_offset operations (the latter defaults to * drm_gem_dumb_map_offset() if not set). Drivers that don't use GEM handles * additionally need to implement the &drm_driver.dumb_destroy operation. See * the callbacks for further details. * * Note that dumb objects may not be used for gpu acceleration, as has been * attempted on some ARM embedded platforms. Such drivers really must have * a hardware-specific ioctl to allocate suitable buffer objects.
*/ int drm_mode_create_dumb(struct drm_device *dev, struct drm_mode_create_dumb *args, struct drm_file *file_priv) { u32 cpp, stride, size; if (!dev->driver->dumb_create) return -ENOSYS; if (!args->width || !args->height || !args->bpp) return -EINVAL; /* overflow checks for 32bit size calculations */ if (args->bpp > U32_MAX - 8) return -EINVAL; cpp = DIV_ROUND_UP(args->bpp, 8); if (cpp > U32_MAX / args->width) return -EINVAL; stride = cpp * args->width; if (args->height > U32_MAX / stride) return -EINVAL; /* test for wrap-around */ size = args->height * stride; if (PAGE_ALIGN(size) == 0) return -EINVAL; /* * handle, pitch and size are output parameters. Zero them out to * prevent drivers from accidentally using uninitialized data. Since * not all existing userspace is clearing these fields properly we * cannot reject IOCTL with garbage in them. */ args->handle = 0; args->pitch = 0; args->size = 0; return dev->driver->dumb_create(file_priv, dev, args); } int drm_mode_create_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { return drm_mode_create_dumb(dev, data, file_priv); } /** * drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer * @dev: DRM device * @data: ioctl data * @file_priv: DRM file info * * Allocate an offset in the drm device node's address space to be able to * memory map a dumb buffer. * * Called by the user via ioctl. * * Returns: * Zero on success, negative errno on failure. */ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_map_dumb *args = data; if (!dev->driver->dumb_create) return -ENOSYS; if (dev->driver->dumb_map_offset) return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset); else return drm_gem_dumb_map_offset(file_priv, dev, args->handle, &args->offset); } int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle, struct drm_file *file_priv) { if (!dev->driver->dumb_create) return -ENOSYS; return drm_gem_handle_delete(file_priv, handle); } int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_destroy_dumb *args = data; return drm_mode_destroy_dumb(dev, args->handle, file_priv); } |
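/*
 * Editor's note -- illustrative sketch, not part of drm_dumb_buffers.c: the
 * ioctls implemented above are normally driven from userspace through the
 * standard UAPI structures in <drm/drm_mode.h>.  The program below shows the
 * usual create -> map -> mmap -> destroy sequence for a dumb buffer.  The
 * /dev/dri/card0 path and the 640x480 XRGB8888 geometry are arbitrary example
 * values, and attaching the buffer to a framebuffer/CRTC (ADDFB + modeset) is
 * deliberately left out.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <drm/drm.h>
#include <drm/drm_mode.h>

int main(void)
{
	struct drm_mode_create_dumb create = {
		.width = 640, .height = 480, .bpp = 32,	/* XRGB8888 */
	};
	struct drm_mode_map_dumb map = { 0 };
	struct drm_mode_destroy_dumb destroy = { 0 };
	void *fb;
	int fd;

	fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return 1;

	/* drm_mode_create_dumb() fills in handle, pitch and size. */
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		goto fail;

	/* drm_mode_mmap_dumb_ioctl() returns a fake offset for mmap(). */
	map.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		goto fail;

	fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		  fd, map.offset);
	if (fb == MAP_FAILED)
		goto fail;

	memset(fb, 0x80, create.size);	/* scribble into the scanout buffer */
	munmap(fb, create.size);

	destroy.handle = create.handle;
	ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy);
	close(fd);
	return 0;

fail:
	close(fd);
	return 1;
}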
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/usb.h> #include <linux/hid.h> #include <linux/mutex.h> #include <linux/videodev2.h> #include <linux/unaligned.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> /* * 'Thanko's Raremono' is a Japanese si4734-based AM/FM/SW USB receiver: * * http://www.raremono.jp/product/484.html/ * * The USB protocol has been reverse engineered using wireshark, initially * by Dinesh Ram <dinesh.ram@cern.ch> and finished by Hans Verkuil * <hverkuil@xs4all.nl>. * * Sadly the firmware used in this product hides lots of goodies since the * si4734 has more features than are supported by the firmware. Oh well... */ /* driver and module definitions */ MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>"); MODULE_DESCRIPTION("Thanko's Raremono AM/FM/SW Receiver USB driver"); MODULE_LICENSE("GPL v2"); /* * The Device announces itself as Cygnal Integrated Products, Inc. * * The vendor and product IDs (and in fact all other lsusb information as * well) are identical to the si470x Silicon Labs USB FM Radio Reference * Design board, even though this card has a si4734 device. Clearly the * designer of this product never bothered to change the USB IDs. */ /* USB Device ID List */ static const struct usb_device_id usb_raremono_device_table[] = { {USB_DEVICE_AND_INTERFACE_INFO(0x10c4, 0x818a, USB_CLASS_HID, 0, 0) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, usb_raremono_device_table); #define BUFFER_LENGTH 64 /* Timeout is set to a high value, could probably be reduced.
Need more tests */ #define USB_TIMEOUT 10000 /* Frequency limits in KHz */ #define FM_FREQ_RANGE_LOW 64000 #define FM_FREQ_RANGE_HIGH 108000 #define AM_FREQ_RANGE_LOW 520 #define AM_FREQ_RANGE_HIGH 1710 #define SW_FREQ_RANGE_LOW 2300 #define SW_FREQ_RANGE_HIGH 26100 enum { BAND_FM, BAND_AM, BAND_SW }; static const struct v4l2_frequency_band bands[] = { /* Band FM */ { .type = V4L2_TUNER_RADIO, .index = 0, .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_FREQ_BANDS, .rangelow = FM_FREQ_RANGE_LOW * 16, .rangehigh = FM_FREQ_RANGE_HIGH * 16, .modulation = V4L2_BAND_MODULATION_FM, }, /* Band AM */ { .type = V4L2_TUNER_RADIO, .index = 1, .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_FREQ_BANDS, .rangelow = AM_FREQ_RANGE_LOW * 16, .rangehigh = AM_FREQ_RANGE_HIGH * 16, .modulation = V4L2_BAND_MODULATION_AM, }, /* Band SW */ { .type = V4L2_TUNER_RADIO, .index = 2, .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_FREQ_BANDS, .rangelow = SW_FREQ_RANGE_LOW * 16, .rangehigh = SW_FREQ_RANGE_HIGH * 16, .modulation = V4L2_BAND_MODULATION_AM, }, }; struct raremono_device { struct usb_device *usbdev; struct usb_interface *intf; struct video_device vdev; struct v4l2_device v4l2_dev; struct mutex lock; u8 *buffer; u32 band; unsigned curfreq; }; static inline struct raremono_device *to_raremono_dev(struct v4l2_device *v4l2_dev) { return container_of(v4l2_dev, struct raremono_device, v4l2_dev); } /* Set frequency. */ static int raremono_cmd_main(struct raremono_device *radio, unsigned band, unsigned freq) { unsigned band_offset; int ret; switch (band) { case BAND_FM: band_offset = 1; freq /= 10; break; case BAND_AM: band_offset = 0; break; default: band_offset = 2; break; } radio->buffer[0] = 0x04 + band_offset; radio->buffer[1] = freq >> 8; radio->buffer[2] = freq & 0xff; ret = usb_control_msg(radio->usbdev, usb_sndctrlpipe(radio->usbdev, 0), HID_REQ_SET_REPORT, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT, 0x0300 + radio->buffer[0], 2, radio->buffer, 3, USB_TIMEOUT); if (ret < 0) { dev_warn(radio->v4l2_dev.dev, "%s failed (%d)\n", __func__, ret); return ret; } radio->curfreq = (band == BAND_FM) ? freq * 10 : freq; return 0; } /* Handle unplugging the device. * We call video_unregister_device in any case. * The last function called in this procedure is * usb_raremono_device_release. 
*/ static void usb_raremono_disconnect(struct usb_interface *intf) { struct raremono_device *radio = to_raremono_dev(usb_get_intfdata(intf)); dev_info(&intf->dev, "Thanko's Raremono disconnected\n"); mutex_lock(&radio->lock); usb_set_intfdata(intf, NULL); video_unregister_device(&radio->vdev); v4l2_device_disconnect(&radio->v4l2_dev); mutex_unlock(&radio->lock); v4l2_device_put(&radio->v4l2_dev); } /* * Linux Video interface */ static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *v) { struct raremono_device *radio = video_drvdata(file); strscpy(v->driver, "radio-raremono", sizeof(v->driver)); strscpy(v->card, "Thanko's Raremono", sizeof(v->card)); usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info)); return 0; } static int vidioc_enum_freq_bands(struct file *file, void *priv, struct v4l2_frequency_band *band) { if (band->tuner != 0) return -EINVAL; if (band->index >= ARRAY_SIZE(bands)) return -EINVAL; *band = bands[band->index]; return 0; } static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *v) { struct raremono_device *radio = video_drvdata(file); int ret; if (v->index > 0) return -EINVAL; strscpy(v->name, "AM/FM/SW", sizeof(v->name)); v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_FREQ_BANDS; v->rangelow = AM_FREQ_RANGE_LOW * 16; v->rangehigh = FM_FREQ_RANGE_HIGH * 16; v->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_MONO; v->audmode = (radio->curfreq < FM_FREQ_RANGE_LOW) ? V4L2_TUNER_MODE_MONO : V4L2_TUNER_MODE_STEREO; memset(radio->buffer, 1, BUFFER_LENGTH); ret = usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), 1, 0xa1, 0x030d, 2, radio->buffer, BUFFER_LENGTH, USB_TIMEOUT); if (ret < 0) { dev_warn(radio->v4l2_dev.dev, "%s failed (%d)\n", __func__, ret); return ret; } v->signal = ((radio->buffer[1] & 0xf) << 8 | radio->buffer[2]) << 4; return 0; } static int vidioc_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *v) { return v->index ? 
-EINVAL : 0; } static int vidioc_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *f) { struct raremono_device *radio = video_drvdata(file); u32 freq; unsigned band; if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO) return -EINVAL; if (f->frequency >= (FM_FREQ_RANGE_LOW + SW_FREQ_RANGE_HIGH) * 8) band = BAND_FM; else if (f->frequency <= (AM_FREQ_RANGE_HIGH + SW_FREQ_RANGE_LOW) * 8) band = BAND_AM; else band = BAND_SW; freq = clamp_t(u32, f->frequency, bands[band].rangelow, bands[band].rangehigh); return raremono_cmd_main(radio, band, freq / 16); } static int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct raremono_device *radio = video_drvdata(file); if (f->tuner != 0) return -EINVAL; f->type = V4L2_TUNER_RADIO; f->frequency = radio->curfreq * 16; return 0; } static void raremono_device_release(struct v4l2_device *v4l2_dev) { struct raremono_device *radio = to_raremono_dev(v4l2_dev); kfree(radio->buffer); kfree(radio); } /* File system interface */ static const struct v4l2_file_operations usb_raremono_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = v4l2_fh_release, .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops usb_raremono_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, .vidioc_enum_freq_bands = vidioc_enum_freq_bands, }; /* check if the device is present and register with v4l and usb if it is */ static int usb_raremono_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct raremono_device *radio; int retval = 0; radio = kzalloc(sizeof(*radio), GFP_KERNEL); if (!radio) return -ENOMEM; radio->buffer = kmalloc(BUFFER_LENGTH, GFP_KERNEL); if (!radio->buffer) { kfree(radio); return -ENOMEM; } radio->usbdev = interface_to_usbdev(intf); radio->intf = intf; /* * This device uses the same USB IDs as the si470x SiLabs reference * design. So do an additional check: attempt to read the device ID * from the si470x: the lower 12 bits are 0x0242 for the si470x. The * Raremono always returns 0x0800 (the meaning of that is unknown, but * at least it works). * * We use this check to determine which device we are dealing with. 
*/ msleep(20); retval = usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), HID_REQ_GET_REPORT, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, 1, 2, radio->buffer, 3, 500); if (retval != 3 || (get_unaligned_be16(&radio->buffer[1]) & 0xfff) == 0x0242) { dev_info(&intf->dev, "this is not Thanko's Raremono.\n"); retval = -ENODEV; goto free_mem; } dev_info(&intf->dev, "Thanko's Raremono connected: (%04X:%04X)\n", id->idVendor, id->idProduct); retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev); if (retval < 0) { dev_err(&intf->dev, "couldn't register v4l2_device\n"); goto free_mem; } mutex_init(&radio->lock); strscpy(radio->vdev.name, radio->v4l2_dev.name, sizeof(radio->vdev.name)); radio->vdev.v4l2_dev = &radio->v4l2_dev; radio->vdev.fops = &usb_raremono_fops; radio->vdev.ioctl_ops = &usb_raremono_ioctl_ops; radio->vdev.lock = &radio->lock; radio->vdev.release = video_device_release_empty; radio->vdev.device_caps = V4L2_CAP_TUNER | V4L2_CAP_RADIO; radio->v4l2_dev.release = raremono_device_release; usb_set_intfdata(intf, &radio->v4l2_dev); video_set_drvdata(&radio->vdev, radio); raremono_cmd_main(radio, BAND_FM, 95160); retval = video_register_device(&radio->vdev, VFL_TYPE_RADIO, -1); if (retval == 0) { dev_info(&intf->dev, "V4L2 device registered as %s\n", video_device_node_name(&radio->vdev)); return 0; } dev_err(&intf->dev, "could not register video device\n"); v4l2_device_unregister(&radio->v4l2_dev); free_mem: kfree(radio->buffer); kfree(radio); return retval; } /* USB subsystem interface */ static struct usb_driver usb_raremono_driver = { .name = "radio-raremono", .probe = usb_raremono_probe, .disconnect = usb_raremono_disconnect, .id_table = usb_raremono_device_table, }; module_usb_driver(usb_raremono_driver); |
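/*
 * Editor's note -- illustrative sketch only, not part of the driver above.
 * With V4L2_TUNER_CAP_LOW set, V4L2 expresses radio frequencies in units
 * of 62.5 Hz (1/16 kHz), which is why the band table above multiplies its
 * kHz limits by 16. The hypothetical helper below mirrors the comparison
 * done in vidioc_s_frequency(): each "* 8" term is simply the midpoint of
 * two "* 16" band edges, so a requested frequency snaps to the nearest
 * band before being clamped to that band's range.
 */
static unsigned raremono_example_pick_band(u32 v4l2_freq)
{
	/* Above ~45.05 MHz (midpoint of 26.1 MHz and 64 MHz): FM. */
	if (v4l2_freq >= (FM_FREQ_RANGE_LOW + SW_FREQ_RANGE_HIGH) * 8)
		return BAND_FM;
	/* Below ~2.005 MHz (midpoint of 1710 kHz and 2300 kHz): AM. */
	if (v4l2_freq <= (AM_FREQ_RANGE_HIGH + SW_FREQ_RANGE_LOW) * 8)
		return BAND_AM;
	return BAND_SW;
}

/*
 * Example: the 95.16 MHz default tuned in usb_raremono_probe() is
 * 95160 kHz, i.e. 95160 * 16 = 1522560 in V4L2 units, which is above the
 * FM cut-off and therefore selects BAND_FM.
 */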
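/*
 * Editor's note -- illustrative sketch only, not part of the driver above.
 * raremono_cmd_main() tunes the device with a single 3-byte HID
 * SET_REPORT: byte 0 is the report ID and doubles as the band selector
 * (0x04 = AM, 0x05 = FM, 0x06 = SW; wValue becomes 0x0300 + that ID and
 * wIndex is 2), bytes 1-2 carry the frequency big-endian -- in kHz for
 * AM/SW and in units of 10 kHz for FM, hence the "freq /= 10" in the
 * driver. The helper name below is hypothetical.
 */
static void raremono_example_fill_tune_report(u8 buf[3], unsigned band,
					      unsigned freq_khz)
{
	static const u8 report_id[] = {
		[BAND_FM] = 0x05, [BAND_AM] = 0x04, [BAND_SW] = 0x06,
	};
	unsigned val = (band == BAND_FM) ? freq_khz / 10 : freq_khz;

	buf[0] = report_id[band];	/* band select / HID report ID */
	buf[1] = val >> 8;		/* frequency, high byte */
	buf[2] = val & 0xff;		/* frequency, low byte */
}

/* Tuning 95.16 MHz FM therefore sends 0x05 0x25 0x2c (9516 == 0x252c). */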
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2005-2006 Dell Inc. * * Serial Attached SCSI (SAS) transport class. * * The SAS transport class contains common code to deal with SAS HBAs, * an approximate representation of SAS topologies in the driver model, * and various sysfs attributes to expose these topologies and management * interfaces to userspace. * * In addition to the basic SCSI core objects this transport class * introduces two additional intermediate objects: The SAS PHY * as represented by struct sas_phy defines an "outgoing" PHY on * a SAS HBA or Expander, and the SAS remote PHY represented by * struct sas_rphy defines an "incoming" PHY on a SAS Expander or * end device. Note that this is purely a software concept; the * underlying hardware for a PHY and a remote PHY is exactly * the same. * * There is no concept of a SAS port in this code; users can see * what PHYs form a wide port based on the port_identifier attribute, * which is the same for all PHYs in a port. */ #include <linux/init.h> #include <linux/module.h> #include <linux/jiffies.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/blkdev.h> #include <linux/bsg.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_sas.h> #include "scsi_sas_internal.h" struct sas_host_attrs { struct list_head rphy_list; struct mutex lock; struct request_queue *q; u32 next_target_id; u32 next_expander_id; int next_port_id; }; #define to_sas_host_attrs(host) ((struct sas_host_attrs *)(host)->shost_data) /* * Hack to allow attributes of the same name in different objects.
*/ #define SAS_DEVICE_ATTR(_prefix,_name,_mode,_show,_store) \ struct device_attribute dev_attr_##_prefix##_##_name = \ __ATTR(_name,_mode,_show,_store) /* * Pretty printing helpers */ #define sas_bitfield_name_match(title, table) \ static ssize_t \ get_sas_##title##_names(u32 table_key, char *buf) \ { \ char *prefix = ""; \ ssize_t len = 0; \ int i; \ \ for (i = 0; i < ARRAY_SIZE(table); i++) { \ if (table[i].value & table_key) { \ len += sprintf(buf + len, "%s%s", \ prefix, table[i].name); \ prefix = ", "; \ } \ } \ len += sprintf(buf + len, "\n"); \ return len; \ } #define sas_bitfield_name_set(title, table) \ static ssize_t \ set_sas_##title##_names(u32 *table_key, const char *buf) \ { \ ssize_t len = 0; \ int i; \ \ for (i = 0; i < ARRAY_SIZE(table); i++) { \ len = strlen(table[i].name); \ if (strncmp(buf, table[i].name, len) == 0 && \ (buf[len] == '\n' || buf[len] == '\0')) { \ *table_key = table[i].value; \ return 0; \ } \ } \ return -EINVAL; \ } #define sas_bitfield_name_search(title, table) \ static ssize_t \ get_sas_##title##_names(u32 table_key, char *buf) \ { \ ssize_t len = 0; \ int i; \ \ for (i = 0; i < ARRAY_SIZE(table); i++) { \ if (table[i].value == table_key) { \ len += sprintf(buf + len, "%s", \ table[i].name); \ break; \ } \ } \ len += sprintf(buf + len, "\n"); \ return len; \ } static struct { u32 value; char *name; } sas_device_type_names[] = { { SAS_PHY_UNUSED, "unused" }, { SAS_END_DEVICE, "end device" }, { SAS_EDGE_EXPANDER_DEVICE, "edge expander" }, { SAS_FANOUT_EXPANDER_DEVICE, "fanout expander" }, }; sas_bitfield_name_search(device_type, sas_device_type_names) static struct { u32 value; char *name; } sas_protocol_names[] = { { SAS_PROTOCOL_SATA, "sata" }, { SAS_PROTOCOL_SMP, "smp" }, { SAS_PROTOCOL_STP, "stp" }, { SAS_PROTOCOL_SSP, "ssp" }, }; sas_bitfield_name_match(protocol, sas_protocol_names) static struct { u32 value; char *name; } sas_linkspeed_names[] = { { SAS_LINK_RATE_UNKNOWN, "Unknown" }, { SAS_PHY_DISABLED, "Phy disabled" }, { SAS_LINK_RATE_FAILED, "Link Rate failed" }, { SAS_SATA_SPINUP_HOLD, "Spin-up hold" }, { SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" }, { SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" }, { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" }, { SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" }, { SAS_LINK_RATE_22_5_GBPS, "22.5 Gbit" }, }; sas_bitfield_name_search(linkspeed, sas_linkspeed_names) sas_bitfield_name_set(linkspeed, sas_linkspeed_names) static struct sas_end_device *sas_sdev_to_rdev(struct scsi_device *sdev) { struct sas_rphy *rphy = target_to_rphy(sdev->sdev_target); struct sas_end_device *rdev; BUG_ON(rphy->identify.device_type != SAS_END_DEVICE); rdev = rphy_to_end_device(rphy); return rdev; } static int sas_smp_dispatch(struct bsg_job *job) { struct Scsi_Host *shost = dev_to_shost(job->dev); struct sas_rphy *rphy = NULL; if (!scsi_is_host_device(job->dev)) rphy = dev_to_rphy(job->dev); if (!job->reply_payload.payload_len) { dev_warn(job->dev, "space for a smp response is missing\n"); bsg_job_done(job, -EINVAL, 0); return 0; } to_sas_internal(shost->transportt)->f->smp_handler(job, shost, rphy); return 0; } static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy) { struct request_queue *q; if (!to_sas_internal(shost->transportt)->f->smp_handler) { printk("%s can't handle SMP requests\n", shost->hostt->name); return 0; } if (rphy) { q = bsg_setup_queue(&rphy->dev, dev_name(&rphy->dev), NULL, sas_smp_dispatch, NULL, 0); if (IS_ERR(q)) return PTR_ERR(q); rphy->q = q; } else { char name[20]; snprintf(name, sizeof(name), "sas_host%d", 
shost->host_no); q = bsg_setup_queue(&shost->shost_gendev, name, NULL, sas_smp_dispatch, NULL, 0); if (IS_ERR(q)) return PTR_ERR(q); to_sas_host_attrs(shost)->q = q; } return 0; } /* * SAS host attributes */ static int sas_host_setup(struct transport_container *tc, struct device *dev, struct device *cdev) { struct Scsi_Host *shost = dev_to_shost(dev); struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); struct device *dma_dev = shost->dma_dev; INIT_LIST_HEAD(&sas_host->rphy_list); mutex_init(&sas_host->lock); sas_host->next_target_id = 0; sas_host->next_expander_id = 0; sas_host->next_port_id = 0; if (sas_bsg_initialize(shost, NULL)) dev_printk(KERN_ERR, dev, "fail to a bsg device %d\n", shost->host_no); if (dma_dev->dma_mask) { shost->opt_sectors = min_t(unsigned int, shost->max_sectors, dma_opt_mapping_size(dma_dev) >> SECTOR_SHIFT); } return 0; } static int sas_host_remove(struct transport_container *tc, struct device *dev, struct device *cdev) { struct Scsi_Host *shost = dev_to_shost(dev); struct request_queue *q = to_sas_host_attrs(shost)->q; bsg_remove_queue(q); return 0; } static DECLARE_TRANSPORT_CLASS(sas_host_class, "sas_host", sas_host_setup, sas_host_remove, NULL); static int sas_host_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; struct sas_internal *i; if (!scsi_is_host_device(dev)) return 0; shost = dev_to_shost(dev); if (!shost->transportt) return 0; if (shost->transportt->host_attrs.ac.class != &sas_host_class.class) return 0; i = to_sas_internal(shost->transportt); return &i->t.host_attrs.ac == cont; } static int do_sas_phy_delete(struct device *dev, void *data) { int pass = (int)(unsigned long)data; if (pass == 0 && scsi_is_sas_port(dev)) sas_port_delete(dev_to_sas_port(dev)); else if (pass == 1 && scsi_is_sas_phy(dev)) sas_phy_delete(dev_to_phy(dev)); return 0; } /** * sas_remove_children - tear down a devices SAS data structures * @dev: device belonging to the sas object * * Removes all SAS PHYs and remote PHYs for a given object */ void sas_remove_children(struct device *dev) { device_for_each_child(dev, (void *)0, do_sas_phy_delete); device_for_each_child(dev, (void *)1, do_sas_phy_delete); } EXPORT_SYMBOL(sas_remove_children); /** * sas_remove_host - tear down a Scsi_Host's SAS data structures * @shost: Scsi Host that is torn down * * Removes all SAS PHYs and remote PHYs for a given Scsi_Host and remove the * Scsi_Host as well. * * Note: Do not call scsi_remove_host() on the Scsi_Host any more, as it is * already removed. */ void sas_remove_host(struct Scsi_Host *shost) { sas_remove_children(&shost->shost_gendev); scsi_remove_host(shost); } EXPORT_SYMBOL(sas_remove_host); /** * sas_get_address - return the SAS address of the device * @sdev: scsi device * * Returns the SAS address of the scsi device */ u64 sas_get_address(struct scsi_device *sdev) { struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); return rdev->rphy.identify.sas_address; } EXPORT_SYMBOL(sas_get_address); /** * sas_tlr_supported - checking TLR bit in vpd 0x90 * @sdev: scsi device struct * * Check Transport Layer Retries are supported or not. * If vpd page 0x90 is present, TRL is supported. 
* */ unsigned int sas_tlr_supported(struct scsi_device *sdev) { const int vpd_len = 32; struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); char *buffer = kzalloc(vpd_len, GFP_KERNEL); int ret = 0; if (!buffer) goto out; if (scsi_get_vpd_page(sdev, 0x90, buffer, vpd_len)) goto out; /* * Magic numbers: the VPD Protocol page (0x90) * has a 4 byte header and then one entry per device port * the TLR bit is at offset 8 on each port entry * if we take the first port, that's at total offset 12 */ ret = buffer[12] & 0x01; out: kfree(buffer); rdev->tlr_supported = ret; return ret; } EXPORT_SYMBOL_GPL(sas_tlr_supported); /** * sas_disable_tlr - setting TLR flags * @sdev: scsi device struct * * Seting tlr_enabled flag to 0. * */ void sas_disable_tlr(struct scsi_device *sdev) { struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); rdev->tlr_enabled = 0; } EXPORT_SYMBOL_GPL(sas_disable_tlr); /** * sas_enable_tlr - setting TLR flags * @sdev: scsi device struct * * Seting tlr_enabled flag 1. * */ void sas_enable_tlr(struct scsi_device *sdev) { unsigned int tlr_supported = 0; tlr_supported = sas_tlr_supported(sdev); if (tlr_supported) { struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); rdev->tlr_enabled = 1; } return; } EXPORT_SYMBOL_GPL(sas_enable_tlr); unsigned int sas_is_tlr_enabled(struct scsi_device *sdev) { struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); return rdev->tlr_enabled; } EXPORT_SYMBOL_GPL(sas_is_tlr_enabled); /** * sas_ata_ncq_prio_supported - Check for ATA NCQ command priority support * @sdev: SCSI device * * Check if an ATA device supports NCQ priority using VPD page 89h (ATA * Information). Since this VPD page is implemented only for ATA devices, * this function always returns false for SCSI devices. */ bool sas_ata_ncq_prio_supported(struct scsi_device *sdev) { struct scsi_vpd *vpd; bool ncq_prio_supported = false; rcu_read_lock(); vpd = rcu_dereference(sdev->vpd_pg89); if (vpd && vpd->len >= 214) ncq_prio_supported = (vpd->data[213] >> 4) & 1; rcu_read_unlock(); return ncq_prio_supported; } EXPORT_SYMBOL_GPL(sas_ata_ncq_prio_supported); /* * SAS Phy attributes */ #define sas_phy_show_simple(field, name, format_string, cast) \ static ssize_t \ show_sas_phy_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct sas_phy *phy = transport_class_to_phy(dev); \ \ return snprintf(buf, 20, format_string, cast phy->field); \ } #define sas_phy_simple_attr(field, name, format_string, type) \ sas_phy_show_simple(field, name, format_string, (type)) \ static DEVICE_ATTR(name, S_IRUGO, show_sas_phy_##name, NULL) #define sas_phy_show_protocol(field, name) \ static ssize_t \ show_sas_phy_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct sas_phy *phy = transport_class_to_phy(dev); \ \ if (!phy->field) \ return snprintf(buf, 20, "none\n"); \ return get_sas_protocol_names(phy->field, buf); \ } #define sas_phy_protocol_attr(field, name) \ sas_phy_show_protocol(field, name) \ static DEVICE_ATTR(name, S_IRUGO, show_sas_phy_##name, NULL) #define sas_phy_show_linkspeed(field) \ static ssize_t \ show_sas_phy_##field(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct sas_phy *phy = transport_class_to_phy(dev); \ \ return get_sas_linkspeed_names(phy->field, buf); \ } /* Fudge to tell if we're minimum or maximum */ #define sas_phy_store_linkspeed(field) \ static ssize_t \ store_sas_phy_##field(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ struct sas_phy *phy = 
transport_class_to_phy(dev); \ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); \ struct sas_internal *i = to_sas_internal(shost->transportt); \ u32 value; \ struct sas_phy_linkrates rates = {0}; \ int error; \ \ error = set_sas_linkspeed_names(&value, buf); \ if (error) \ return error; \ rates.field = value; \ error = i->f->set_phy_speed(phy, &rates); \ \ return error ? error : count; \ } #define sas_phy_linkspeed_rw_attr(field) \ sas_phy_show_linkspeed(field) \ sas_phy_store_linkspeed(field) \ static DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, \ store_sas_phy_##field) #define sas_phy_linkspeed_attr(field) \ sas_phy_show_linkspeed(field) \ static DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, NULL) #define sas_phy_show_linkerror(field) \ static ssize_t \ show_sas_phy_##field(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct sas_phy *phy = transport_class_to_phy(dev); \ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); \ struct sas_internal *i = to_sas_internal(shost->transportt); \ int error; \ \ error = i->f->get_linkerrors ? i->f->get_linkerrors(phy) : 0; \ if (error) \ return error; \ return snprintf(buf, 20, "%u\n", phy->field); \ } #define sas_phy_linkerror_attr(field) \ sas_phy_show_linkerror(field) \ static DEVICE_ATTR(field, S_IRUGO, show_sas_phy_##field, NULL) static ssize_t show_sas_device_type(struct device *dev, struct device_attribute *attr, char *buf) { struct sas_phy *phy = transport_class_to_phy(dev); if (!phy->identify.device_type) return snprintf(buf, 20, "none\n"); return get_sas_device_type_names(phy->identify.device_type, buf); } static DEVICE_ATTR(device_type, S_IRUGO, show_sas_device_type, NULL); static ssize_t do_sas_phy_enable(struct device *dev, size_t count, int enable) { struct sas_phy *phy = transport_class_to_phy(dev); struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); struct sas_internal *i = to_sas_internal(shost->transportt); int error; error = i->f->phy_enable(phy, enable); if (error) return error; phy->enabled = enable; return count; }; static ssize_t store_sas_phy_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { if (count < 1) return -EINVAL; switch (buf[0]) { case '0': do_sas_phy_enable(dev, count, 0); break; case '1': do_sas_phy_enable(dev, count, 1); break; default: return -EINVAL; } return count; } static ssize_t show_sas_phy_enable(struct device *dev, struct device_attribute *attr, char *buf) { struct sas_phy *phy = transport_class_to_phy(dev); return snprintf(buf, 20, "%d\n", phy->enabled); } static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, show_sas_phy_enable, store_sas_phy_enable); static ssize_t do_sas_phy_reset(struct device *dev, size_t count, int hard_reset) { struct sas_phy *phy = transport_class_to_phy(dev); struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); struct sas_internal *i = to_sas_internal(shost->transportt); int error; error = i->f->phy_reset(phy, hard_reset); if (error) return error; phy->enabled = 1; return count; }; static ssize_t store_sas_link_reset(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return do_sas_phy_reset(dev, count, 0); } static DEVICE_ATTR(link_reset, S_IWUSR, NULL, store_sas_link_reset); static ssize_t store_sas_hard_reset(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { return do_sas_phy_reset(dev, count, 1); } static DEVICE_ATTR(hard_reset, S_IWUSR, NULL, store_sas_hard_reset); sas_phy_protocol_attr(identify.initiator_port_protocols, 
initiator_port_protocols); sas_phy_protocol_attr(identify.target_port_protocols, target_port_protocols); sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n", unsigned long long); sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8); sas_phy_linkspeed_attr(negotiated_linkrate); sas_phy_linkspeed_attr(minimum_linkrate_hw); sas_phy_linkspeed_rw_attr(minimum_linkrate); sas_phy_linkspeed_attr(maximum_linkrate_hw); sas_phy_linkspeed_rw_attr(maximum_linkrate); sas_phy_linkerror_attr(invalid_dword_count); sas_phy_linkerror_attr(running_disparity_error_count); sas_phy_linkerror_attr(loss_of_dword_sync_count); sas_phy_linkerror_attr(phy_reset_problem_count); static int sas_phy_setup(struct transport_container *tc, struct device *dev, struct device *cdev) { struct sas_phy *phy = dev_to_phy(dev); struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); struct sas_internal *i = to_sas_internal(shost->transportt); if (i->f->phy_setup) i->f->phy_setup(phy); return 0; } static DECLARE_TRANSPORT_CLASS(sas_phy_class, "sas_phy", sas_phy_setup, NULL, NULL); static int sas_phy_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; struct sas_internal *i; if (!scsi_is_sas_phy(dev)) return 0; shost = dev_to_shost(dev->parent); if (!shost->transportt) return 0; if (shost->transportt->host_attrs.ac.class != &sas_host_class.class) return 0; i = to_sas_internal(shost->transportt); return &i->phy_attr_cont.ac == cont; } static void sas_phy_release(struct device *dev) { struct sas_phy *phy = dev_to_phy(dev); struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); struct sas_internal *i = to_sas_internal(shost->transportt); if (i->f->phy_release) i->f->phy_release(phy); put_device(dev->parent); kfree(phy); } /** * sas_phy_alloc - allocates and initialize a SAS PHY structure * @parent: Parent device * @number: Phy index * * Allocates an SAS PHY structure. It will be added in the device tree * below the device specified by @parent, which has to be either a Scsi_Host * or sas_rphy. * * Returns: * SAS PHY allocated or %NULL if the allocation failed. */ struct sas_phy *sas_phy_alloc(struct device *parent, int number) { struct Scsi_Host *shost = dev_to_shost(parent); struct sas_phy *phy; phy = kzalloc(sizeof(*phy), GFP_KERNEL); if (!phy) return NULL; phy->number = number; phy->enabled = 1; device_initialize(&phy->dev); phy->dev.parent = get_device(parent); phy->dev.release = sas_phy_release; INIT_LIST_HEAD(&phy->port_siblings); if (scsi_is_sas_expander_device(parent)) { struct sas_rphy *rphy = dev_to_rphy(parent); dev_set_name(&phy->dev, "phy-%d:%d:%d", shost->host_no, rphy->scsi_target_id, number); } else dev_set_name(&phy->dev, "phy-%d:%d", shost->host_no, number); transport_setup_device(&phy->dev); return phy; } EXPORT_SYMBOL(sas_phy_alloc); /** * sas_phy_add - add a SAS PHY to the device hierarchy * @phy: The PHY to be added * * Publishes a SAS PHY to the rest of the system. */ int sas_phy_add(struct sas_phy *phy) { int error; error = device_add(&phy->dev); if (error) return error; error = transport_add_device(&phy->dev); if (error) { device_del(&phy->dev); return error; } transport_configure_device(&phy->dev); return 0; } EXPORT_SYMBOL(sas_phy_add); /** * sas_phy_free - free a SAS PHY * @phy: SAS PHY to free * * Frees the specified SAS PHY. * * Note: * This function must only be called on a PHY that has not * successfully been added using sas_phy_add(). 
*/ void sas_phy_free(struct sas_phy *phy) { transport_destroy_device(&phy->dev); put_device(&phy->dev); } EXPORT_SYMBOL(sas_phy_free); /** * sas_phy_delete - remove SAS PHY * @phy: SAS PHY to remove * * Removes the specified SAS PHY. If the SAS PHY has an * associated remote PHY it is removed before. */ void sas_phy_delete(struct sas_phy *phy) { struct device *dev = &phy->dev; /* this happens if the phy is still part of a port when deleted */ BUG_ON(!list_empty(&phy->port_siblings)); transport_remove_device(dev); device_del(dev); transport_destroy_device(dev); put_device(dev); } EXPORT_SYMBOL(sas_phy_delete); /** * scsi_is_sas_phy - check if a struct device represents a SAS PHY * @dev: device to check * * Returns: * %1 if the device represents a SAS PHY, %0 else */ int scsi_is_sas_phy(const struct device *dev) { return dev->release == sas_phy_release; } EXPORT_SYMBOL(scsi_is_sas_phy); /* * SAS Port attributes */ #define sas_port_show_simple(field, name, format_string, cast) \ static ssize_t \ show_sas_port_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct sas_port *port = transport_class_to_sas_port(dev); \ \ return snprintf(buf, 20, format_string, cast port->field); \ } #define sas_port_simple_attr(field, name, format_string, type) \ sas_port_show_simple(field, name, format_string, (type)) \ static DEVICE_ATTR(name, S_IRUGO, show_sas_port_##name, NULL) sas_port_simple_attr(num_phys, num_phys, "%d\n", int); static DECLARE_TRANSPORT_CLASS(sas_port_class, "sas_port", NULL, NULL, NULL); static int sas_port_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; struct sas_internal *i; if (!scsi_is_sas_port(dev)) return 0; shost = dev_to_shost(dev->parent); if (!shost->transportt) return 0; if (shost->transportt->host_attrs.ac.class != &sas_host_class.class) return 0; i = to_sas_internal(shost->transportt); return &i->port_attr_cont.ac == cont; } static void sas_port_release(struct device *dev) { struct sas_port *port = dev_to_sas_port(dev); BUG_ON(!list_empty(&port->phy_list)); put_device(dev->parent); kfree(port); } static void sas_port_create_link(struct sas_port *port, struct sas_phy *phy) { int res; res = sysfs_create_link(&port->dev.kobj, &phy->dev.kobj, dev_name(&phy->dev)); if (res) goto err; res = sysfs_create_link(&phy->dev.kobj, &port->dev.kobj, "port"); if (res) goto err; return; err: printk(KERN_ERR "%s: Cannot create port links, err=%d\n", __func__, res); } static void sas_port_delete_link(struct sas_port *port, struct sas_phy *phy) { sysfs_remove_link(&port->dev.kobj, dev_name(&phy->dev)); sysfs_remove_link(&phy->dev.kobj, "port"); } /** * sas_port_alloc - allocate and initialize a SAS port structure * * @parent: parent device * @port_id: port number * * Allocates a SAS port structure. It will be added to the device tree * below the device specified by @parent which must be either a Scsi_Host * or a sas_expander_device. 
* * Returns: %NULL on error */ struct sas_port *sas_port_alloc(struct device *parent, int port_id) { struct Scsi_Host *shost = dev_to_shost(parent); struct sas_port *port; port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) return NULL; port->port_identifier = port_id; device_initialize(&port->dev); port->dev.parent = get_device(parent); port->dev.release = sas_port_release; mutex_init(&port->phy_list_mutex); INIT_LIST_HEAD(&port->phy_list); if (scsi_is_sas_expander_device(parent)) { struct sas_rphy *rphy = dev_to_rphy(parent); dev_set_name(&port->dev, "port-%d:%d:%d", shost->host_no, rphy->scsi_target_id, port->port_identifier); } else dev_set_name(&port->dev, "port-%d:%d", shost->host_no, port->port_identifier); transport_setup_device(&port->dev); return port; } EXPORT_SYMBOL(sas_port_alloc); /** * sas_port_alloc_num - allocate and initialize a SAS port structure * * @parent: parent device * * Allocates a SAS port structure and a number to go with it. This * interface is really for adapters where the port number has no * meansing, so the sas class should manage them. It will be added to * the device tree below the device specified by @parent which must be * either a Scsi_Host or a sas_expander_device. * * Returns: %NULL on error */ struct sas_port *sas_port_alloc_num(struct device *parent) { int index; struct Scsi_Host *shost = dev_to_shost(parent); struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); /* FIXME: use idr for this eventually */ mutex_lock(&sas_host->lock); if (scsi_is_sas_expander_device(parent)) { struct sas_rphy *rphy = dev_to_rphy(parent); struct sas_expander_device *exp = rphy_to_expander_device(rphy); index = exp->next_port_id++; } else index = sas_host->next_port_id++; mutex_unlock(&sas_host->lock); return sas_port_alloc(parent, index); } EXPORT_SYMBOL(sas_port_alloc_num); /** * sas_port_add - add a SAS port to the device hierarchy * @port: port to be added * * publishes a port to the rest of the system */ int sas_port_add(struct sas_port *port) { int error; /* No phys should be added until this is made visible */ BUG_ON(!list_empty(&port->phy_list)); error = device_add(&port->dev); if (error) return error; transport_add_device(&port->dev); transport_configure_device(&port->dev); return 0; } EXPORT_SYMBOL(sas_port_add); /** * sas_port_free - free a SAS PORT * @port: SAS PORT to free * * Frees the specified SAS PORT. * * Note: * This function must only be called on a PORT that has not * successfully been added using sas_port_add(). */ void sas_port_free(struct sas_port *port) { transport_destroy_device(&port->dev); put_device(&port->dev); } EXPORT_SYMBOL(sas_port_free); /** * sas_port_delete - remove SAS PORT * @port: SAS PORT to remove * * Removes the specified SAS PORT. If the SAS PORT has an * associated phys, unlink them from the port as well. 
*/ void sas_port_delete(struct sas_port *port) { struct device *dev = &port->dev; struct sas_phy *phy, *tmp_phy; if (port->rphy) { sas_rphy_delete(port->rphy); port->rphy = NULL; } mutex_lock(&port->phy_list_mutex); list_for_each_entry_safe(phy, tmp_phy, &port->phy_list, port_siblings) { sas_port_delete_link(port, phy); list_del_init(&phy->port_siblings); } mutex_unlock(&port->phy_list_mutex); if (port->is_backlink) { struct device *parent = port->dev.parent; sysfs_remove_link(&port->dev.kobj, dev_name(parent)); port->is_backlink = 0; } transport_remove_device(dev); device_del(dev); transport_destroy_device(dev); put_device(dev); } EXPORT_SYMBOL(sas_port_delete); /** * scsi_is_sas_port - check if a struct device represents a SAS port * @dev: device to check * * Returns: * %1 if the device represents a SAS Port, %0 else */ int scsi_is_sas_port(const struct device *dev) { return dev->release == sas_port_release; } EXPORT_SYMBOL(scsi_is_sas_port); /** * sas_port_get_phy - try to take a reference on a port member * @port: port to check */ struct sas_phy *sas_port_get_phy(struct sas_port *port) { struct sas_phy *phy; mutex_lock(&port->phy_list_mutex); if (list_empty(&port->phy_list)) phy = NULL; else { struct list_head *ent = port->phy_list.next; phy = list_entry(ent, typeof(*phy), port_siblings); get_device(&phy->dev); } mutex_unlock(&port->phy_list_mutex); return phy; } EXPORT_SYMBOL(sas_port_get_phy); /** * sas_port_add_phy - add another phy to a port to form a wide port * @port: port to add the phy to * @phy: phy to add * * When a port is initially created, it is empty (has no phys). All * ports must have at least one phy to operated, and all wide ports * must have at least two. The current code makes no difference * between ports and wide ports, but the only object that can be * connected to a remote device is a port, so ports must be formed on * all devices with phys if they're connected to anything. */ void sas_port_add_phy(struct sas_port *port, struct sas_phy *phy) { mutex_lock(&port->phy_list_mutex); if (unlikely(!list_empty(&phy->port_siblings))) { /* make sure we're already on this port */ struct sas_phy *tmp; list_for_each_entry(tmp, &port->phy_list, port_siblings) if (tmp == phy) break; /* If this trips, you added a phy that was already * part of a different port */ if (unlikely(tmp != phy)) { dev_printk(KERN_ERR, &port->dev, "trying to add phy %s fails: it's already part of another port\n", dev_name(&phy->dev)); BUG(); } } else { sas_port_create_link(port, phy); list_add_tail(&phy->port_siblings, &port->phy_list); port->num_phys++; } mutex_unlock(&port->phy_list_mutex); } EXPORT_SYMBOL(sas_port_add_phy); /** * sas_port_delete_phy - remove a phy from a port or wide port * @port: port to remove the phy from * @phy: phy to remove * * This operation is used for tearing down ports again. It must be * done to every port or wide port before calling sas_port_delete. 
*/ void sas_port_delete_phy(struct sas_port *port, struct sas_phy *phy) { mutex_lock(&port->phy_list_mutex); sas_port_delete_link(port, phy); list_del_init(&phy->port_siblings); port->num_phys--; mutex_unlock(&port->phy_list_mutex); } EXPORT_SYMBOL(sas_port_delete_phy); void sas_port_mark_backlink(struct sas_port *port) { int res; struct device *parent = port->dev.parent->parent->parent; if (port->is_backlink) return; port->is_backlink = 1; res = sysfs_create_link(&port->dev.kobj, &parent->kobj, dev_name(parent)); if (res) goto err; return; err: printk(KERN_ERR "%s: Cannot create port backlink, err=%d\n", __func__, res); } EXPORT_SYMBOL(sas_port_mark_backlink); /* * SAS remote PHY attributes. */ #define sas_rphy_show_simple(field, name, format_string, cast) \ static ssize_t \ show_sas_rphy_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct sas_rphy *rphy = transport_class_to_rphy(dev); \ \ return snprintf(buf, 20, format_string, cast rphy->field); \ } #define sas_rphy_simple_attr(field, name, format_string, type) \ sas_rphy_show_simple(field, name, format_string, (type)) \ static SAS_DEVICE_ATTR(rphy, name, S_IRUGO, \ show_sas_rphy_##name, NULL) #define sas_rphy_show_protocol(field, name) \ static ssize_t \ show_sas_rphy_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct sas_rphy *rphy = transport_class_to_rphy(dev); \ \ if (!rphy->field) \ return snprintf(buf, 20, "none\n"); \ return get_sas_protocol_names(rphy->field, buf); \ } #define sas_rphy_protocol_attr(field, name) \ sas_rphy_show_protocol(field, name) \ static SAS_DEVICE_ATTR(rphy, name, S_IRUGO, \ show_sas_rphy_##name, NULL) static ssize_t show_sas_rphy_device_type(struct device *dev, struct device_attribute *attr, char *buf) { struct sas_rphy *rphy = transport_class_to_rphy(dev); if (!rphy->identify.device_type) return snprintf(buf, 20, "none\n"); return get_sas_device_type_names( rphy->identify.device_type, buf); } static SAS_DEVICE_ATTR(rphy, device_type, S_IRUGO, show_sas_rphy_device_type, NULL); static ssize_t show_sas_rphy_enclosure_identifier(struct device *dev, struct device_attribute *attr, char *buf) { struct sas_rphy *rphy = transport_class_to_rphy(dev); struct sas_phy *phy = dev_to_phy(rphy->dev.parent); struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); struct sas_internal *i = to_sas_internal(shost->transportt); u64 identifier; int error; error = i->f->get_enclosure_identifier(rphy, &identifier); if (error) return error; return sprintf(buf, "0x%llx\n", (unsigned long long)identifier); } static SAS_DEVICE_ATTR(rphy, enclosure_identifier, S_IRUGO, show_sas_rphy_enclosure_identifier, NULL); static ssize_t show_sas_rphy_bay_identifier(struct device *dev, struct device_attribute *attr, char *buf) { struct sas_rphy *rphy = transport_class_to_rphy(dev); struct sas_phy *phy = dev_to_phy(rphy->dev.parent); struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); struct sas_internal *i = to_sas_internal(shost->transportt); int val; val = i->f->get_bay_identifier(rphy); if (val < 0) return val; return sprintf(buf, "%d\n", val); } static SAS_DEVICE_ATTR(rphy, bay_identifier, S_IRUGO, show_sas_rphy_bay_identifier, NULL); sas_rphy_protocol_attr(identify.initiator_port_protocols, initiator_port_protocols); sas_rphy_protocol_attr(identify.target_port_protocols, target_port_protocols); sas_rphy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n", unsigned long long); sas_rphy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8); 
sas_rphy_simple_attr(scsi_target_id, scsi_target_id, "%d\n", u32); /* only need 8 bytes of data plus header (4 or 8) */ #define BUF_SIZE 64 int sas_read_port_mode_page(struct scsi_device *sdev) { char *buffer = kzalloc(BUF_SIZE, GFP_KERNEL), *msdata; struct sas_end_device *rdev = sas_sdev_to_rdev(sdev); struct scsi_mode_data mode_data; int error; if (!buffer) return -ENOMEM; error = scsi_mode_sense(sdev, 1, 0x19, 0, buffer, BUF_SIZE, 30*HZ, 3, &mode_data, NULL); if (error) goto out; msdata = buffer + mode_data.header_length + mode_data.block_descriptor_length; if (msdata - buffer > BUF_SIZE - 8) goto out; error = 0; rdev->ready_led_meaning = msdata[2] & 0x10 ? 1 : 0; rdev->I_T_nexus_loss_timeout = (msdata[4] << 8) + msdata[5]; rdev->initiator_response_timeout = (msdata[6] << 8) + msdata[7]; out: kfree(buffer); return error; } EXPORT_SYMBOL(sas_read_port_mode_page); static DECLARE_TRANSPORT_CLASS(sas_end_dev_class, "sas_end_device", NULL, NULL, NULL); #define sas_end_dev_show_simple(field, name, format_string, cast) \ static ssize_t \ show_sas_end_dev_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct sas_rphy *rphy = transport_class_to_rphy(dev); \ struct sas_end_device *rdev = rphy_to_end_device(rphy); \ \ return snprintf(buf, 20, format_string, cast rdev->field); \ } #define sas_end_dev_simple_attr(field, name, format_string, type) \ sas_end_dev_show_simple(field, name, format_string, (type)) \ static SAS_DEVICE_ATTR(end_dev, name, S_IRUGO, \ show_sas_end_dev_##name, NULL) sas_end_dev_simple_attr(ready_led_meaning, ready_led_meaning, "%d\n", int); sas_end_dev_simple_attr(I_T_nexus_loss_timeout, I_T_nexus_loss_timeout, "%d\n", int); sas_end_dev_simple_attr(initiator_response_timeout, initiator_response_timeout, "%d\n", int); sas_end_dev_simple_attr(tlr_supported, tlr_supported, "%d\n", int); sas_end_dev_simple_attr(tlr_enabled, tlr_enabled, "%d\n", int); static DECLARE_TRANSPORT_CLASS(sas_expander_class, "sas_expander", NULL, NULL, NULL); #define sas_expander_show_simple(field, name, format_string, cast) \ static ssize_t \ show_sas_expander_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct sas_rphy *rphy = transport_class_to_rphy(dev); \ struct sas_expander_device *edev = rphy_to_expander_device(rphy); \ \ return snprintf(buf, 20, format_string, cast edev->field); \ } #define sas_expander_simple_attr(field, name, format_string, type) \ sas_expander_show_simple(field, name, format_string, (type)) \ static SAS_DEVICE_ATTR(expander, name, S_IRUGO, \ show_sas_expander_##name, NULL) sas_expander_simple_attr(vendor_id, vendor_id, "%s\n", char *); sas_expander_simple_attr(product_id, product_id, "%s\n", char *); sas_expander_simple_attr(product_rev, product_rev, "%s\n", char *); sas_expander_simple_attr(component_vendor_id, component_vendor_id, "%s\n", char *); sas_expander_simple_attr(component_id, component_id, "%u\n", unsigned int); sas_expander_simple_attr(component_revision_id, component_revision_id, "%u\n", unsigned int); sas_expander_simple_attr(level, level, "%d\n", int); static DECLARE_TRANSPORT_CLASS(sas_rphy_class, "sas_device", NULL, NULL, NULL); static int sas_rphy_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; struct sas_internal *i; if (!scsi_is_sas_rphy(dev)) return 0; shost = dev_to_shost(dev->parent->parent); if (!shost->transportt) return 0; if (shost->transportt->host_attrs.ac.class != &sas_host_class.class) return 0; i = to_sas_internal(shost->transportt); return 
&i->rphy_attr_cont.ac == cont; } static int sas_end_dev_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; struct sas_internal *i; struct sas_rphy *rphy; if (!scsi_is_sas_rphy(dev)) return 0; shost = dev_to_shost(dev->parent->parent); rphy = dev_to_rphy(dev); if (!shost->transportt) return 0; if (shost->transportt->host_attrs.ac.class != &sas_host_class.class) return 0; i = to_sas_internal(shost->transportt); return &i->end_dev_attr_cont.ac == cont && rphy->identify.device_type == SAS_END_DEVICE; } static int sas_expander_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; struct sas_internal *i; struct sas_rphy *rphy; if (!scsi_is_sas_rphy(dev)) return 0; shost = dev_to_shost(dev->parent->parent); rphy = dev_to_rphy(dev); if (!shost->transportt) return 0; if (shost->transportt->host_attrs.ac.class != &sas_host_class.class) return 0; i = to_sas_internal(shost->transportt); return &i->expander_attr_cont.ac == cont && (rphy->identify.device_type == SAS_EDGE_EXPANDER_DEVICE || rphy->identify.device_type == SAS_FANOUT_EXPANDER_DEVICE); } static void sas_expander_release(struct device *dev) { struct sas_rphy *rphy = dev_to_rphy(dev); struct sas_expander_device *edev = rphy_to_expander_device(rphy); put_device(dev->parent); kfree(edev); } static void sas_end_device_release(struct device *dev) { struct sas_rphy *rphy = dev_to_rphy(dev); struct sas_end_device *edev = rphy_to_end_device(rphy); put_device(dev->parent); kfree(edev); } /** * sas_rphy_initialize - common rphy initialization * @rphy: rphy to initialise * * Used by both sas_end_device_alloc() and sas_expander_alloc() to * initialise the common rphy component of each. */ static void sas_rphy_initialize(struct sas_rphy *rphy) { INIT_LIST_HEAD(&rphy->list); } /** * sas_end_device_alloc - allocate an rphy for an end device * @parent: which port * * Allocates an SAS remote PHY structure, connected to @parent. * * Returns: * SAS PHY allocated or %NULL if the allocation failed. */ struct sas_rphy *sas_end_device_alloc(struct sas_port *parent) { struct Scsi_Host *shost = dev_to_shost(&parent->dev); struct sas_end_device *rdev; rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); if (!rdev) { return NULL; } device_initialize(&rdev->rphy.dev); rdev->rphy.dev.parent = get_device(&parent->dev); rdev->rphy.dev.release = sas_end_device_release; if (scsi_is_sas_expander_device(parent->dev.parent)) { struct sas_rphy *rphy = dev_to_rphy(parent->dev.parent); dev_set_name(&rdev->rphy.dev, "end_device-%d:%d:%d", shost->host_no, rphy->scsi_target_id, parent->port_identifier); } else dev_set_name(&rdev->rphy.dev, "end_device-%d:%d", shost->host_no, parent->port_identifier); rdev->rphy.identify.device_type = SAS_END_DEVICE; sas_rphy_initialize(&rdev->rphy); transport_setup_device(&rdev->rphy.dev); return &rdev->rphy; } EXPORT_SYMBOL(sas_end_device_alloc); /** * sas_expander_alloc - allocate an rphy for an end device * @parent: which port * @type: SAS_EDGE_EXPANDER_DEVICE or SAS_FANOUT_EXPANDER_DEVICE * * Allocates an SAS remote PHY structure, connected to @parent. * * Returns: * SAS PHY allocated or %NULL if the allocation failed. 
*/ struct sas_rphy *sas_expander_alloc(struct sas_port *parent, enum sas_device_type type) { struct Scsi_Host *shost = dev_to_shost(&parent->dev); struct sas_expander_device *rdev; struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); BUG_ON(type != SAS_EDGE_EXPANDER_DEVICE && type != SAS_FANOUT_EXPANDER_DEVICE); rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); if (!rdev) { return NULL; } device_initialize(&rdev->rphy.dev); rdev->rphy.dev.parent = get_device(&parent->dev); rdev->rphy.dev.release = sas_expander_release; mutex_lock(&sas_host->lock); rdev->rphy.scsi_target_id = sas_host->next_expander_id++; mutex_unlock(&sas_host->lock); dev_set_name(&rdev->rphy.dev, "expander-%d:%d", shost->host_no, rdev->rphy.scsi_target_id); rdev->rphy.identify.device_type = type; sas_rphy_initialize(&rdev->rphy); transport_setup_device(&rdev->rphy.dev); return &rdev->rphy; } EXPORT_SYMBOL(sas_expander_alloc); /** * sas_rphy_add - add a SAS remote PHY to the device hierarchy * @rphy: The remote PHY to be added * * Publishes a SAS remote PHY to the rest of the system. */ int sas_rphy_add(struct sas_rphy *rphy) { struct sas_port *parent = dev_to_sas_port(rphy->dev.parent); struct Scsi_Host *shost = dev_to_shost(parent->dev.parent); struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); struct sas_identify *identify = &rphy->identify; int error; if (parent->rphy) return -ENXIO; parent->rphy = rphy; error = device_add(&rphy->dev); if (error) return error; transport_add_device(&rphy->dev); transport_configure_device(&rphy->dev); if (sas_bsg_initialize(shost, rphy)) printk("fail to a bsg device %s\n", dev_name(&rphy->dev)); mutex_lock(&sas_host->lock); list_add_tail(&rphy->list, &sas_host->rphy_list); if (identify->device_type == SAS_END_DEVICE && (identify->target_port_protocols & (SAS_PROTOCOL_SSP | SAS_PROTOCOL_STP | SAS_PROTOCOL_SATA))) rphy->scsi_target_id = sas_host->next_target_id++; else if (identify->device_type == SAS_END_DEVICE) rphy->scsi_target_id = -1; mutex_unlock(&sas_host->lock); if (identify->device_type == SAS_END_DEVICE && rphy->scsi_target_id != -1) { int lun; if (identify->target_port_protocols & SAS_PROTOCOL_SSP) lun = SCAN_WILD_CARD; else lun = 0; scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, lun, SCSI_SCAN_INITIAL); } return 0; } EXPORT_SYMBOL(sas_rphy_add); /** * sas_rphy_free - free a SAS remote PHY * @rphy: SAS remote PHY to free * * Frees the specified SAS remote PHY. * * Note: * This function must only be called on a remote * PHY that has not successfully been added using * sas_rphy_add() (or has been sas_rphy_remove()'d) */ void sas_rphy_free(struct sas_rphy *rphy) { struct device *dev = &rphy->dev; struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent); struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); mutex_lock(&sas_host->lock); list_del(&rphy->list); mutex_unlock(&sas_host->lock); transport_destroy_device(dev); put_device(dev); } EXPORT_SYMBOL(sas_rphy_free); /** * sas_rphy_delete - remove and free SAS remote PHY * @rphy: SAS remote PHY to remove and free * * Removes the specified SAS remote PHY and frees it. 
*/ void sas_rphy_delete(struct sas_rphy *rphy) { sas_rphy_remove(rphy); sas_rphy_free(rphy); } EXPORT_SYMBOL(sas_rphy_delete); /** * sas_rphy_unlink - unlink SAS remote PHY * @rphy: SAS remote phy to unlink from its parent port * * Removes port reference to an rphy */ void sas_rphy_unlink(struct sas_rphy *rphy) { struct sas_port *parent = dev_to_sas_port(rphy->dev.parent); parent->rphy = NULL; } EXPORT_SYMBOL(sas_rphy_unlink); /** * sas_rphy_remove - remove SAS remote PHY * @rphy: SAS remote phy to remove * * Removes the specified SAS remote PHY. */ void sas_rphy_remove(struct sas_rphy *rphy) { struct device *dev = &rphy->dev; switch (rphy->identify.device_type) { case SAS_END_DEVICE: scsi_remove_target(dev); break; case SAS_EDGE_EXPANDER_DEVICE: case SAS_FANOUT_EXPANDER_DEVICE: sas_remove_children(dev); break; default: break; } sas_rphy_unlink(rphy); bsg_remove_queue(rphy->q); transport_remove_device(dev); device_del(dev); } EXPORT_SYMBOL(sas_rphy_remove); /** * scsi_is_sas_rphy - check if a struct device represents a SAS remote PHY * @dev: device to check * * Returns: * %1 if the device represents a SAS remote PHY, %0 else */ int scsi_is_sas_rphy(const struct device *dev) { return dev->release == sas_end_device_release || dev->release == sas_expander_release; } EXPORT_SYMBOL(scsi_is_sas_rphy); /* * SCSI scan helper */ static int sas_user_scan(struct Scsi_Host *shost, uint channel, uint id, u64 lun) { struct sas_host_attrs *sas_host = to_sas_host_attrs(shost); struct sas_rphy *rphy; mutex_lock(&sas_host->lock); list_for_each_entry(rphy, &sas_host->rphy_list, list) { if (rphy->identify.device_type != SAS_END_DEVICE || rphy->scsi_target_id == -1) continue; if ((channel == SCAN_WILD_CARD || channel == 0) && (id == SCAN_WILD_CARD || id == rphy->scsi_target_id)) { scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, lun, SCSI_SCAN_MANUAL); } } mutex_unlock(&sas_host->lock); return 0; } /* * Setup / Teardown code */ #define SETUP_TEMPLATE(attrb, field, perm, test) \ i->private_##attrb[count] = dev_attr_##field; \ i->private_##attrb[count].attr.mode = perm; \ i->attrb[count] = &i->private_##attrb[count]; \ if (test) \ count++ #define SETUP_TEMPLATE_RW(attrb, field, perm, test, ro_test, ro_perm) \ i->private_##attrb[count] = dev_attr_##field; \ i->private_##attrb[count].attr.mode = perm; \ if (ro_test) { \ i->private_##attrb[count].attr.mode = ro_perm; \ i->private_##attrb[count].store = NULL; \ } \ i->attrb[count] = &i->private_##attrb[count]; \ if (test) \ count++ #define SETUP_RPORT_ATTRIBUTE(field) \ SETUP_TEMPLATE(rphy_attrs, field, S_IRUGO, 1) #define SETUP_OPTIONAL_RPORT_ATTRIBUTE(field, func) \ SETUP_TEMPLATE(rphy_attrs, field, S_IRUGO, i->f->func) #define SETUP_PHY_ATTRIBUTE(field) \ SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, 1) #define SETUP_PHY_ATTRIBUTE_RW(field) \ SETUP_TEMPLATE_RW(phy_attrs, field, S_IRUGO | S_IWUSR, 1, \ !i->f->set_phy_speed, S_IRUGO) #define SETUP_OPTIONAL_PHY_ATTRIBUTE_RW(field, func) \ SETUP_TEMPLATE_RW(phy_attrs, field, S_IRUGO | S_IWUSR, 1, \ !i->f->func, S_IRUGO) #define SETUP_PORT_ATTRIBUTE(field) \ SETUP_TEMPLATE(port_attrs, field, S_IRUGO, 1) #define SETUP_OPTIONAL_PHY_ATTRIBUTE(field, func) \ SETUP_TEMPLATE(phy_attrs, field, S_IRUGO, i->f->func) #define SETUP_PHY_ATTRIBUTE_WRONLY(field) \ SETUP_TEMPLATE(phy_attrs, field, S_IWUSR, 1) #define SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(field, func) \ SETUP_TEMPLATE(phy_attrs, field, S_IWUSR, i->f->func) #define SETUP_END_DEV_ATTRIBUTE(field) \ SETUP_TEMPLATE(end_dev_attrs, field, S_IRUGO, 1) #define 
SETUP_EXPANDER_ATTRIBUTE(field) \ SETUP_TEMPLATE(expander_attrs, expander_##field, S_IRUGO, 1) /** * sas_attach_transport - instantiate SAS transport template * @ft: SAS transport class function template */ struct scsi_transport_template * sas_attach_transport(struct sas_function_template *ft) { struct sas_internal *i; int count; i = kzalloc(sizeof(struct sas_internal), GFP_KERNEL); if (!i) return NULL; i->t.user_scan = sas_user_scan; i->t.host_attrs.ac.attrs = &i->host_attrs[0]; i->t.host_attrs.ac.class = &sas_host_class.class; i->t.host_attrs.ac.match = sas_host_match; transport_container_register(&i->t.host_attrs); i->t.host_size = sizeof(struct sas_host_attrs); i->phy_attr_cont.ac.class = &sas_phy_class.class; i->phy_attr_cont.ac.attrs = &i->phy_attrs[0]; i->phy_attr_cont.ac.match = sas_phy_match; transport_container_register(&i->phy_attr_cont); i->port_attr_cont.ac.class = &sas_port_class.class; i->port_attr_cont.ac.attrs = &i->port_attrs[0]; i->port_attr_cont.ac.match = sas_port_match; transport_container_register(&i->port_attr_cont); i->rphy_attr_cont.ac.class = &sas_rphy_class.class; i->rphy_attr_cont.ac.attrs = &i->rphy_attrs[0]; i->rphy_attr_cont.ac.match = sas_rphy_match; transport_container_register(&i->rphy_attr_cont); i->end_dev_attr_cont.ac.class = &sas_end_dev_class.class; i->end_dev_attr_cont.ac.attrs = &i->end_dev_attrs[0]; i->end_dev_attr_cont.ac.match = sas_end_dev_match; transport_container_register(&i->end_dev_attr_cont); i->expander_attr_cont.ac.class = &sas_expander_class.class; i->expander_attr_cont.ac.attrs = &i->expander_attrs[0]; i->expander_attr_cont.ac.match = sas_expander_match; transport_container_register(&i->expander_attr_cont); i->f = ft; count = 0; SETUP_PHY_ATTRIBUTE(initiator_port_protocols); SETUP_PHY_ATTRIBUTE(target_port_protocols); SETUP_PHY_ATTRIBUTE(device_type); SETUP_PHY_ATTRIBUTE(sas_address); SETUP_PHY_ATTRIBUTE(phy_identifier); SETUP_PHY_ATTRIBUTE(negotiated_linkrate); SETUP_PHY_ATTRIBUTE(minimum_linkrate_hw); SETUP_PHY_ATTRIBUTE_RW(minimum_linkrate); SETUP_PHY_ATTRIBUTE(maximum_linkrate_hw); SETUP_PHY_ATTRIBUTE_RW(maximum_linkrate); SETUP_PHY_ATTRIBUTE(invalid_dword_count); SETUP_PHY_ATTRIBUTE(running_disparity_error_count); SETUP_PHY_ATTRIBUTE(loss_of_dword_sync_count); SETUP_PHY_ATTRIBUTE(phy_reset_problem_count); SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(link_reset, phy_reset); SETUP_OPTIONAL_PHY_ATTRIBUTE_WRONLY(hard_reset, phy_reset); SETUP_OPTIONAL_PHY_ATTRIBUTE_RW(enable, phy_enable); i->phy_attrs[count] = NULL; count = 0; SETUP_PORT_ATTRIBUTE(num_phys); i->port_attrs[count] = NULL; count = 0; SETUP_RPORT_ATTRIBUTE(rphy_initiator_port_protocols); SETUP_RPORT_ATTRIBUTE(rphy_target_port_protocols); SETUP_RPORT_ATTRIBUTE(rphy_device_type); SETUP_RPORT_ATTRIBUTE(rphy_sas_address); SETUP_RPORT_ATTRIBUTE(rphy_phy_identifier); SETUP_RPORT_ATTRIBUTE(rphy_scsi_target_id); SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_enclosure_identifier, get_enclosure_identifier); SETUP_OPTIONAL_RPORT_ATTRIBUTE(rphy_bay_identifier, get_bay_identifier); i->rphy_attrs[count] = NULL; count = 0; SETUP_END_DEV_ATTRIBUTE(end_dev_ready_led_meaning); SETUP_END_DEV_ATTRIBUTE(end_dev_I_T_nexus_loss_timeout); SETUP_END_DEV_ATTRIBUTE(end_dev_initiator_response_timeout); SETUP_END_DEV_ATTRIBUTE(end_dev_tlr_supported); SETUP_END_DEV_ATTRIBUTE(end_dev_tlr_enabled); i->end_dev_attrs[count] = NULL; count = 0; SETUP_EXPANDER_ATTRIBUTE(vendor_id); SETUP_EXPANDER_ATTRIBUTE(product_id); SETUP_EXPANDER_ATTRIBUTE(product_rev); SETUP_EXPANDER_ATTRIBUTE(component_vendor_id); 
SETUP_EXPANDER_ATTRIBUTE(component_id); SETUP_EXPANDER_ATTRIBUTE(component_revision_id); SETUP_EXPANDER_ATTRIBUTE(level); i->expander_attrs[count] = NULL; return &i->t; } EXPORT_SYMBOL(sas_attach_transport); /** * sas_release_transport - release SAS transport template instance * @t: transport template instance */ void sas_release_transport(struct scsi_transport_template *t) { struct sas_internal *i = to_sas_internal(t); transport_container_unregister(&i->t.host_attrs); transport_container_unregister(&i->phy_attr_cont); transport_container_unregister(&i->port_attr_cont); transport_container_unregister(&i->rphy_attr_cont); transport_container_unregister(&i->end_dev_attr_cont); transport_container_unregister(&i->expander_attr_cont); kfree(i); } EXPORT_SYMBOL(sas_release_transport); static __init int sas_transport_init(void) { int error; error = transport_class_register(&sas_host_class); if (error) goto out; error = transport_class_register(&sas_phy_class); if (error) goto out_unregister_transport; error = transport_class_register(&sas_port_class); if (error) goto out_unregister_phy; error = transport_class_register(&sas_rphy_class); if (error) goto out_unregister_port; error = transport_class_register(&sas_end_dev_class); if (error) goto out_unregister_rphy; error = transport_class_register(&sas_expander_class); if (error) goto out_unregister_end_dev; return 0; out_unregister_end_dev: transport_class_unregister(&sas_end_dev_class); out_unregister_rphy: transport_class_unregister(&sas_rphy_class); out_unregister_port: transport_class_unregister(&sas_port_class); out_unregister_phy: transport_class_unregister(&sas_phy_class); out_unregister_transport: transport_class_unregister(&sas_host_class); out: return error; } static void __exit sas_transport_exit(void) { transport_class_unregister(&sas_host_class); transport_class_unregister(&sas_phy_class); transport_class_unregister(&sas_port_class); transport_class_unregister(&sas_rphy_class); transport_class_unregister(&sas_end_dev_class); transport_class_unregister(&sas_expander_class); } MODULE_AUTHOR("Christoph Hellwig"); MODULE_DESCRIPTION("SAS Transport Attributes"); MODULE_LICENSE("GPL"); module_init(sas_transport_init); module_exit(sas_transport_exit); |
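/*
 * Usage sketch (not part of the file above; all my_* names are assumptions):
 * how a SAS low-level driver might attach and later release the transport
 * template created by sas_attach_transport()/sas_release_transport(). Only
 * callbacks that the SETUP_* macros above test for are wired up; a real
 * driver fills in many more and assigns the returned template to its
 * Scsi_Host before scsi_add_host(). Assumed includes: <linux/module.h>,
 * <scsi/scsi_transport_sas.h>.
 */
static int my_phy_reset(struct sas_phy *phy, int hard_reset)
{
	/* would ask the HBA to issue a link or hard reset on this phy */
	return 0;
}

static int my_phy_enable(struct sas_phy *phy, int enable)
{
	/* would enable or disable the phy through the HBA */
	return 0;
}

static struct sas_function_template my_sas_functions = {
	.phy_reset  = my_phy_reset,	/* enables the link_reset/hard_reset attrs */
	.phy_enable = my_phy_enable,	/* makes the 'enable' attr read-write */
};

static struct scsi_transport_template *my_sas_template;

static int __init my_lldd_init(void)
{
	my_sas_template = sas_attach_transport(&my_sas_functions);
	if (!my_sas_template)
		return -ENOMEM;
	/* ... allocate the Scsi_Host, set shost->transportt = my_sas_template ... */
	return 0;
}

static void __exit my_lldd_exit(void)
{
	/* ... remove the Scsi_Host first ... */
	sas_release_transport(my_sas_template);
}

module_init(my_lldd_init);
module_exit(my_lldd_exit);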
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PGALLOC_TRACK_H
#define _LINUX_PGALLOC_TRACK_H

#if defined(CONFIG_MMU)
static inline p4d_t *p4d_alloc_track(struct mm_struct *mm, pgd_t *pgd,
				     unsigned long address,
				     pgtbl_mod_mask *mod_mask)
{
	if (unlikely(pgd_none(*pgd))) {
		if (__p4d_alloc(mm, pgd, address))
			return NULL;
		*mod_mask |= PGTBL_PGD_MODIFIED;
	}

	return p4d_offset(pgd, address);
}

static inline pud_t *pud_alloc_track(struct mm_struct *mm, p4d_t *p4d,
				     unsigned long address,
				     pgtbl_mod_mask *mod_mask)
{
	if (unlikely(p4d_none(*p4d))) {
		if (__pud_alloc(mm, p4d, address))
			return NULL;
		*mod_mask |= PGTBL_P4D_MODIFIED;
	}

	return pud_offset(p4d, address);
}

static inline pmd_t *pmd_alloc_track(struct mm_struct *mm, pud_t *pud,
				     unsigned long address,
				     pgtbl_mod_mask *mod_mask)
{
	if (unlikely(pud_none(*pud))) {
		if (__pmd_alloc(mm, pud, address))
			return NULL;
		*mod_mask |= PGTBL_PUD_MODIFIED;
	}

	return pmd_offset(pud, address);
}
#endif /* CONFIG_MMU */

#define pte_alloc_kernel_track(pmd, address, mask)			\
	((unlikely(pmd_none(*(pmd))) &&					\
	  (__pte_alloc_kernel(pmd) || ({*(mask)|=PGTBL_PMD_MODIFIED;0;})))?\
		NULL: pte_offset_kernel(pmd, address))

#endif /* _LINUX_PGALLOC_TRACK_H */
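/*
 * Usage sketch (an assumption, not part of the header above): populating the
 * kernel page tables for one address while recording which levels had to be
 * allocated in a pgtbl_mod_mask, which is what the *_alloc_track() helpers
 * are for. example_populate_one() is hypothetical; the real callers live in
 * the vmalloc/ioremap page-table code.
 */
static int example_populate_one(unsigned long addr, pgtbl_mod_mask *mask)
{
	pgd_t *pgd = pgd_offset_k(addr);	/* walk the kernel page tables */
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;

	/* set_pte_at(&init_mm, addr, pte, ...) would install the mapping here */
	return 0;
}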
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 | /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2006, Intel Corporation. * * Copyright (C) 2006-2008 Intel Corporation * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> */ #ifndef _IOVA_H_ #define _IOVA_H_ #include <linux/types.h> #include <linux/kernel.h> #include <linux/rbtree.h> #include <linux/dma-mapping.h> /* iova structure */ struct iova { struct rb_node node; unsigned long pfn_hi; /* Highest allocated pfn */ unsigned long pfn_lo; /* Lowest allocated pfn */ }; struct iova_rcache; /* holds all the iova translations for a domain */ struct iova_domain { spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */ struct rb_root rbroot; /* iova domain rbtree root */ struct rb_node *cached_node; /* Save last alloced node */ struct rb_node *cached32_node; /* Save last 32-bit alloced node */ unsigned long granule; /* pfn granularity for this domain */ unsigned long start_pfn; /* Lower limit for this domain */ unsigned long dma_32bit_pfn; unsigned long max32_alloc_size; /* Size of last failed allocation */ struct iova anchor; /* rbtree lookup anchor */ struct iova_rcache *rcaches; struct hlist_node cpuhp_dead; }; static inline unsigned long iova_size(struct iova *iova) { return iova->pfn_hi - iova->pfn_lo + 1; } static inline unsigned long iova_shift(struct iova_domain *iovad) { return __ffs(iovad->granule); } static inline unsigned long iova_mask(struct iova_domain *iovad) { return iovad->granule - 1; } static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova) { return iova & iova_mask(iovad); } static inline size_t iova_align(struct iova_domain *iovad, size_t size) { return ALIGN(size, iovad->granule); } static inline size_t iova_align_down(struct iova_domain *iovad, size_t size) { return ALIGN_DOWN(size, iovad->granule); } static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova) { return (dma_addr_t)iova->pfn_lo << iova_shift(iovad); } static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) { return iova >> iova_shift(iovad); } #if IS_REACHABLE(CONFIG_IOMMU_IOVA) int iova_cache_get(void); void iova_cache_put(void); unsigned long iova_rcache_range(void); void free_iova(struct iova_domain *iovad, unsigned long pfn); void __free_iova(struct iova_domain *iovad, struct iova *iova); struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn, bool size_aligned); void free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size); unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn, bool flush_rcache); struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, unsigned long pfn_hi); void init_iova_domain(struct iova_domain *iovad, unsigned long granule, unsigned long start_pfn); int iova_domain_init_rcaches(struct iova_domain *iovad); struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); void 
put_iova_domain(struct iova_domain *iovad); #else static inline int iova_cache_get(void) { return -ENOTSUPP; } static inline void iova_cache_put(void) { } static inline void free_iova(struct iova_domain *iovad, unsigned long pfn) { } static inline void __free_iova(struct iova_domain *iovad, struct iova *iova) { } static inline struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn, bool size_aligned) { return NULL; } static inline void free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size) { } static inline unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size, unsigned long limit_pfn, bool flush_rcache) { return 0; } static inline struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, unsigned long pfn_hi) { return NULL; } static inline void init_iova_domain(struct iova_domain *iovad, unsigned long granule, unsigned long start_pfn) { } static inline struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) { return NULL; } static inline void put_iova_domain(struct iova_domain *iovad) { } #endif #endif |
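/*
 * Usage sketch (an assumption, not part of the header above): carving DMA
 * addresses out of an iova_domain with a PAGE_SIZE granule. Only interfaces
 * declared above are used; example_iova_usage() and the embedded domain are
 * hypothetical, and error handling is minimal.
 */
static int example_iova_usage(void)
{
	static struct iova_domain domain;
	struct iova *iova;
	dma_addr_t dma;
	int ret;

	ret = iova_cache_get();		/* take a reference on the iova slab cache */
	if (ret)
		return ret;

	/* granule = one page, allocations start at pfn 1 */
	init_iova_domain(&domain, PAGE_SIZE, 1);

	/* one size-aligned range of 16 pages below the 32-bit boundary */
	iova = alloc_iova(&domain, 16, DMA_BIT_MASK(32) >> PAGE_SHIFT, true);
	if (!iova) {
		ret = -ENOMEM;
		goto out;
	}

	dma = iova_dma_addr(&domain, iova);	/* pfn_lo << iova_shift() */
	/* ... hand 'dma' to the IOMMU / device here ... */

	__free_iova(&domain, iova);
out:
	put_iova_domain(&domain);
	iova_cache_put();
	return ret;
}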
21 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 | /* * Copyright (c) 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved. * Copyright (c) 2005-2017 Mellanox Technologies. All rights reserved. * Copyright (c) 2005 Voltaire, Inc. All rights reserved. * Copyright (c) 2005 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef RDMA_CORE_H #define RDMA_CORE_H #include <linux/idr.h> #include <rdma/uverbs_types.h> #include <rdma/uverbs_ioctl.h> #include <rdma/ib_verbs.h> #include <linux/mutex.h> struct ib_uverbs_device; void uverbs_destroy_ufile_hw(struct ib_uverbs_file *ufile, enum rdma_remove_reason reason); int uobj_destroy(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs); /* * Get an ib_uobject that corresponds to the given id from ufile, assuming * the object is from the given type. Lock it to the required access when * applicable. * This function could create (access == NEW), destroy (access == DESTROY) * or unlock (access == READ || access == WRITE) objects if required. * The action will be finalized only when uverbs_finalize_object or * uverbs_finalize_objects are called. 
*/ struct ib_uobject * uverbs_get_uobject_from_file(u16 object_id, enum uverbs_obj_access access, s64 id, struct uverbs_attr_bundle *attrs); void uverbs_finalize_object(struct ib_uobject *uobj, enum uverbs_obj_access access, bool hw_obj_valid, bool commit, struct uverbs_attr_bundle *attrs); int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx); void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile); void release_ufile_idr_uobject(struct ib_uverbs_file *ufile); struct ib_udata *uverbs_get_cleared_udata(struct uverbs_attr_bundle *attrs); /* * This is the runtime description of the uverbs API, used by the syscall * machinery to validate and dispatch calls. */ /* * Depending on ID the slot pointer in the radix tree points at one of these * structs. */ struct uverbs_api_ioctl_method { int(__rcu *handler)(struct uverbs_attr_bundle *attrs); DECLARE_BITMAP(attr_mandatory, UVERBS_API_ATTR_BKEY_LEN); u16 bundle_size; u8 use_stack:1; u8 driver_method:1; u8 disabled:1; u8 has_udata:1; u8 key_bitmap_len; u8 destroy_bkey; }; struct uverbs_api_write_method { int (*handler)(struct uverbs_attr_bundle *attrs); u8 disabled:1; u8 is_ex:1; u8 has_udata:1; u8 has_resp:1; u8 req_size; u8 resp_size; }; struct uverbs_api_attr { struct uverbs_attr_spec spec; }; struct uverbs_api { /* radix tree contains struct uverbs_api_* pointers */ struct radix_tree_root radix; enum rdma_driver_id driver_id; unsigned int num_write; unsigned int num_write_ex; struct uverbs_api_write_method notsupp_method; const struct uverbs_api_write_method **write_methods; const struct uverbs_api_write_method **write_ex_methods; }; /* * Get an uverbs_api_object that corresponds to the given object_id. * Note: * -ENOMSG means that any object is allowed to match during lookup. 
*/ static inline const struct uverbs_api_object * uapi_get_object(struct uverbs_api *uapi, u16 object_id) { const struct uverbs_api_object *res; if (object_id == UVERBS_IDR_ANY_OBJECT) return ERR_PTR(-ENOMSG); res = radix_tree_lookup(&uapi->radix, uapi_key_obj(object_id)); if (!res) return ERR_PTR(-ENOENT); return res; } char *uapi_key_format(char *S, unsigned int key); struct uverbs_api *uverbs_alloc_api(struct ib_device *ibdev); void uverbs_disassociate_api_pre(struct ib_uverbs_device *uverbs_dev); void uverbs_disassociate_api(struct uverbs_api *uapi); void uverbs_destroy_api(struct uverbs_api *uapi); void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm, unsigned int num_attrs); void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile); extern const struct uapi_definition uverbs_def_obj_async_fd[]; extern const struct uapi_definition uverbs_def_obj_counters[]; extern const struct uapi_definition uverbs_def_obj_cq[]; extern const struct uapi_definition uverbs_def_obj_device[]; extern const struct uapi_definition uverbs_def_obj_dm[]; extern const struct uapi_definition uverbs_def_obj_flow_action[]; extern const struct uapi_definition uverbs_def_obj_intf[]; extern const struct uapi_definition uverbs_def_obj_mr[]; extern const struct uapi_definition uverbs_def_obj_qp[]; extern const struct uapi_definition uverbs_def_obj_srq[]; extern const struct uapi_definition uverbs_def_obj_wq[]; extern const struct uapi_definition uverbs_def_write_intf[]; static inline const struct uverbs_api_write_method * uapi_get_method(const struct uverbs_api *uapi, u32 command) { u32 cmd_idx = command & IB_USER_VERBS_CMD_COMMAND_MASK; if (command & ~(u32)(IB_USER_VERBS_CMD_FLAG_EXTENDED | IB_USER_VERBS_CMD_COMMAND_MASK)) return ERR_PTR(-EINVAL); if (command & IB_USER_VERBS_CMD_FLAG_EXTENDED) { if (cmd_idx >= uapi->num_write_ex) return ERR_PTR(-EOPNOTSUPP); return uapi->write_ex_methods[cmd_idx]; } if (cmd_idx >= uapi->num_write) return ERR_PTR(-EOPNOTSUPP); return uapi->write_methods[cmd_idx]; } void uverbs_fill_udata(struct uverbs_attr_bundle *bundle, struct ib_udata *udata, unsigned int attr_in, unsigned int attr_out); #endif /* RDMA_CORE_H */ |
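/*
 * Illustrative sketch (an assumption, not part of the header above): the
 * shape of a write()-path dispatch built on uapi_get_method(). The real
 * dispatcher lives in the uverbs core; this only shows how the conventions
 * above (ERR_PTR for malformed or unknown commands, the disabled bit, the
 * handler pointer) are meant to be consumed.
 */
static int example_dispatch_write(struct uverbs_api *uapi, u32 command,
				  struct uverbs_attr_bundle *bundle)
{
	const struct uverbs_api_write_method *method;

	method = uapi_get_method(uapi, command);
	if (IS_ERR(method))
		return PTR_ERR(method);	/* -EINVAL or -EOPNOTSUPP */
	if (method->disabled)
		return -EOPNOTSUPP;
	return method->handler(bundle);
}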
19 19 19 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 | // SPDX-License-Identifier: GPL-2.0-only /* * Error string handling * * Plan 9 uses error strings, Unix uses error numbers. These functions * try to help manage that and provide for dynamically adding error * mappings. * * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com> * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/list.h> #include <linux/jhash.h> #include <linux/errno.h> #include <linux/hashtable.h> #include <net/9p/9p.h> /** * struct errormap - map string errors from Plan 9 to Linux numeric ids * @name: string sent over 9P * @val: numeric id most closely representing @name * @namelen: length of string * @list: hash-table list for string lookup */ struct errormap { char *name; int val; int namelen; struct hlist_node list; }; #define ERRHASH_BITS 5 static DEFINE_HASHTABLE(hash_errmap, ERRHASH_BITS); /* FixMe - reduce to a reasonable size */ static struct errormap errmap[] = { {"Operation not permitted", EPERM}, {"wstat prohibited", EPERM}, {"No such file or directory", ENOENT}, {"directory entry not found", ENOENT}, {"file not found", ENOENT}, {"Interrupted system call", EINTR}, {"Input/output error", EIO}, {"No such device or address", ENXIO}, {"Argument list too long", E2BIG}, {"Bad file descriptor", EBADF}, {"Resource temporarily unavailable", EAGAIN}, {"Cannot allocate memory", ENOMEM}, {"Permission denied", EACCES}, {"Bad address", EFAULT}, {"Block device required", ENOTBLK}, {"Device or resource busy", EBUSY}, {"File exists", EEXIST}, {"Invalid cross-device link", EXDEV}, {"No such device", ENODEV}, {"Not a directory", ENOTDIR}, {"Is a directory", EISDIR}, {"Invalid argument", EINVAL}, {"Too many open files in system", ENFILE}, {"Too many open files", EMFILE}, {"Text file busy", ETXTBSY}, {"File too large", EFBIG}, {"No space left on device", ENOSPC}, {"Illegal seek", ESPIPE}, {"Read-only file system", EROFS}, {"Too many links", EMLINK}, {"Broken pipe", EPIPE}, {"Numerical argument out of domain", EDOM}, {"Numerical result out of range", ERANGE}, {"Resource deadlock avoided", EDEADLK}, {"File name too long", ENAMETOOLONG}, {"No locks available", ENOLCK}, {"Function not implemented", ENOSYS}, {"Directory not empty", ENOTEMPTY}, {"Too many levels of symbolic links", ELOOP}, {"No message of desired type", ENOMSG}, {"Identifier removed", EIDRM}, {"No data available", ENODATA}, {"Machine is not on the network", ENONET}, {"Package not installed", ENOPKG}, {"Object is remote", EREMOTE}, {"Link has been severed", ENOLINK}, {"Communication error on send", ECOMM}, {"Protocol error", EPROTO}, {"Bad message", EBADMSG}, {"File descriptor in bad state", EBADFD}, {"Streams pipe error", 
ESTRPIPE}, {"Too many users", EUSERS}, {"Socket operation on non-socket", ENOTSOCK}, {"Message too long", EMSGSIZE}, {"Protocol not available", ENOPROTOOPT}, {"Protocol not supported", EPROTONOSUPPORT}, {"Socket type not supported", ESOCKTNOSUPPORT}, {"Operation not supported", EOPNOTSUPP}, {"Protocol family not supported", EPFNOSUPPORT}, {"Network is down", ENETDOWN}, {"Network is unreachable", ENETUNREACH}, {"Network dropped connection on reset", ENETRESET}, {"Software caused connection abort", ECONNABORTED}, {"Connection reset by peer", ECONNRESET}, {"No buffer space available", ENOBUFS}, {"Transport endpoint is already connected", EISCONN}, {"Transport endpoint is not connected", ENOTCONN}, {"Cannot send after transport endpoint shutdown", ESHUTDOWN}, {"Connection timed out", ETIMEDOUT}, {"Connection refused", ECONNREFUSED}, {"Host is down", EHOSTDOWN}, {"No route to host", EHOSTUNREACH}, {"Operation already in progress", EALREADY}, {"Operation now in progress", EINPROGRESS}, {"Is a named type file", EISNAM}, {"Remote I/O error", EREMOTEIO}, {"Disk quota exceeded", EDQUOT}, /* errors from fossil, vacfs, and u9fs */ {"fid unknown or out of range", EBADF}, {"permission denied", EACCES}, {"file does not exist", ENOENT}, {"authentication failed", ECONNREFUSED}, {"bad offset in directory read", ESPIPE}, {"bad use of fid", EBADF}, {"wstat can't convert between files and directories", EPERM}, {"directory is not empty", ENOTEMPTY}, {"file exists", EEXIST}, {"file already exists", EEXIST}, {"file or directory already exists", EEXIST}, {"fid already in use", EBADF}, {"file in use", ETXTBSY}, {"i/o error", EIO}, {"file already open for I/O", ETXTBSY}, {"illegal mode", EINVAL}, {"illegal name", ENAMETOOLONG}, {"not a directory", ENOTDIR}, {"not a member of proposed group", EPERM}, {"not owner", EACCES}, {"only owner can change group in wstat", EACCES}, {"read only file system", EROFS}, {"no access to special file", EPERM}, {"i/o count too large", EIO}, {"unknown group", EINVAL}, {"unknown user", EINVAL}, {"bogus wstat buffer", EPROTO}, {"exclusive use file already open", EAGAIN}, {"corrupted directory entry", EIO}, {"corrupted file entry", EIO}, {"corrupted block label", EIO}, {"corrupted meta data", EIO}, {"illegal offset", EINVAL}, {"illegal path element", ENOENT}, {"root of file system is corrupted", EIO}, {"corrupted super block", EIO}, {"protocol botch", EPROTO}, {"file system is full", ENOSPC}, {"file is in use", EAGAIN}, {"directory entry is not allocated", ENOENT}, {"file is read only", EROFS}, {"file has been removed", EIDRM}, {"only support truncation to zero length", EPERM}, {"cannot remove root", EPERM}, {"file too big", EFBIG}, {"venti i/o error", EIO}, /* these are not errors */ {"u9fs rhostsauth: no authentication required", 0}, {"u9fs authnone: no authentication required", 0}, {NULL, -1} }; /** * p9_error_init - preload mappings into hash list * */ int p9_error_init(void) { struct errormap *c; u32 hash; /* load initial error map into hash table */ for (c = errmap; c->name; c++) { c->namelen = strlen(c->name); hash = jhash(c->name, c->namelen, 0); INIT_HLIST_NODE(&c->list); hash_add(hash_errmap, &c->list, hash); } return 1; } EXPORT_SYMBOL(p9_error_init); /** * p9_errstr2errno - convert error string to error number * @errstr: error string * @len: length of error string * */ int p9_errstr2errno(char *errstr, int len) { int errno; struct errormap *c; u32 hash; errno = 0; c = NULL; hash = jhash(errstr, len, 0); hash_for_each_possible(hash_errmap, c, list, hash) { if (c->namelen == len 
&& !memcmp(c->name, errstr, len)) { errno = c->val; break; } } if (errno == 0) { /* TODO: if error isn't found, add it dynamically */ errstr[len] = 0; pr_err("%s: server reported unknown error %s\n", __func__, errstr); errno = ESERVERFAULT; } return -errno; } EXPORT_SYMBOL(p9_errstr2errno); |
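/*
 * Usage sketch (an assumption, not part of the file above): mapping a
 * Plan 9 error string received from a server to a negative errno.
 * p9_error_init() must have populated the hash table first; the 9p core
 * calls it once at module init. example_map_rerror() is hypothetical.
 */
static int example_map_rerror(void)
{
	char ename[] = "file does not exist";	/* mutable: unknown strings get NUL-terminated */

	p9_error_init();
	return p9_errstr2errno(ename, strlen(ename));	/* returns -ENOENT */
}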
910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 | // SPDX-License-Identifier: GPL-2.0-only /* net/atm/clip.c - RFC1577 Classical IP over ATM */ /* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ #include <linux/string.h> #include <linux/errno.h> #include <linux/kernel.h> /* for UINT_MAX */ #include <linux/module.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/wait.h> #include <linux/timer.h> #include <linux/if_arp.h> /* for some manifest constants */ #include <linux/notifier.h> #include <linux/atm.h> #include <linux/atmdev.h> #include <linux/atmclip.h> #include <linux/atmarp.h> #include <linux/capability.h> #include <linux/ip.h> /* for net/route.h */ #include <linux/in.h> /* for struct sockaddr_in */ #include <linux/if.h> /* for IFF_UP */ #include <linux/inetdevice.h> #include <linux/bitops.h> #include <linux/poison.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/rcupdate.h> #include <linux/jhash.h> #include <linux/slab.h> #include <net/route.h> /* for struct rtable and routing */ #include <net/icmp.h> /* icmp_send */ #include <net/arp.h> #include <linux/param.h> /* for HZ */ #include <linux/uaccess.h> #include <asm/byteorder.h> /* for htons etc. */ #include <linux/atomic.h> #include "common.h" #include "resources.h" #include <net/atmclip.h> static struct net_device *clip_devs; static struct atm_vcc *atmarpd; static struct timer_list idle_timer; static const struct neigh_ops clip_neigh_ops; static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip) { struct sock *sk; struct atmarp_ctrl *ctrl; struct sk_buff *skb; pr_debug("(%d)\n", type); if (!atmarpd) return -EUNATCH; skb = alloc_skb(sizeof(struct atmarp_ctrl), GFP_ATOMIC); if (!skb) return -ENOMEM; ctrl = skb_put(skb, sizeof(struct atmarp_ctrl)); ctrl->type = type; ctrl->itf_num = itf; ctrl->ip = ip; atm_force_charge(atmarpd, skb->truesize); sk = sk_atm(atmarpd); skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk); return 0; } static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry) { pr_debug("%p to entry %p (neigh %p)\n", clip_vcc, entry, entry->neigh); clip_vcc->entry = entry; clip_vcc->xoff = 0; /* @@@ may overrun buffer by one packet */ clip_vcc->next = entry->vccs; entry->vccs = clip_vcc; entry->neigh->used = jiffies; } static void unlink_clip_vcc(struct clip_vcc *clip_vcc) { struct atmarp_entry *entry = clip_vcc->entry; struct clip_vcc **walk; if (!entry) { pr_err("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc); return; } netif_tx_lock_bh(entry->neigh->dev); /* block clip_start_xmit() */ entry->neigh->used = jiffies; for (walk = &entry->vccs; *walk; walk = &(*walk)->next) if (*walk == clip_vcc) { int error; *walk = clip_vcc->next; /* atomic */ clip_vcc->entry = NULL; if (clip_vcc->xoff) netif_wake_queue(entry->neigh->dev); if (entry->vccs) goto out; entry->expires = jiffies - 1; /* force resolution or expiration */ error = neigh_update(entry->neigh, NULL, NUD_NONE, NEIGH_UPDATE_F_ADMIN, 0); if (error) pr_err("neigh_update failed with %d\n", error); goto out; } pr_err("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc); out: netif_tx_unlock_bh(entry->neigh->dev); } /* The neighbour entry n->lock is held. 
*/ static int neigh_check_cb(struct neighbour *n) { struct atmarp_entry *entry = neighbour_priv(n); struct clip_vcc *cv; if (n->ops != &clip_neigh_ops) return 0; for (cv = entry->vccs; cv; cv = cv->next) { unsigned long exp = cv->last_use + cv->idle_timeout; if (cv->idle_timeout && time_after(jiffies, exp)) { pr_debug("releasing vcc %p->%p of entry %p\n", cv, cv->vcc, entry); vcc_release_async(cv->vcc, -ETIMEDOUT); } } if (entry->vccs || time_before(jiffies, entry->expires)) return 0; if (refcount_read(&n->refcnt) > 1) { struct sk_buff *skb; pr_debug("destruction postponed with ref %d\n", refcount_read(&n->refcnt)); while ((skb = skb_dequeue(&n->arp_queue)) != NULL) dev_kfree_skb(skb); return 0; } pr_debug("expired neigh %p\n", n); return 1; } static void idle_timer_check(struct timer_list *unused) { write_lock(&arp_tbl.lock); __neigh_for_each_release(&arp_tbl, neigh_check_cb); mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ); write_unlock(&arp_tbl.lock); } static int clip_arp_rcv(struct sk_buff *skb) { struct atm_vcc *vcc; pr_debug("\n"); vcc = ATM_SKB(skb)->vcc; if (!vcc || !atm_charge(vcc, skb->truesize)) { dev_kfree_skb_any(skb); return 0; } pr_debug("pushing to %p\n", vcc); pr_debug("using %p\n", CLIP_VCC(vcc)->old_push); CLIP_VCC(vcc)->old_push(vcc, skb); return 0; } static const unsigned char llc_oui[] = { 0xaa, /* DSAP: non-ISO */ 0xaa, /* SSAP: non-ISO */ 0x03, /* Ctrl: Unnumbered Information Command PDU */ 0x00, /* OUI: EtherType */ 0x00, 0x00 }; static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb) { struct clip_vcc *clip_vcc = CLIP_VCC(vcc); pr_debug("\n"); if (!clip_devs) { atm_return(vcc, skb->truesize); kfree_skb(skb); return; } if (!skb) { pr_debug("removing VCC %p\n", clip_vcc); if (clip_vcc->entry) unlink_clip_vcc(clip_vcc); clip_vcc->old_push(vcc, NULL); /* pass on the bad news */ kfree(clip_vcc); return; } atm_return(vcc, skb->truesize); skb->dev = clip_vcc->entry ? clip_vcc->entry->neigh->dev : clip_devs; /* clip_vcc->entry == NULL if we don't have an IP address yet */ if (!skb->dev) { dev_kfree_skb_any(skb); return; } ATM_SKB(skb)->vcc = vcc; skb_reset_mac_header(skb); if (!clip_vcc->encap || skb->len < RFC1483LLC_LEN || memcmp(skb->data, llc_oui, sizeof(llc_oui))) skb->protocol = htons(ETH_P_IP); else { skb->protocol = ((__be16 *)skb->data)[3]; skb_pull(skb, RFC1483LLC_LEN); if (skb->protocol == htons(ETH_P_ARP)) { skb->dev->stats.rx_packets++; skb->dev->stats.rx_bytes += skb->len; clip_arp_rcv(skb); return; } } clip_vcc->last_use = jiffies; skb->dev->stats.rx_packets++; skb->dev->stats.rx_bytes += skb- |