Total coverage: 316764 (17%) of 1864617
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/kernel/printk.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Modified to make sys_syslog() more flexible: added commands to
 * return the last 4k of kernel messages, regardless of whether
 * they've been read or not. Added option to suppress kernel printk's
 * to the console. Added hook for sending the console messages
 * elsewhere, in preparation for a serial line console (someday).
 * Ted Ts'o, 2/11/93.
 * Modified for sysctl support, 1/8/97, Chris Horn.
 * Fixed SMP synchronization, 08/08/99, Manfred Spraul
 *     manfred@colorfullife.com
 * Rewrote bits to get rid of console_lock
 *     01Mar01 Andrew Morton
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/syscalls.h>
#include <linux/syscore_ops.h>
#include <linux/vmcore_info.h>
#include <linux/ratelimit.h>
#include <linux/kmsg_dump.h>
#include <linux/syslog.h>
#include <linux/cpu.h>
#include <linux/rculist.h>
#include <linux/poll.h>
#include <linux/irq_work.h>
#include <linux/ctype.h>
#include <linux/uio.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>

#include <linux/uaccess.h>
#include <asm/sections.h>

#include <trace/events/initcall.h>
#define CREATE_TRACE_POINTS
#include <trace/events/printk.h>

#include "printk_ringbuffer.h"
#include "console_cmdline.h"
#include "braille.h"
#include "internal.h"

int console_printk[4] = {
        CONSOLE_LOGLEVEL_DEFAULT,       /* console_loglevel */
        MESSAGE_LOGLEVEL_DEFAULT,       /* default_message_loglevel */
        CONSOLE_LOGLEVEL_MIN,           /* minimum_console_loglevel */
        CONSOLE_LOGLEVEL_DEFAULT,       /* default_console_loglevel */
};
EXPORT_SYMBOL_GPL(console_printk);

atomic_t ignore_console_lock_warning __read_mostly = ATOMIC_INIT(0);
EXPORT_SYMBOL(ignore_console_lock_warning);

EXPORT_TRACEPOINT_SYMBOL_GPL(console);

/*
 * Low level drivers may need that to know if they can schedule in
 * their unblank() callback or not. So let's export it.
 */
int oops_in_progress;
EXPORT_SYMBOL(oops_in_progress);

/*
 * console_mutex protects console_list updates and console->flags updates.
 * The flags are synchronized only for consoles that are registered, i.e.
 * accessible via the console list.
 */
static DEFINE_MUTEX(console_mutex);

/*
 * console_sem protects updates to console->seq
 * and also provides serialization for console printing.
 */
static DEFINE_SEMAPHORE(console_sem, 1);
HLIST_HEAD(console_list);
EXPORT_SYMBOL_GPL(console_list);
DEFINE_STATIC_SRCU(console_srcu);

/*
 * System may need to suppress printk message under certain
 * circumstances, like after kernel panic happens.
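 */

/*
 * Illustrative userspace sketch (not part of printk.c): the four entries of
 * console_printk[] above are what the kernel.printk sysctl
 * (/proc/sys/kernel/printk) exposes, in the same order. This standalone
 * program only reads and labels them; everything besides that procfs path is
 * local to the example.
 */
#include <stdio.h>

int main(void)
{
        static const char *name[4] = {
                "console_loglevel",
                "default_message_loglevel",
                "minimum_console_loglevel",
                "default_console_loglevel",
        };
        int v[4];
        FILE *f = fopen("/proc/sys/kernel/printk", "r");

        if (!f || fscanf(f, "%d %d %d %d", &v[0], &v[1], &v[2], &v[3]) != 4) {
                perror("kernel.printk");
                return 1;
        }
        for (int i = 0; i < 4; i++)
                printf("%-26s %d\n", name[i], v[i]);
        fclose(f);
        return 0;
}

/*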
*/ int __read_mostly suppress_printk; #ifdef CONFIG_LOCKDEP static struct lockdep_map console_lock_dep_map = { .name = "console_lock" }; void lockdep_assert_console_list_lock_held(void) { lockdep_assert_held(&console_mutex); } EXPORT_SYMBOL(lockdep_assert_console_list_lock_held); #endif #ifdef CONFIG_DEBUG_LOCK_ALLOC bool console_srcu_read_lock_is_held(void) { return srcu_read_lock_held(&console_srcu); } EXPORT_SYMBOL(console_srcu_read_lock_is_held); #endif enum devkmsg_log_bits { __DEVKMSG_LOG_BIT_ON = 0, __DEVKMSG_LOG_BIT_OFF, __DEVKMSG_LOG_BIT_LOCK, }; enum devkmsg_log_masks { DEVKMSG_LOG_MASK_ON = BIT(__DEVKMSG_LOG_BIT_ON), DEVKMSG_LOG_MASK_OFF = BIT(__DEVKMSG_LOG_BIT_OFF), DEVKMSG_LOG_MASK_LOCK = BIT(__DEVKMSG_LOG_BIT_LOCK), }; /* Keep both the 'on' and 'off' bits clear, i.e. ratelimit by default: */ #define DEVKMSG_LOG_MASK_DEFAULT 0 static unsigned int __read_mostly devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT; static int __control_devkmsg(char *str) { size_t len; if (!str) return -EINVAL; len = str_has_prefix(str, "on"); if (len) { devkmsg_log = DEVKMSG_LOG_MASK_ON; return len; } len = str_has_prefix(str, "off"); if (len) { devkmsg_log = DEVKMSG_LOG_MASK_OFF; return len; } len = str_has_prefix(str, "ratelimit"); if (len) { devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT; return len; } return -EINVAL; } static int __init control_devkmsg(char *str) { if (__control_devkmsg(str) < 0) { pr_warn("printk.devkmsg: bad option string '%s'\n", str); return 1; } /* * Set sysctl string accordingly: */ if (devkmsg_log == DEVKMSG_LOG_MASK_ON) strscpy(devkmsg_log_str, "on"); else if (devkmsg_log == DEVKMSG_LOG_MASK_OFF) strscpy(devkmsg_log_str, "off"); /* else "ratelimit" which is set by default. */ /* * Sysctl cannot change it anymore. The kernel command line setting of * this parameter is to force the setting to be permanent throughout the * runtime of the system. This is a precation measure against userspace * trying to be a smarta** and attempting to change it up on us. */ devkmsg_log |= DEVKMSG_LOG_MASK_LOCK; return 1; } __setup("printk.devkmsg=", control_devkmsg); char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit"; #if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL) int devkmsg_sysctl_set_loglvl(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { char old_str[DEVKMSG_STR_MAX_SIZE]; unsigned int old; int err; if (write) { if (devkmsg_log & DEVKMSG_LOG_MASK_LOCK) return -EINVAL; old = devkmsg_log; strscpy(old_str, devkmsg_log_str); } err = proc_dostring(table, write, buffer, lenp, ppos); if (err) return err; if (write) { err = __control_devkmsg(devkmsg_log_str); /* * Do not accept an unknown string OR a known string with * trailing crap... */ if (err < 0 || (err + 1 != *lenp)) { /* ... and restore old setting. */ devkmsg_log = old; strscpy(devkmsg_log_str, old_str); return -EINVAL; } } return 0; } #endif /* CONFIG_PRINTK && CONFIG_SYSCTL */ /** * console_list_lock - Lock the console list * * For console list or console->flags updates */ void console_list_lock(void) { /* * In unregister_console() and console_force_preferred_locked(), * synchronize_srcu() is called with the console_list_lock held. * Therefore it is not allowed that the console_list_lock is taken * with the srcu_lock held. * * Detecting if this context is really in the read-side critical * section is only possible if the appropriate debug options are * enabled. 
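 */

/*
 * Illustrative userspace sketch (not part of printk.c): the same
 * "on"/"off"/"ratelimit" parsing idea as __control_devkmsg() above, with a
 * local str_has_prefix() helper and bit masks. All names here are local to
 * the example.
 */
#include <stdio.h>
#include <string.h>

#define LOG_MASK_ON     (1u << 0)
#define LOG_MASK_OFF    (1u << 1)

static unsigned int log_mask;   /* 0 == ratelimit by default */

static size_t str_has_prefix(const char *str, const char *prefix)
{
        size_t len = strlen(prefix);

        return strncmp(str, prefix, len) == 0 ? len : 0;
}

/* Returns the number of consumed characters, or -1 on an unknown option. */
static int control_log(const char *str)
{
        size_t len;

        if (!str)
                return -1;
        if ((len = str_has_prefix(str, "on")))
                log_mask = LOG_MASK_ON;
        else if ((len = str_has_prefix(str, "off")))
                log_mask = LOG_MASK_OFF;
        else if ((len = str_has_prefix(str, "ratelimit")))
                log_mask = 0;
        else
                return -1;
        return (int)len;
}

int main(void)
{
        const char *opts[] = { "on", "off", "ratelimit", "bogus" };

        for (size_t i = 0; i < sizeof(opts) / sizeof(opts[0]); i++)
                printf("%-10s -> %d (mask 0x%x)\n", opts[i],
                       control_log(opts[i]), log_mask);
        return 0;
}

/*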
*/ WARN_ON_ONCE(debug_lockdep_rcu_enabled() && srcu_read_lock_held(&console_srcu)); mutex_lock(&console_mutex); } EXPORT_SYMBOL(console_list_lock); /** * console_list_unlock - Unlock the console list * * Counterpart to console_list_lock() */ void console_list_unlock(void) { mutex_unlock(&console_mutex); } EXPORT_SYMBOL(console_list_unlock); /** * console_srcu_read_lock - Register a new reader for the * SRCU-protected console list * * Use for_each_console_srcu() to iterate the console list * * Context: Any context. * Return: A cookie to pass to console_srcu_read_unlock(). */ int console_srcu_read_lock(void) __acquires(&console_srcu) { return srcu_read_lock_nmisafe(&console_srcu); } EXPORT_SYMBOL(console_srcu_read_lock); /** * console_srcu_read_unlock - Unregister an old reader from * the SRCU-protected console list * @cookie: cookie returned from console_srcu_read_lock() * * Counterpart to console_srcu_read_lock() */ void console_srcu_read_unlock(int cookie) __releases(&console_srcu) { srcu_read_unlock_nmisafe(&console_srcu, cookie); } EXPORT_SYMBOL(console_srcu_read_unlock); /* * Helper macros to handle lockdep when locking/unlocking console_sem. We use * macros instead of functions so that _RET_IP_ contains useful information. */ #define down_console_sem() do { \ down(&console_sem);\ mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);\ } while (0) static int __down_trylock_console_sem(unsigned long ip) { int lock_failed; unsigned long flags; /* * Here and in __up_console_sem() we need to be in safe mode, * because spindump/WARN/etc from under console ->lock will * deadlock in printk()->down_trylock_console_sem() otherwise. */ printk_safe_enter_irqsave(flags); lock_failed = down_trylock(&console_sem); printk_safe_exit_irqrestore(flags); if (lock_failed) return 1; mutex_acquire(&console_lock_dep_map, 0, 1, ip); return 0; } #define down_trylock_console_sem() __down_trylock_console_sem(_RET_IP_) static void __up_console_sem(unsigned long ip) { unsigned long flags; mutex_release(&console_lock_dep_map, ip); printk_safe_enter_irqsave(flags); up(&console_sem); printk_safe_exit_irqrestore(flags); } #define up_console_sem() __up_console_sem(_RET_IP_) static bool panic_in_progress(void) { return unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID); } /* Return true if a panic is in progress on the current CPU. */ bool this_cpu_in_panic(void) { /* * We can use raw_smp_processor_id() here because it is impossible for * the task to be migrated to the panic_cpu, or away from it. If * panic_cpu has already been set, and we're not currently executing on * that CPU, then we never will be. */ return unlikely(atomic_read(&panic_cpu) == raw_smp_processor_id()); } /* * Return true if a panic is in progress on a remote CPU. * * On true, the local CPU should immediately release any printing resources * that may be needed by the panic CPU. */ bool other_cpu_in_panic(void) { return (panic_in_progress() && !this_cpu_in_panic()); } /* * This is used for debugging the mess that is the VT code by * keeping track if we have the console semaphore held. It's * definitely not the perfect debug tool (we don't know if _WE_ * hold it and are racing, but it helps tracking those weird code * paths in the console code where we end up in places I want * locked without the console semaphore held). 
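 */

/*
 * Illustrative userspace sketch (not part of printk.c): the three panic
 * predicates above reduce to comparisons against one atomically published
 * CPU id. This standalone model uses C11 atomics and a fake "current CPU";
 * PANIC_CPU_INVALID and the helpers below are local to the example.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define PANIC_CPU_INVALID       -1

static atomic_int panic_cpu = PANIC_CPU_INVALID;
static int this_cpu = 2;        /* stand-in for raw_smp_processor_id() */

static bool panic_in_progress(void)
{
        return atomic_load(&panic_cpu) != PANIC_CPU_INVALID;
}

static bool this_cpu_in_panic(void)
{
        return atomic_load(&panic_cpu) == this_cpu;
}

static bool other_cpu_in_panic(void)
{
        return panic_in_progress() && !this_cpu_in_panic();
}

int main(void)
{
        printf("no panic:    other_cpu_in_panic=%d\n", other_cpu_in_panic());

        atomic_store(&panic_cpu, 5);            /* some other CPU panics */
        printf("cpu 5 panic: other_cpu_in_panic=%d\n", other_cpu_in_panic());

        atomic_store(&panic_cpu, this_cpu);     /* we are the panic CPU */
        printf("local panic: other_cpu_in_panic=%d\n", other_cpu_in_panic());
        return 0;
}

/*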
*/ static int console_locked; /* * Array of consoles built from command line options (console=) */ #define MAX_CMDLINECONSOLES 8 static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES]; static int preferred_console = -1; int console_set_on_cmdline; EXPORT_SYMBOL(console_set_on_cmdline); /* Flag: console code may call schedule() */ static int console_may_schedule; enum con_msg_format_flags { MSG_FORMAT_DEFAULT = 0, MSG_FORMAT_SYSLOG = (1 << 0), }; static int console_msg_format = MSG_FORMAT_DEFAULT; /* * The printk log buffer consists of a sequenced collection of records, each * containing variable length message text. Every record also contains its * own meta-data (@info). * * Every record meta-data carries the timestamp in microseconds, as well as * the standard userspace syslog level and syslog facility. The usual kernel * messages use LOG_KERN; userspace-injected messages always carry a matching * syslog facility, by default LOG_USER. The origin of every message can be * reliably determined that way. * * The human readable log message of a record is available in @text, the * length of the message text in @text_len. The stored message is not * terminated. * * Optionally, a record can carry a dictionary of properties (key/value * pairs), to provide userspace with a machine-readable message context. * * Examples for well-defined, commonly used property names are: * DEVICE=b12:8 device identifier * b12:8 block dev_t * c127:3 char dev_t * n8 netdev ifindex * +sound:card0 subsystem:devname * SUBSYSTEM=pci driver-core subsystem name * * Valid characters in property names are [a-zA-Z0-9.-_]. Property names * and values are terminated by a '\0' character. * * Example of record values: * record.text_buf = "it's a line" (unterminated) * record.info.seq = 56 * record.info.ts_nsec = 36863 * record.info.text_len = 11 * record.info.facility = 0 (LOG_KERN) * record.info.flags = 0 * record.info.level = 3 (LOG_ERR) * record.info.caller_id = 299 (task 299) * record.info.dev_info.subsystem = "pci" (terminated) * record.info.dev_info.device = "+pci:0000:00:01.0" (terminated) * * The 'struct printk_info' buffer must never be directly exported to * userspace, it is a kernel-private implementation detail that might * need to be changed in the future, when the requirements change. * * /dev/kmsg exports the structured data in the following line format: * "<level>,<sequnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n" * * Users of the export format should ignore possible additional values * separated by ',', and find the message after the ';' character. * * The optional key/value pairs are attached as continuation lines starting * with a space character and terminated by a newline. All possible * non-prinatable characters are escaped in the "\xff" notation. */ /* syslog_lock protects syslog_* variables and write access to clear_seq. */ static DEFINE_MUTEX(syslog_lock); /* * Specifies if a legacy console is registered. If legacy consoles are * present, it is necessary to perform the console lock/unlock dance * whenever console flushing should occur. */ bool have_legacy_console; /* * Specifies if an nbcon console is registered. If nbcon consoles are present, * synchronous printing of legacy consoles will not occur during panic until * the backtrace has been stored to the ringbuffer. */ bool have_nbcon_console; /* * Specifies if a boot console is registered. If boot consoles are present, * nbcon consoles cannot print simultaneously and must be synchronized by * the console lock. 
This is because boot consoles and nbcon consoles may * have mapped the same hardware. */ bool have_boot_console; /* See printk_legacy_allow_panic_sync() for details. */ bool legacy_allow_panic_sync; #ifdef CONFIG_PRINTK DECLARE_WAIT_QUEUE_HEAD(log_wait); static DECLARE_WAIT_QUEUE_HEAD(legacy_wait); /* All 3 protected by @syslog_lock. */ /* the next printk record to read by syslog(READ) or /proc/kmsg */ static u64 syslog_seq; static size_t syslog_partial; static bool syslog_time; /* True when _all_ printer threads are available for printing. */ bool printk_kthreads_running; struct latched_seq { seqcount_latch_t latch; u64 val[2]; }; /* * The next printk record to read after the last 'clear' command. There are * two copies (updated with seqcount_latch) so that reads can locklessly * access a valid value. Writers are synchronized by @syslog_lock. */ static struct latched_seq clear_seq = { .latch = SEQCNT_LATCH_ZERO(clear_seq.latch), .val[0] = 0, .val[1] = 0, }; #define LOG_LEVEL(v) ((v) & 0x07) #define LOG_FACILITY(v) ((v) >> 3 & 0xff) /* record buffer */ #define LOG_ALIGN __alignof__(unsigned long) #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) #define LOG_BUF_LEN_MAX ((u32)1 << 31) static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN); static char *log_buf = __log_buf; static u32 log_buf_len = __LOG_BUF_LEN; /* * Define the average message size. This only affects the number of * descriptors that will be available. Underestimating is better than * overestimating (too many available descriptors is better than not enough). */ #define PRB_AVGBITS 5 /* 32 character average length */ #if CONFIG_LOG_BUF_SHIFT <= PRB_AVGBITS #error CONFIG_LOG_BUF_SHIFT value too small. #endif _DEFINE_PRINTKRB(printk_rb_static, CONFIG_LOG_BUF_SHIFT - PRB_AVGBITS, PRB_AVGBITS, &__log_buf[0]); static struct printk_ringbuffer printk_rb_dynamic; struct printk_ringbuffer *prb = &printk_rb_static; /* * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before * per_cpu_areas are initialised. This variable is set to true when * it's safe to access per-CPU data. */ static bool __printk_percpu_data_ready __ro_after_init; bool printk_percpu_data_ready(void) { return __printk_percpu_data_ready; } /* Must be called under syslog_lock. */ static void latched_seq_write(struct latched_seq *ls, u64 val) { write_seqcount_latch_begin(&ls->latch); ls->val[0] = val; write_seqcount_latch(&ls->latch); ls->val[1] = val; write_seqcount_latch_end(&ls->latch); } /* Can be called from any context. */ static u64 latched_seq_read_nolock(struct latched_seq *ls) { unsigned int seq; unsigned int idx; u64 val; do { seq = read_seqcount_latch(&ls->latch); idx = seq & 0x1; val = ls->val[idx]; } while (read_seqcount_latch_retry(&ls->latch, seq)); return val; } /* Return log buffer address */ char *log_buf_addr_get(void) { return log_buf; } /* Return log buffer size */ u32 log_buf_len_get(void) { return log_buf_len; } /* * Define how much of the log buffer we could take at maximum. The value * must be greater than two. Note that only half of the buffer is available * when the index points to the middle. */ #define MAX_LOG_TAKE_PART 4 static const char trunc_msg[] = "<truncated>"; static void truncate_msg(u16 *text_len, u16 *trunc_msg_len) { /* * The message should not take the whole buffer. Otherwise, it might * get removed too soon. 
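 */

/*
 * Illustrative sketch (not part of printk.c): the shape of the seqcount-latch
 * scheme behind latched_seq above. Two copies of the value plus a sequence
 * counter let a lockless reader pick the copy selected by the counter's low
 * bit and retry if the counter moved meanwhile. Memory barriers and the real
 * seqcount_latch_t machinery are deliberately omitted; this only shows the
 * two-copy/index/retry structure, with names local to the example.
 */
#include <stdint.h>
#include <stdio.h>

struct latched_u64 {
        unsigned int seq;       /* low bit selects which copy readers use */
        uint64_t val[2];
};

/* Single writer: publish @val so readers always see a complete value. */
static void latched_write(struct latched_u64 *ls, uint64_t val)
{
        ls->seq++;              /* readers switch to val[1] while val[0] changes */
        ls->val[0] = val;
        ls->seq++;              /* readers switch to the fresh val[0] */
        ls->val[1] = val;
}

/* Lockless reader: retry if the writer was active while we looked. */
static uint64_t latched_read(struct latched_u64 *ls)
{
        unsigned int seq;
        uint64_t val;

        do {
                seq = ls->seq;
                val = ls->val[seq & 0x1];
        } while (ls->seq != seq);

        return val;
}

int main(void)
{
        struct latched_u64 clear_seq = { 0, { 0, 0 } };

        latched_write(&clear_seq, 42);
        printf("clear_seq = %llu\n", (unsigned long long)latched_read(&clear_seq));
        return 0;
}

/*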
*/ u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART; if (*text_len > max_text_len) *text_len = max_text_len; /* enable the warning message (if there is room) */ *trunc_msg_len = strlen(trunc_msg); if (*text_len >= *trunc_msg_len) *text_len -= *trunc_msg_len; else *trunc_msg_len = 0; } int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT); static int syslog_action_restricted(int type) { if (dmesg_restrict) return 1; /* * Unless restricted, we allow "read all" and "get buffer size" * for everybody. */ return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER; } static int check_syslog_permissions(int type, int source) { /* * If this is from /proc/kmsg and we've already opened it, then we've * already done the capabilities checks at open time. */ if (source == SYSLOG_FROM_PROC && type != SYSLOG_ACTION_OPEN) goto ok; if (syslog_action_restricted(type)) { if (capable(CAP_SYSLOG)) goto ok; return -EPERM; } ok: return security_syslog(type); } static void append_char(char **pp, char *e, char c) { if (*pp < e) *(*pp)++ = c; } static ssize_t info_print_ext_header(char *buf, size_t size, struct printk_info *info) { u64 ts_usec = info->ts_nsec; char caller[20]; #ifdef CONFIG_PRINTK_CALLER u32 id = info->caller_id; snprintf(caller, sizeof(caller), ",caller=%c%u", id & 0x80000000 ? 'C' : 'T', id & ~0x80000000); #else caller[0] = '\0'; #endif do_div(ts_usec, 1000); return scnprintf(buf, size, "%u,%llu,%llu,%c%s;", (info->facility << 3) | info->level, info->seq, ts_usec, info->flags & LOG_CONT ? 'c' : '-', caller); } static ssize_t msg_add_ext_text(char *buf, size_t size, const char *text, size_t text_len, unsigned char endc) { char *p = buf, *e = buf + size; size_t i; /* escape non-printable characters */ for (i = 0; i < text_len; i++) { unsigned char c = text[i]; if (c < ' ' || c >= 127 || c == '\\') p += scnprintf(p, e - p, "\\x%02x", c); else append_char(&p, e, c); } append_char(&p, e, endc); return p - buf; } static ssize_t msg_add_dict_text(char *buf, size_t size, const char *key, const char *val) { size_t val_len = strlen(val); ssize_t len; if (!val_len) return 0; len = msg_add_ext_text(buf, size, "", 0, ' '); /* dict prefix */ len += msg_add_ext_text(buf + len, size - len, key, strlen(key), '='); len += msg_add_ext_text(buf + len, size - len, val, val_len, '\n'); return len; } static ssize_t msg_print_ext_body(char *buf, size_t size, char *text, size_t text_len, struct dev_printk_info *dev_info) { ssize_t len; len = msg_add_ext_text(buf, size, text, text_len, '\n'); if (!dev_info) goto out; len += msg_add_dict_text(buf + len, size - len, "SUBSYSTEM", dev_info->subsystem); len += msg_add_dict_text(buf + len, size - len, "DEVICE", dev_info->device); out: return len; } /* /dev/kmsg - userspace message inject/listen interface */ struct devkmsg_user { atomic64_t seq; struct ratelimit_state rs; struct mutex lock; struct printk_buffers pbufs; }; static __printf(3, 4) __cold int devkmsg_emit(int facility, int level, const char *fmt, ...) { va_list args; int r; va_start(args, fmt); r = vprintk_emit(facility, level, NULL, fmt, args); va_end(args); return r; } static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from) { char *buf, *line; int level = default_message_loglevel; int facility = 1; /* LOG_USER */ struct file *file = iocb->ki_filp; struct devkmsg_user *user = file->private_data; size_t len = iov_iter_count(from); ssize_t ret = len; if (len > PRINTKRB_RECORD_MAX) return -EINVAL; /* Ignore when user logging is disabled. 
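 */

/*
 * Illustrative userspace sketch (not part of printk.c): the same escaping
 * rule as msg_add_ext_text() above - bytes below ' ', at or above 0x7f, or
 * equal to '\' are emitted as "\x%02x", everything else verbatim. The helper
 * below is local to the example and writes into a fixed buffer.
 */
#include <stdio.h>
#include <string.h>

static size_t add_ext_text(char *buf, size_t size, const char *text,
                           size_t text_len, char endc)
{
        char *p = buf, *e = buf + size;

        for (size_t i = 0; i < text_len; i++) {
                unsigned char c = (unsigned char)text[i];

                if (c < ' ' || c >= 127 || c == '\\') {
                        if (e - p >= 5) {       /* "\xNN" plus a NUL */
                                snprintf(p, 5, "\\x%02x", c);
                                p += 4;
                        }
                } else if (p < e) {
                        *p++ = c;
                }
        }
        if (p < e)
                *p++ = endc;
        return (size_t)(p - buf);
}

int main(void)
{
        const char msg[] = "temp\tok: 42\xb0 C";
        char out[128];
        size_t len = add_ext_text(out, sizeof(out) - 1, msg, strlen(msg), '\n');

        out[len] = '\0';
        fputs(out, stdout);
        return 0;
}

/*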
*/ if (devkmsg_log & DEVKMSG_LOG_MASK_OFF) return len; /* Ratelimit when not explicitly enabled. */ if (!(devkmsg_log & DEVKMSG_LOG_MASK_ON)) { if (!___ratelimit(&user->rs, current->comm)) return ret; } buf = kmalloc(len+1, GFP_KERNEL); if (buf == NULL) return -ENOMEM; buf[len] = '\0'; if (!copy_from_iter_full(buf, len, from)) { kfree(buf); return -EFAULT; } /* * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace * the decimal value represents 32bit, the lower 3 bit are the log * level, the rest are the log facility. * * If no prefix or no userspace facility is specified, we * enforce LOG_USER, to be able to reliably distinguish * kernel-generated messages from userspace-injected ones. */ line = buf; if (line[0] == '<') { char *endp = NULL; unsigned int u; u = simple_strtoul(line + 1, &endp, 10); if (endp && endp[0] == '>') { level = LOG_LEVEL(u); if (LOG_FACILITY(u) != 0) facility = LOG_FACILITY(u); endp++; line = endp; } } devkmsg_emit(facility, level, "%s", line); kfree(buf); return ret; } static ssize_t devkmsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct devkmsg_user *user = file->private_data; char *outbuf = &user->pbufs.outbuf[0]; struct printk_message pmsg = { .pbufs = &user->pbufs, }; ssize_t ret; ret = mutex_lock_interruptible(&user->lock); if (ret) return ret; if (!printk_get_next_message(&pmsg, atomic64_read(&user->seq), true, false)) { if (file->f_flags & O_NONBLOCK) { ret = -EAGAIN; goto out; } /* * Guarantee this task is visible on the waitqueue before * checking the wake condition. * * The full memory barrier within set_current_state() of * prepare_to_wait_event() pairs with the full memory barrier * within wq_has_sleeper(). * * This pairs with __wake_up_klogd:A. */ ret = wait_event_interruptible(log_wait, printk_get_next_message(&pmsg, atomic64_read(&user->seq), true, false)); /* LMM(devkmsg_read:A) */ if (ret) goto out; } if (pmsg.dropped) { /* our last seen message is gone, return error and reset */ atomic64_set(&user->seq, pmsg.seq); ret = -EPIPE; goto out; } atomic64_set(&user->seq, pmsg.seq + 1); if (pmsg.outbuf_len > count) { ret = -EINVAL; goto out; } if (copy_to_user(buf, outbuf, pmsg.outbuf_len)) { ret = -EFAULT; goto out; } ret = pmsg.outbuf_len; out: mutex_unlock(&user->lock); return ret; } /* * Be careful when modifying this function!!! * * Only few operations are supported because the device works only with the * entire variable length messages (records). Non-standard values are * returned in the other cases and has been this way for quite some time. * User space applications might depend on this behavior. */ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence) { struct devkmsg_user *user = file->private_data; loff_t ret = 0; if (offset) return -ESPIPE; switch (whence) { case SEEK_SET: /* the first record */ atomic64_set(&user->seq, prb_first_valid_seq(prb)); break; case SEEK_DATA: /* * The first record after the last SYSLOG_ACTION_CLEAR, * like issued by 'dmesg -c'. Reading /dev/kmsg itself * changes no global state, and does not clear anything. 
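 */

/*
 * Illustrative userspace sketch (not part of printk.c): parsing the optional
 * "<prio>" prefix the way devkmsg_write() above does. The decimal value
 * packs facility and level as (facility << 3) | level; with no prefix or no
 * facility, LOG_USER (1) is assumed so the record stays recognizable as
 * userspace-injected. Names below are local to the example, and the default
 * level of 4 is only a typical default_message_loglevel.
 */
#include <stdio.h>
#include <stdlib.h>

#define LOG_LEVEL(v)    ((v) & 0x07)
#define LOG_FACILITY(v) ((v) >> 3 & 0xff)

static void classify(const char *line, int *facility, int *level)
{
        *facility = 1;          /* LOG_USER */
        *level = 4;             /* typical default_message_loglevel (warn) */

        if (line[0] == '<') {
                char *endp = NULL;
                unsigned int u = (unsigned int)strtoul(line + 1, &endp, 10);

                if (endp && endp[0] == '>') {
                        *level = LOG_LEVEL(u);
                        if (LOG_FACILITY(u) != 0)
                                *facility = LOG_FACILITY(u);
                }
        }
}

int main(void)
{
        const char *lines[] = {
                "<30>daemon info message",
                "plain message",
                "<3>err, no facility",
        };

        for (size_t i = 0; i < sizeof(lines) / sizeof(lines[0]); i++) {
                int fac, lvl;

                classify(lines[i], &fac, &lvl);
                printf("facility=%d level=%d : %s\n", fac, lvl, lines[i]);
        }
        return 0;
}

/*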
*/ atomic64_set(&user->seq, latched_seq_read_nolock(&clear_seq)); break; case SEEK_END: /* after the last record */ atomic64_set(&user->seq, prb_next_seq(prb)); break; default: ret = -EINVAL; } return ret; } static __poll_t devkmsg_poll(struct file *file, poll_table *wait) { struct devkmsg_user *user = file->private_data; struct printk_info info; __poll_t ret = 0; poll_wait(file, &log_wait, wait); if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) { /* return error when data has vanished underneath us */ if (info.seq != atomic64_read(&user->seq)) ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI; else ret = EPOLLIN|EPOLLRDNORM; } return ret; } static int devkmsg_open(struct inode *inode, struct file *file) { struct devkmsg_user *user; int err; if (devkmsg_log & DEVKMSG_LOG_MASK_OFF) return -EPERM; /* write-only does not need any file context */ if ((file->f_flags & O_ACCMODE) != O_WRONLY) { err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL, SYSLOG_FROM_READER); if (err) return err; } user = kvmalloc(sizeof(struct devkmsg_user), GFP_KERNEL); if (!user) return -ENOMEM; ratelimit_default_init(&user->rs); ratelimit_set_flags(&user->rs, RATELIMIT_MSG_ON_RELEASE); mutex_init(&user->lock); atomic64_set(&user->seq, prb_first_valid_seq(prb)); file->private_data = user; return 0; } static int devkmsg_release(struct inode *inode, struct file *file) { struct devkmsg_user *user = file->private_data; ratelimit_state_exit(&user->rs); mutex_destroy(&user->lock); kvfree(user); return 0; } const struct file_operations kmsg_fops = { .open = devkmsg_open, .read = devkmsg_read, .write_iter = devkmsg_write, .llseek = devkmsg_llseek, .poll = devkmsg_poll, .release = devkmsg_release, }; #ifdef CONFIG_VMCORE_INFO /* * This appends the listed symbols to /proc/vmcore * * /proc/vmcore is used by various utilities, like crash and makedumpfile to * obtain access to symbols that are otherwise very difficult to locate. These * symbols are specifically used so that utilities can access and extract the * dmesg log from a vmcore file after a crash. */ void log_buf_vmcoreinfo_setup(void) { struct dev_printk_info *dev_info = NULL; VMCOREINFO_SYMBOL(prb); VMCOREINFO_SYMBOL(printk_rb_static); VMCOREINFO_SYMBOL(clear_seq); /* * Export struct size and field offsets. User space tools can * parse it and detect any changes to structure down the line. 
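 */

/*
 * Illustrative userspace sketch (not part of printk.c): a minimal consumer
 * of the /dev/kmsg interface implemented by kmsg_fops above. It opens the
 * device non-blocking, dumps whatever records are currently available and
 * stops at EAGAIN; EPIPE means the oldest unread records were overwritten,
 * which a reader is expected to tolerate and continue past.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char rec[8192];         /* one record per read(); 8k is comfortably large */
        int fd = open("/dev/kmsg", O_RDONLY | O_NONBLOCK);

        if (fd < 0) {
                perror("open /dev/kmsg");
                return 1;
        }

        for (;;) {
                ssize_t n = read(fd, rec, sizeof(rec) - 1);

                if (n < 0 && errno == EPIPE)    /* records overwritten, keep going */
                        continue;
                if (n <= 0)                     /* EAGAIN: caught up (or error) */
                        break;
                rec[n] = '\0';
                fputs(rec, stdout);     /* "<prio>,<seq>,<ts>,<flags>;text\n" */
        }
        close(fd);
        return 0;
}

/*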
*/ VMCOREINFO_STRUCT_SIZE(printk_ringbuffer); VMCOREINFO_OFFSET(printk_ringbuffer, desc_ring); VMCOREINFO_OFFSET(printk_ringbuffer, text_data_ring); VMCOREINFO_OFFSET(printk_ringbuffer, fail); VMCOREINFO_STRUCT_SIZE(prb_desc_ring); VMCOREINFO_OFFSET(prb_desc_ring, count_bits); VMCOREINFO_OFFSET(prb_desc_ring, descs); VMCOREINFO_OFFSET(prb_desc_ring, infos); VMCOREINFO_OFFSET(prb_desc_ring, head_id); VMCOREINFO_OFFSET(prb_desc_ring, tail_id); VMCOREINFO_STRUCT_SIZE(prb_desc); VMCOREINFO_OFFSET(prb_desc, state_var); VMCOREINFO_OFFSET(prb_desc, text_blk_lpos); VMCOREINFO_STRUCT_SIZE(prb_data_blk_lpos); VMCOREINFO_OFFSET(prb_data_blk_lpos, begin); VMCOREINFO_OFFSET(prb_data_blk_lpos, next); VMCOREINFO_STRUCT_SIZE(printk_info); VMCOREINFO_OFFSET(printk_info, seq); VMCOREINFO_OFFSET(printk_info, ts_nsec); VMCOREINFO_OFFSET(printk_info, text_len); VMCOREINFO_OFFSET(printk_info, caller_id); VMCOREINFO_OFFSET(printk_info, dev_info); VMCOREINFO_STRUCT_SIZE(dev_printk_info); VMCOREINFO_OFFSET(dev_printk_info, subsystem); VMCOREINFO_LENGTH(printk_info_subsystem, sizeof(dev_info->subsystem)); VMCOREINFO_OFFSET(dev_printk_info, device); VMCOREINFO_LENGTH(printk_info_device, sizeof(dev_info->device)); VMCOREINFO_STRUCT_SIZE(prb_data_ring); VMCOREINFO_OFFSET(prb_data_ring, size_bits); VMCOREINFO_OFFSET(prb_data_ring, data); VMCOREINFO_OFFSET(prb_data_ring, head_lpos); VMCOREINFO_OFFSET(prb_data_ring, tail_lpos); VMCOREINFO_SIZE(atomic_long_t); VMCOREINFO_TYPE_OFFSET(atomic_long_t, counter); VMCOREINFO_STRUCT_SIZE(latched_seq); VMCOREINFO_OFFSET(latched_seq, val); } #endif /* requested log_buf_len from kernel cmdline */ static unsigned long __initdata new_log_buf_len; /* we practice scaling the ring buffer by powers of 2 */ static void __init log_buf_len_update(u64 size) { if (size > (u64)LOG_BUF_LEN_MAX) { size = (u64)LOG_BUF_LEN_MAX; pr_err("log_buf over 2G is not supported.\n"); } if (size) size = roundup_pow_of_two(size); if (size > log_buf_len) new_log_buf_len = (unsigned long)size; } /* save requested log_buf_len since it's too early to process it */ static int __init log_buf_len_setup(char *str) { u64 size; if (!str) return -EINVAL; size = memparse(str, &str); log_buf_len_update(size); return 0; } early_param("log_buf_len", log_buf_len_setup); #ifdef CONFIG_SMP #define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT) static void __init log_buf_add_cpu(void) { unsigned int cpu_extra; /* * archs should set up cpu_possible_bits properly with * set_cpu_possible() after setup_arch() but just in * case lets ensure this is valid. 
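 */

/*
 * Illustrative userspace sketch (not part of printk.c): what the
 * log_buf_len= handling above does with a requested size - parse an optional
 * K/M/G suffix (a simplified stand-in for memparse()), clamp it below the
 * 2 GiB limit, and round the result up to a power of two. The helpers are
 * local to the example.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define LOG_BUF_LEN_MAX ((uint32_t)1 << 31)

static uint64_t parse_size(const char *s)
{
        char *end;
        uint64_t size = strtoull(s, &end, 0);

        switch (*end) {
        case 'G': case 'g': size <<= 10; /* fall through */
        case 'M': case 'm': size <<= 10; /* fall through */
        case 'K': case 'k': size <<= 10; break;
        default: break;
        }
        return size;
}

static uint64_t roundup_pow_of_two(uint64_t n)
{
        uint64_t r = 1;

        while (r < n)
                r <<= 1;
        return r;
}

int main(int argc, char **argv)
{
        uint64_t size = parse_size(argc > 1 ? argv[1] : "192K");

        if (size > (uint64_t)LOG_BUF_LEN_MAX) {
                size = LOG_BUF_LEN_MAX;
                fprintf(stderr, "log_buf over 2G is not supported, clamping\n");
        }
        if (size)
                size = roundup_pow_of_two(size);
        printf("effective log_buf_len: %llu bytes\n", (unsigned long long)size);
        return 0;
}

/*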
*/ if (num_possible_cpus() == 1) return; cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN; /* by default this will only continue through for large > 64 CPUs */ if (cpu_extra <= __LOG_BUF_LEN / 2) return; pr_info("log_buf_len individual max cpu contribution: %d bytes\n", __LOG_CPU_MAX_BUF_LEN); pr_info("log_buf_len total cpu_extra contributions: %d bytes\n", cpu_extra); pr_info("log_buf_len min size: %d bytes\n", __LOG_BUF_LEN); log_buf_len_update(cpu_extra + __LOG_BUF_LEN); } #else /* !CONFIG_SMP */ static inline void log_buf_add_cpu(void) {} #endif /* CONFIG_SMP */ static void __init set_percpu_data_ready(void) { __printk_percpu_data_ready = true; } static unsigned int __init add_to_rb(struct printk_ringbuffer *rb, struct printk_record *r) { struct prb_reserved_entry e; struct printk_record dest_r; prb_rec_init_wr(&dest_r, r->info->text_len); if (!prb_reserve(&e, rb, &dest_r)) return 0; memcpy(&dest_r.text_buf[0], &r->text_buf[0], r->info->text_len); dest_r.info->text_len = r->info->text_len; dest_r.info->facility = r->info->facility; dest_r.info->level = r->info->level; dest_r.info->flags = r->info->flags; dest_r.info->ts_nsec = r->info->ts_nsec; dest_r.info->caller_id = r->info->caller_id; memcpy(&dest_r.info->dev_info, &r->info->dev_info, sizeof(dest_r.info->dev_info)); prb_final_commit(&e); return prb_record_text_space(&e); } static char setup_text_buf[PRINTKRB_RECORD_MAX] __initdata; static void print_log_buf_usage_stats(void) { unsigned int descs_count = log_buf_len >> PRB_AVGBITS; size_t meta_data_size; meta_data_size = descs_count * (sizeof(struct prb_desc) + sizeof(struct printk_info)); pr_info("log buffer data + meta data: %u + %zu = %zu bytes\n", log_buf_len, meta_data_size, log_buf_len + meta_data_size); } void __init setup_log_buf(int early) { struct printk_info *new_infos; unsigned int new_descs_count; struct prb_desc *new_descs; struct printk_info info; struct printk_record r; unsigned int text_size; size_t new_descs_size; size_t new_infos_size; unsigned long flags; char *new_log_buf; unsigned int free; u64 seq; /* * Some archs call setup_log_buf() multiple times - first is very * early, e.g. from setup_arch(), and second - when percpu_areas * are initialised. */ if (!early) set_percpu_data_ready(); if (log_buf != __log_buf) return; if (!early && !new_log_buf_len) log_buf_add_cpu(); if (!new_log_buf_len) { /* Show the memory stats only once. 
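 */

/*
 * Illustrative sketch (not part of printk.c): the bookkeeping arithmetic of
 * print_log_buf_usage_stats() above. With PRB_AVGBITS = 5 (a 32-byte average
 * record), a log buffer of N bytes gets N >> 5 descriptors, each paired with
 * a struct prb_desc and a struct printk_info. The struct sizes below are
 * placeholders purely for the printout; the real sizes depend on the
 * architecture and configuration.
 */
#include <stdio.h>

#define PRB_AVGBITS     5       /* 32 character average record length */

int main(void)
{
        unsigned int log_buf_len = 1u << 17;    /* 128 KiB, CONFIG_LOG_BUF_SHIFT=17 */
        size_t sizeof_prb_desc = 24;            /* placeholder, not the real value */
        size_t sizeof_printk_info = 88;         /* placeholder, not the real value */
        unsigned int descs_count = log_buf_len >> PRB_AVGBITS;
        size_t meta = (size_t)descs_count * (sizeof_prb_desc + sizeof_printk_info);

        printf("log buffer data + meta data: %u + %zu = %zu bytes\n",
               log_buf_len, meta, log_buf_len + meta);
        return 0;
}

/*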
*/ if (!early) goto out; return; } new_descs_count = new_log_buf_len >> PRB_AVGBITS; if (new_descs_count == 0) { pr_err("new_log_buf_len: %lu too small\n", new_log_buf_len); goto out; } new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN); if (unlikely(!new_log_buf)) { pr_err("log_buf_len: %lu text bytes not available\n", new_log_buf_len); goto out; } new_descs_size = new_descs_count * sizeof(struct prb_desc); new_descs = memblock_alloc(new_descs_size, LOG_ALIGN); if (unlikely(!new_descs)) { pr_err("log_buf_len: %zu desc bytes not available\n", new_descs_size); goto err_free_log_buf; } new_infos_size = new_descs_count * sizeof(struct printk_info); new_infos = memblock_alloc(new_infos_size, LOG_ALIGN); if (unlikely(!new_infos)) { pr_err("log_buf_len: %zu info bytes not available\n", new_infos_size); goto err_free_descs; } prb_rec_init_rd(&r, &info, &setup_text_buf[0], sizeof(setup_text_buf)); prb_init(&printk_rb_dynamic, new_log_buf, ilog2(new_log_buf_len), new_descs, ilog2(new_descs_count), new_infos); local_irq_save(flags); log_buf_len = new_log_buf_len; log_buf = new_log_buf; new_log_buf_len = 0; free = __LOG_BUF_LEN; prb_for_each_record(0, &printk_rb_static, seq, &r) { text_size = add_to_rb(&printk_rb_dynamic, &r); if (text_size > free) free = 0; else free -= text_size; } prb = &printk_rb_dynamic; local_irq_restore(flags); /* * Copy any remaining messages that might have appeared from * NMI context after copying but before switching to the * dynamic buffer. */ prb_for_each_record(seq, &printk_rb_static, seq, &r) { text_size = add_to_rb(&printk_rb_dynamic, &r); if (text_size > free) free = 0; else free -= text_size; } if (seq != prb_next_seq(&printk_rb_static)) { pr_err("dropped %llu messages\n", prb_next_seq(&printk_rb_static) - seq); } print_log_buf_usage_stats(); pr_info("early log buf free: %u(%u%%)\n", free, (free * 100) / __LOG_BUF_LEN); return; err_free_descs: memblock_free(new_descs, new_descs_size); err_free_log_buf: memblock_free(new_log_buf, new_log_buf_len); out: print_log_buf_usage_stats(); } static bool __read_mostly ignore_loglevel; static int __init ignore_loglevel_setup(char *str) { ignore_loglevel = true; pr_info("debug: ignoring loglevel setting.\n"); return 0; } early_param("ignore_loglevel", ignore_loglevel_setup); module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ignore_loglevel, "ignore loglevel setting (prints all kernel messages to the console)"); static bool suppress_message_printing(int level) { return (level >= console_loglevel && !ignore_loglevel); } #ifdef CONFIG_BOOT_PRINTK_DELAY static int boot_delay; /* msecs delay after each printk during bootup */ static unsigned long long loops_per_msec; /* based on boot_delay */ static int __init boot_delay_setup(char *str) { unsigned long lpj; lpj = preset_lpj ? 
preset_lpj : 1000000; /* some guess */ loops_per_msec = (unsigned long long)lpj / 1000 * HZ; get_option(&str, &boot_delay); if (boot_delay > 10 * 1000) boot_delay = 0; pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, " "HZ: %d, loops_per_msec: %llu\n", boot_delay, preset_lpj, lpj, HZ, loops_per_msec); return 0; } early_param("boot_delay", boot_delay_setup); static void boot_delay_msec(int level) { unsigned long long k; unsigned long timeout; bool suppress = !is_printk_force_console() && suppress_message_printing(level); if ((boot_delay == 0 || system_state >= SYSTEM_RUNNING) || suppress) return; k = (unsigned long long)loops_per_msec * boot_delay; timeout = jiffies + msecs_to_jiffies(boot_delay); while (k) { k--; cpu_relax(); /* * use (volatile) jiffies to prevent * compiler reduction; loop termination via jiffies * is secondary and may or may not happen. */ if (time_after(jiffies, timeout)) break; touch_nmi_watchdog(); } } #else static inline void boot_delay_msec(int level) { } #endif static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME); module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR); static size_t print_syslog(unsigned int level, char *buf) { return sprintf(buf, "<%u>", level); } static size_t print_time(u64 ts, char *buf) { unsigned long rem_nsec = do_div(ts, 1000000000); return sprintf(buf, "[%5lu.%06lu]", (unsigned long)ts, rem_nsec / 1000); } #ifdef CONFIG_PRINTK_CALLER static size_t print_caller(u32 id, char *buf) { char caller[12]; snprintf(caller, sizeof(caller), "%c%u", id & 0x80000000 ? 'C' : 'T', id & ~0x80000000); return sprintf(buf, "[%6s]", caller); } #else #define print_caller(id, buf) 0 #endif static size_t info_print_prefix(const struct printk_info *info, bool syslog, bool time, char *buf) { size_t len = 0; if (syslog) len = print_syslog((info->facility << 3) | info->level, buf); if (time) len += print_time(info->ts_nsec, buf + len); len += print_caller(info->caller_id, buf + len); if (IS_ENABLED(CONFIG_PRINTK_CALLER) || time) { buf[len++] = ' '; buf[len] = '\0'; } return len; } /* * Prepare the record for printing. The text is shifted within the given * buffer to avoid a need for another one. The following operations are * done: * * - Add prefix for each line. * - Drop truncated lines that no longer fit into the buffer. * - Add the trailing newline that has been removed in vprintk_store(). * - Add a string terminator. * * Since the produced string is always terminated, the maximum possible * return value is @r->text_buf_size - 1; * * Return: The length of the updated/prepared text, including the added * prefixes and the newline. The terminator is not counted. The dropped * line(s) are not counted. */ static size_t record_print_text(struct printk_record *r, bool syslog, bool time) { size_t text_len = r->info->text_len; size_t buf_size = r->text_buf_size; char *text = r->text_buf; char prefix[PRINTK_PREFIX_MAX]; bool truncated = false; size_t prefix_len; size_t line_len; size_t len = 0; char *next; /* * If the message was truncated because the buffer was not large * enough, treat the available text as if it were the full text. */ if (text_len > buf_size) text_len = buf_size; prefix_len = info_print_prefix(r->info, syslog, time, prefix); /* * @text_len: bytes of unprocessed text * @line_len: bytes of current line _without_ newline * @text: pointer to beginning of current line * @len: number of bytes prepared in r->text_buf */ for (;;) { next = memchr(text, '\n', text_len); if (next) { line_len = next - text; } else { /* Drop truncated line(s). 
*/ if (truncated) break; line_len = text_len; } /* * Truncate the text if there is not enough space to add the * prefix and a trailing newline and a terminator. */ if (len + prefix_len + text_len + 1 + 1 > buf_size) { /* Drop even the current line if no space. */ if (len + prefix_len + line_len + 1 + 1 > buf_size) break; text_len = buf_size - len - prefix_len - 1 - 1; truncated = true; } memmove(text + prefix_len, text, text_len); memcpy(text, prefix, prefix_len); /* * Increment the prepared length to include the text and * prefix that were just moved+copied. Also increment for the * newline at the end of this line. If this is the last line, * there is no newline, but it will be added immediately below. */ len += prefix_len + line_len + 1; if (text_len == line_len) { /* * This is the last line. Add the trailing newline * removed in vprintk_store(). */ text[prefix_len + line_len] = '\n'; break; } /* * Advance beyond the added prefix and the related line with * its newline. */ text += prefix_len + line_len + 1; /* * The remaining text has only decreased by the line with its * newline. * * Note that @text_len can become zero. It happens when @text * ended with a newline (either due to truncation or the * original string ending with "\n\n"). The loop is correctly * repeated and (if not truncated) an empty line with a prefix * will be prepared. */ text_len -= line_len + 1; } /* * If a buffer was provided, it will be terminated. Space for the * string terminator is guaranteed to be available. The terminator is * not counted in the return value. */ if (buf_size > 0) r->text_buf[len] = 0; return len; } static size_t get_record_print_text_size(struct printk_info *info, unsigned int line_count, bool syslog, bool time) { char prefix[PRINTK_PREFIX_MAX]; size_t prefix_len; prefix_len = info_print_prefix(info, syslog, time, prefix); /* * Each line will be preceded with a prefix. The intermediate * newlines are already within the text, but a final trailing * newline will be added. */ return ((prefix_len * line_count) + info->text_len + 1); } /* * Beginning with @start_seq, find the first record where it and all following * records up to (but not including) @max_seq fit into @size. * * @max_seq is simply an upper bound and does not need to exist. If the caller * does not require an upper bound, -1 can be used for @max_seq. */ static u64 find_first_fitting_seq(u64 start_seq, u64 max_seq, size_t size, bool syslog, bool time) { struct printk_info info; unsigned int line_count; size_t len = 0; u64 seq; /* Determine the size of the records up to @max_seq. */ prb_for_each_info(start_seq, prb, seq, &info, &line_count) { if (info.seq >= max_seq) break; len += get_record_print_text_size(&info, line_count, syslog, time); } /* * Adjust the upper bound for the next loop to avoid subtracting * lengths that were never added. */ if (seq < max_seq) max_seq = seq; /* * Move first record forward until length fits into the buffer. Ignore * newest messages that were not counted in the above cycle. Messages * might appear and get lost in the meantime. This is a best effort * that prevents an infinite loop that could occur with a retry. */ prb_for_each_info(start_seq, prb, seq, &info, &line_count) { if (len <= size || info.seq >= max_seq) break; len -= get_record_print_text_size(&info, line_count, syslog, time); } return seq; } /* The caller is responsible for making sure @size is greater than 0. 
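 *
 * The read blocks until the next unread record becomes available. If the
 * wait is interrupted by a signal, the negative error returned by
 * wait_event_interruptible() is propagated back to the caller.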
*/ static int syslog_print(char __user *buf, int size) { struct printk_info info; struct printk_record r; char *text; int len = 0; u64 seq; text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL); if (!text) return -ENOMEM; prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX); mutex_lock(&syslog_lock); /* * Wait for the @syslog_seq record to be available. @syslog_seq may * change while waiting. */ do { seq = syslog_seq; mutex_unlock(&syslog_lock); /* * Guarantee this task is visible on the waitqueue before * checking the wake condition. * * The full memory barrier within set_current_state() of * prepare_to_wait_event() pairs with the full memory barrier * within wq_has_sleeper(). * * This pairs with __wake_up_klogd:A. */ len = wait_event_interruptible(log_wait, prb_read_valid(prb, seq, NULL)); /* LMM(syslog_print:A) */ mutex_lock(&syslog_lock); if (len) goto out; } while (syslog_seq != seq); /* * Copy records that fit into the buffer. The above cycle makes sure * that the first record is always available. */ do { size_t n; size_t skip; int err; if (!prb_read_valid(prb, syslog_seq, &r)) break; if (r.info->seq != syslog_seq) { /* message is gone, move to next valid one */ syslog_seq = r.info->seq; syslog_partial = 0; } /* * To keep reading/counting partial line consistent, * use printk_time value as of the beginning of a line. */ if (!syslog_partial) syslog_time = printk_time; skip = syslog_partial; n = record_print_text(&r, true, syslog_time); if (n - syslog_partial <= size) { /* message fits into buffer, move forward */ syslog_seq = r.info->seq + 1; n -= syslog_partial; syslog_partial = 0; } else if (!len){ /* partial read(), remember position */ n = size; syslog_partial += n; } else n = 0; if (!n) break; mutex_unlock(&syslog_lock); err = copy_to_user(buf, text + skip, n); mutex_lock(&syslog_lock); if (err) { if (!len) len = -EFAULT; break; } len += n; size -= n; buf += n; } while (size); out: mutex_unlock(&syslog_lock); kfree(text); return len; } static int syslog_print_all(char __user *buf, int size, bool clear) { struct printk_info info; struct printk_record r; char *text; int len = 0; u64 seq; bool time; text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL); if (!text) return -ENOMEM; time = printk_time; /* * Find first record that fits, including all following records, * into the user-provided buffer for this dump. 
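 * Records older than the first fitting one are skipped entirely rather
 * than truncated, so the copied output always starts on a record
 * boundary.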
*/ seq = find_first_fitting_seq(latched_seq_read_nolock(&clear_seq), -1, size, true, time); prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX); prb_for_each_record(seq, prb, seq, &r) { int textlen; textlen = record_print_text(&r, true, time); if (len + textlen > size) { seq--; break; } if (copy_to_user(buf + len, text, textlen)) len = -EFAULT; else len += textlen; if (len < 0) break; } if (clear) { mutex_lock(&syslog_lock); latched_seq_write(&clear_seq, seq); mutex_unlock(&syslog_lock); } kfree(text); return len; } static void syslog_clear(void) { mutex_lock(&syslog_lock); latched_seq_write(&clear_seq, prb_next_seq(prb)); mutex_unlock(&syslog_lock); } int do_syslog(int type, char __user *buf, int len, int source) { struct printk_info info; bool clear = false; static int saved_console_loglevel = LOGLEVEL_DEFAULT; int error; error = check_syslog_permissions(type, source); if (error) return error; switch (type) { case SYSLOG_ACTION_CLOSE: /* Close log */ break; case SYSLOG_ACTION_OPEN: /* Open log */ break; case SYSLOG_ACTION_READ: /* Read from log */ if (!buf || len < 0) return -EINVAL; if (!len) return 0; if (!access_ok(buf, len)) return -EFAULT; error = syslog_print(buf, len); break; /* Read/clear last kernel messages */ case SYSLOG_ACTION_READ_CLEAR: clear = true; fallthrough; /* Read last kernel messages */ case SYSLOG_ACTION_READ_ALL: if (!buf || len < 0) return -EINVAL; if (!len) return 0; if (!access_ok(buf, len)) return -EFAULT; error = syslog_print_all(buf, len, clear); break; /* Clear ring buffer */ case SYSLOG_ACTION_CLEAR: syslog_clear(); break; /* Disable logging to console */ case SYSLOG_ACTION_CONSOLE_OFF: if (saved_console_loglevel == LOGLEVEL_DEFAULT) saved_console_loglevel = console_loglevel; console_loglevel = minimum_console_loglevel; break; /* Enable logging to console */ case SYSLOG_ACTION_CONSOLE_ON: if (saved_console_loglevel != LOGLEVEL_DEFAULT) { console_loglevel = saved_console_loglevel; saved_console_loglevel = LOGLEVEL_DEFAULT; } break; /* Set level of messages printed to console */ case SYSLOG_ACTION_CONSOLE_LEVEL: if (len < 1 || len > 8) return -EINVAL; if (len < minimum_console_loglevel) len = minimum_console_loglevel; console_loglevel = len; /* Implicitly re-enable logging to console */ saved_console_loglevel = LOGLEVEL_DEFAULT; break; /* Number of chars in the log buffer */ case SYSLOG_ACTION_SIZE_UNREAD: mutex_lock(&syslog_lock); if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) { /* No unread messages. */ mutex_unlock(&syslog_lock); return 0; } if (info.seq != syslog_seq) { /* messages are gone, move to first one */ syslog_seq = info.seq; syslog_partial = 0; } if (source == SYSLOG_FROM_PROC) { /* * Short-cut for poll(/"proc/kmsg") which simply checks * for pending data, not the size; return the count of * records, not the length. */ error = prb_next_seq(prb) - syslog_seq; } else { bool time = syslog_partial ? syslog_time : printk_time; unsigned int line_count; u64 seq; prb_for_each_info(syslog_seq, prb, seq, &info, &line_count) { error += get_record_print_text_size(&info, line_count, true, time); time = printk_time; } error -= syslog_partial; } mutex_unlock(&syslog_lock); break; /* Size of the log buffer */ case SYSLOG_ACTION_SIZE_BUFFER: error = log_buf_len; break; default: error = -EINVAL; break; } return error; } SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len) { return do_syslog(type, buf, len, SYSLOG_FROM_READER); } /* * Special console_lock variants that help to reduce the risk of soft-lockups. 
 * They allow passing console_lock to another printk() call using a busy wait.
 */

#ifdef CONFIG_LOCKDEP
static struct lockdep_map console_owner_dep_map = {
	.name = "console_owner"
};
#endif

static DEFINE_RAW_SPINLOCK(console_owner_lock);
static struct task_struct *console_owner;
static bool console_waiter;

/**
 * console_lock_spinning_enable - mark beginning of code where another
 *	thread might safely busy wait
 *
 * This basically converts console_lock into a spinlock. This marks
 * the section where the console_lock owner cannot sleep, because
 * there may be a waiter spinning (like a spinlock). Also it must be
 * ready to hand over the lock at the end of the section.
 */
void console_lock_spinning_enable(void)
{
	/*
	 * Do not use spinning in panic(). The panic CPU wants to keep the lock.
	 * Non-panic CPUs abandon the flush anyway.
	 *
	 * Just keep the lockdep annotation. The panic CPU should avoid
	 * taking console_owner_lock because it might cause a deadlock.
	 * This looks like the easiest way to prevent false lockdep
	 * reports without handling the races in a lockless way.
	 */
	if (panic_in_progress())
		goto lockdep;

	raw_spin_lock(&console_owner_lock);
	console_owner = current;
	raw_spin_unlock(&console_owner_lock);

lockdep:
	/* The waiter may spin on us after setting console_owner */
	spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
}

/**
 * console_lock_spinning_disable_and_check - mark end of code where another
 *	thread was able to busy wait and check if there is a waiter
 * @cookie: cookie returned from console_srcu_read_lock()
 *
 * This is called at the end of the section where spinning is allowed.
 * It has two functions. First, it is a signal that it is no longer
 * safe to start busy waiting for the lock. Second, it checks if
 * there is a busy waiter and passes the lock rights to it.
 *
 * Important: Callers lose both the console_lock and the SRCU read lock if
 * there was a busy waiter. They must not touch items synchronized by
 * console_lock or SRCU read lock in this case.
 *
 * Return: 1 if the lock rights were passed, 0 otherwise.
 */
int console_lock_spinning_disable_and_check(int cookie)
{
	int waiter;

	/*
	 * Ignore spinning waiters during panic() because they might get stopped
	 * or blocked at any time.
	 *
	 * It is safe because nobody is allowed to start spinning during panic
	 * in the first place. If there has been a waiter, then non-panic CPUs
	 * might stay spinning. They would get stopped anyway. The panic context
	 * will never start spinning and an interrupted spin on panic CPU will
	 * never continue.
	 */
	if (panic_in_progress()) {
		/* Keep lockdep happy. */
		spin_release(&console_owner_dep_map, _THIS_IP_);
		return 0;
	}

	raw_spin_lock(&console_owner_lock);
	waiter = READ_ONCE(console_waiter);
	console_owner = NULL;
	raw_spin_unlock(&console_owner_lock);

	if (!waiter) {
		spin_release(&console_owner_dep_map, _THIS_IP_);
		return 0;
	}

	/* The waiter is now free to continue */
	WRITE_ONCE(console_waiter, false);

	spin_release(&console_owner_dep_map, _THIS_IP_);

	/*
	 * Preserve lockdep lock ordering. Release the SRCU read lock before
	 * releasing the console_lock.
	 */
	console_srcu_read_unlock(cookie);

	/*
	 * Hand off console_lock to waiter. The waiter will perform
	 * the up(). After this, the waiter is the console_lock owner.
	 */
	mutex_release(&console_lock_dep_map, _THIS_IP_);

	return 1;
}

/**
 * console_trylock_spinning - try to get console_lock by busy waiting
 *
 * This allows busy waiting for the console_lock when the current
 * owner is running in specially marked sections. It means that
 * the current owner is running and cannot reschedule until it
 * is ready to lose the lock.
 *
 * Return: 1 if we got the lock, 0 otherwise
 */
static int console_trylock_spinning(void)
{
	struct task_struct *owner = NULL;
	bool waiter;
	bool spin = false;
	unsigned long flags;

	if (console_trylock())
		return 1;

	/*
	 * It's unsafe to spin once a panic has begun. If we are the
	 * panic CPU, we may have already halted the owner of the
	 * console_sem. If we are not the panic CPU, then we should
	 * avoid taking console_sem, so the panic CPU has a better
	 * chance of cleanly acquiring it later.
	 */
	if (panic_in_progress())
		return 0;

	printk_safe_enter_irqsave(flags);

	raw_spin_lock(&console_owner_lock);
	owner = READ_ONCE(console_owner);
	waiter = READ_ONCE(console_waiter);
	if (!waiter && owner && owner != current) {
		WRITE_ONCE(console_waiter, true);
		spin = true;
	}
	raw_spin_unlock(&console_owner_lock);

	/*
	 * If there is an active printk() writing to the
	 * consoles, instead of having it write our data too,
	 * see if we can offload that load from the active
	 * printer, and do some printing ourselves.
	 * Go into a spin only if there isn't already a waiter
	 * spinning, and there is an active printer, and
	 * that active printer isn't us (recursive printk?).
	 */
	if (!spin) {
		printk_safe_exit_irqrestore(flags);
		return 0;
	}

	/* We spin waiting for the owner to release us */
	spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
	/* Owner will clear console_waiter on hand off */
	while (READ_ONCE(console_waiter))
		cpu_relax();
	spin_release(&console_owner_dep_map, _THIS_IP_);

	printk_safe_exit_irqrestore(flags);

	/*
	 * The owner passed the console lock to us.
	 * Since we did not spin on console lock, annotate
	 * this as a trylock. Otherwise lockdep will
	 * complain.
	 */
	mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);

	/*
	 * Update @console_may_schedule for trylock because the previous
	 * owner may have been schedulable.
	 */
	console_may_schedule = 0;

	return 1;
}

/*
 * Recursion is tracked separately on each CPU. If NMIs are supported, an
 * additional NMI context per CPU is also separately tracked. Until per-CPU
 * data is available, a separate "early tracking" is performed.
 */
static DEFINE_PER_CPU(u8, printk_count);
static u8 printk_count_early;

#ifdef CONFIG_HAVE_NMI
static DEFINE_PER_CPU(u8, printk_count_nmi);
static u8 printk_count_nmi_early;
#endif

/*
 * Recursion is limited to keep the output sane. printk() should not require
 * more than 1 level of recursion (allowing, for example, printk() to trigger
 * a WARN), but a higher value is used in case some printk-internal errors
 * exist, such as the ringbuffer validation checks failing.
 */
#define PRINTK_MAX_RECURSION 3

/*
 * Return a pointer to the dedicated counter for the CPU+context of the
 * caller.
 */
static u8 *__printk_recursion_counter(void)
{
#ifdef CONFIG_HAVE_NMI
	if (in_nmi()) {
		if (printk_percpu_data_ready())
			return this_cpu_ptr(&printk_count_nmi);
		return &printk_count_nmi_early;
	}
#endif
	if (printk_percpu_data_ready())
		return this_cpu_ptr(&printk_count);
	return &printk_count_early;
}

/*
 * Enter recursion tracking. Interrupts are disabled to simplify tracking.
 * The caller must check the boolean return value to see if the recursion is
 * allowed. On failure, interrupts are not disabled.
 *
 * @recursion_ptr must be a variable of type (u8 *) and is the same variable
 * that is passed to printk_exit_irqrestore().
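 *
 * A minimal usage sketch, mirroring the pattern in vprintk_store():
 *
 *	u8 *recursion_ptr;
 *	unsigned long irqflags;
 *
 *	if (!printk_enter_irqsave(recursion_ptr, irqflags))
 *		return 0;
 *	... reserve, fill and commit the record ...
 *	printk_exit_irqrestore(recursion_ptr, irqflags);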
*/ #define printk_enter_irqsave(recursion_ptr, flags) \ ({ \ bool success = true; \ \ typecheck(u8 *, recursion_ptr); \ local_irq_save(flags); \ (recursion_ptr) = __printk_recursion_counter(); \ if (*(recursion_ptr) > PRINTK_MAX_RECURSION) { \ local_irq_restore(flags); \ success = false; \ } else { \ (*(recursion_ptr))++; \ } \ success; \ }) /* Exit recursion tracking, restoring interrupts. */ #define printk_exit_irqrestore(recursion_ptr, flags) \ do { \ typecheck(u8 *, recursion_ptr); \ (*(recursion_ptr))--; \ local_irq_restore(flags); \ } while (0) int printk_delay_msec __read_mostly; static inline void printk_delay(int level) { boot_delay_msec(level); if (unlikely(printk_delay_msec)) { int m = printk_delay_msec; while (m--) { mdelay(1); touch_nmi_watchdog(); } } } static inline u32 printk_caller_id(void) { return in_task() ? task_pid_nr(current) : 0x80000000 + smp_processor_id(); } /** * printk_parse_prefix - Parse level and control flags. * * @text: The terminated text message. * @level: A pointer to the current level value, will be updated. * @flags: A pointer to the current printk_info flags, will be updated. * * @level may be NULL if the caller is not interested in the parsed value. * Otherwise the variable pointed to by @level must be set to * LOGLEVEL_DEFAULT in order to be updated with the parsed value. * * @flags may be NULL if the caller is not interested in the parsed value. * Otherwise the variable pointed to by @flags will be OR'd with the parsed * value. * * Return: The length of the parsed level and control flags. */ u16 printk_parse_prefix(const char *text, int *level, enum printk_info_flags *flags) { u16 prefix_len = 0; int kern_level; while (*text) { kern_level = printk_get_level(text); if (!kern_level) break; switch (kern_level) { case '0' ... '7': if (level && *level == LOGLEVEL_DEFAULT) *level = kern_level - '0'; break; case 'c': /* KERN_CONT */ if (flags) *flags |= LOG_CONT; } prefix_len += 2; text += 2; } return prefix_len; } __printf(5, 0) static u16 printk_sprint(char *text, u16 size, int facility, enum printk_info_flags *flags, const char *fmt, va_list args) { u16 text_len; text_len = vscnprintf(text, size, fmt, args); /* Mark and strip a trailing newline. */ if (text_len && text[text_len - 1] == '\n') { text_len--; *flags |= LOG_NEWLINE; } /* Strip log level and control flags. */ if (facility == 0) { u16 prefix_len; prefix_len = printk_parse_prefix(text, NULL, NULL); if (prefix_len) { text_len -= prefix_len; memmove(text, text + prefix_len, text_len); } } trace_console(text, text_len); return text_len; } __printf(4, 0) int vprintk_store(int facility, int level, const struct dev_printk_info *dev_info, const char *fmt, va_list args) { struct prb_reserved_entry e; enum printk_info_flags flags = 0; struct printk_record r; unsigned long irqflags; u16 trunc_msg_len = 0; char prefix_buf[8]; u8 *recursion_ptr; u16 reserve_size; va_list args2; u32 caller_id; u16 text_len; int ret = 0; u64 ts_nsec; if (!printk_enter_irqsave(recursion_ptr, irqflags)) return 0; /* * Since the duration of printk() can vary depending on the message * and state of the ringbuffer, grab the timestamp now so that it is * close to the call of printk(). This provides a more deterministic * timestamp with respect to the caller. */ ts_nsec = local_clock(); caller_id = printk_caller_id(); /* * The sprintf needs to come first since the syslog prefix might be * passed in as a parameter. 
An extra byte must be reserved so that * later the vscnprintf() into the reserved buffer has room for the * terminating '\0', which is not counted by vsnprintf(). */ va_copy(args2, args); reserve_size = vsnprintf(&prefix_buf[0], sizeof(prefix_buf), fmt, args2) + 1; va_end(args2); if (reserve_size > PRINTKRB_RECORD_MAX) reserve_size = PRINTKRB_RECORD_MAX; /* Extract log level or control flags. */ if (facility == 0) printk_parse_prefix(&prefix_buf[0], &level, &flags); if (level == LOGLEVEL_DEFAULT) level = default_message_loglevel; if (dev_info) flags |= LOG_NEWLINE; if (is_printk_force_console()) flags |= LOG_FORCE_CON; if (flags & LOG_CONT) { prb_rec_init_wr(&r, reserve_size); if (prb_reserve_in_last(&e, prb, &r, caller_id, PRINTKRB_RECORD_MAX)) { text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size, facility, &flags, fmt, args); r.info->text_len += text_len; if (flags & LOG_FORCE_CON) r.info->flags |= LOG_FORCE_CON; if (flags & LOG_NEWLINE) { r.info->flags |= LOG_NEWLINE; prb_final_commit(&e); } else { prb_commit(&e); } ret = text_len; goto out; } } /* * Explicitly initialize the record before every prb_reserve() call. * prb_reserve_in_last() and prb_reserve() purposely invalidate the * structure when they fail. */ prb_rec_init_wr(&r, reserve_size); if (!prb_reserve(&e, prb, &r)) { /* truncate the message if it is too long for empty buffer */ truncate_msg(&reserve_size, &trunc_msg_len); prb_rec_init_wr(&r, reserve_size + trunc_msg_len); if (!prb_reserve(&e, prb, &r)) goto out; } /* fill message */ text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &flags, fmt, args); if (trunc_msg_len) memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len); r.info->text_len = text_len + trunc_msg_len; r.info->facility = facility; r.info->level = level & 7; r.info->flags = flags & 0x1f; r.info->ts_nsec = ts_nsec; r.info->caller_id = caller_id; if (dev_info) memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info)); /* A message without a trailing newline can be continued. */ if (!(flags & LOG_NEWLINE)) prb_commit(&e); else prb_final_commit(&e); ret = text_len + trunc_msg_len; out: printk_exit_irqrestore(recursion_ptr, irqflags); return ret; } /* * This acts as a one-way switch to allow legacy consoles to print from * the printk() caller context on a panic CPU. It also attempts to flush * the legacy consoles in this context. */ void printk_legacy_allow_panic_sync(void) { struct console_flush_type ft; legacy_allow_panic_sync = true; printk_get_console_flush_type(&ft); if (ft.legacy_direct) { if (console_trylock()) console_unlock(); } } bool __read_mostly debug_non_panic_cpus; #ifdef CONFIG_PRINTK_CALLER static int __init debug_non_panic_cpus_setup(char *str) { debug_non_panic_cpus = true; pr_info("allow messages from non-panic CPUs in panic()\n"); return 0; } early_param("debug_non_panic_cpus", debug_non_panic_cpus_setup); module_param(debug_non_panic_cpus, bool, 0644); MODULE_PARM_DESC(debug_non_panic_cpus, "allow messages from non-panic CPUs in panic()"); #endif asmlinkage int vprintk_emit(int facility, int level, const struct dev_printk_info *dev_info, const char *fmt, va_list args) { struct console_flush_type ft; int printed_len; /* Suppress unimportant messages after panic happens */ if (unlikely(suppress_printk)) return 0; /* * The messages on the panic CPU are the most important. If * non-panic CPUs are generating any messages, they will be * silently dropped. 
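 * Two exceptions are made below: the debug_non_panic_cpus option and a
 * panic()-triggered all-CPU backtrace keep such messages for debugging.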
*/ if (other_cpu_in_panic() && !debug_non_panic_cpus && !panic_triggering_all_cpu_backtrace) return 0; printk_get_console_flush_type(&ft); /* If called from the scheduler, we can not call up(). */ if (level == LOGLEVEL_SCHED) { level = LOGLEVEL_DEFAULT; ft.legacy_offload |= ft.legacy_direct; ft.legacy_direct = false; } printk_delay(level); printed_len = vprintk_store(facility, level, dev_info, fmt, args); if (ft.nbcon_atomic) nbcon_atomic_flush_pending(); if (ft.nbcon_offload) nbcon_kthreads_wake(); if (ft.legacy_direct) { /* * The caller may be holding system-critical or * timing-sensitive locks. Disable preemption during * printing of all remaining records to all consoles so that * this context can return as soon as possible. Hopefully * another printk() caller will take over the printing. */ preempt_disable(); /* * Try to acquire and then immediately release the console * semaphore. The release will print out buffers. With the * spinning variant, this context tries to take over the * printing from another printing context. */ if (console_trylock_spinning()) console_unlock(); preempt_enable(); } if (ft.legacy_offload) defer_console_output(); else wake_up_klogd(); return printed_len; } EXPORT_SYMBOL(vprintk_emit); int vprintk_default(const char *fmt, va_list args) { return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args); } EXPORT_SYMBOL_GPL(vprintk_default); asmlinkage __visible int _printk(const char *fmt, ...) { va_list args; int r; va_start(args, fmt); r = vprintk(fmt, args); va_end(args); return r; } EXPORT_SYMBOL(_printk); static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress); #else /* CONFIG_PRINTK */ #define printk_time false #define prb_read_valid(rb, seq, r) false #define prb_first_valid_seq(rb) 0 #define prb_next_seq(rb) 0 static u64 syslog_seq; static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; } #endif /* CONFIG_PRINTK */ #ifdef CONFIG_EARLY_PRINTK struct console *early_console; asmlinkage __visible void early_printk(const char *fmt, ...) { va_list ap; char buf[512]; int n; if (!early_console) return; va_start(ap, fmt); n = vscnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); early_console->write(early_console, buf, n); } #endif static void set_user_specified(struct console_cmdline *c, bool user_specified) { if (!user_specified) return; /* * @c console was defined by the user on the command line. * Do not clear when added twice also by SPCR or the device tree. */ c->user_specified = true; /* At least one console defined by the user on the command line. */ console_set_on_cmdline = 1; } static int __add_preferred_console(const char *name, const short idx, const char *devname, char *options, char *brl_options, bool user_specified) { struct console_cmdline *c; int i; if (!name && !devname) return -EINVAL; /* * We use a signed short index for struct console for device drivers to * indicate a not yet assigned index or port. However, a negative index * value is not valid when the console name and index are defined on * the command line. */ if (name && idx < 0) return -EINVAL; /* * See if this tty is not yet registered, and * if we have a slot free. 
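 *
 * For example, "console=ttyS0,115200" and "console=tty0" each occupy one
 * console_cmdline slot, while repeating an already added console= option
 * reuses the existing slot.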
*/ for (i = 0, c = console_cmdline; i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]); i++, c++) { if ((name && strcmp(c->name, name) == 0 && c->index == idx) || (devname && strcmp(c->devname, devname) == 0)) { if (!brl_options) preferred_console = i; set_user_specified(c, user_specified); return 0; } } if (i == MAX_CMDLINECONSOLES) return -E2BIG; if (!brl_options) preferred_console = i; if (name) strscpy(c->name, name); if (devname) strscpy(c->devname, devname); c->options = options; set_user_specified(c, user_specified); braille_set_options(c, brl_options); c->index = idx; return 0; } static int __init console_msg_format_setup(char *str) { if (!strcmp(str, "syslog")) console_msg_format = MSG_FORMAT_SYSLOG; if (!strcmp(str, "default")) console_msg_format = MSG_FORMAT_DEFAULT; return 1; } __setup("console_msg_format=", console_msg_format_setup); /* * Set up a console. Called via do_early_param() in init/main.c * for each "console=" parameter in the boot command line. */ static int __init console_setup(char *str) { static_assert(sizeof(console_cmdline[0].devname) >= sizeof(console_cmdline[0].name) + 4); char buf[sizeof(console_cmdline[0].devname)]; char *brl_options = NULL; char *ttyname = NULL; char *devname = NULL; char *options; char *s; int idx; /* * console="" or console=null have been suggested as a way to * disable console output. Use ttynull that has been created * for exactly this purpose. */ if (str[0] == 0 || strcmp(str, "null") == 0) { __add_preferred_console("ttynull", 0, NULL, NULL, NULL, true); return 1; } if (_braille_console_setup(&str, &brl_options)) return 1; /* For a DEVNAME:0.0 style console the character device is unknown early */ if (strchr(str, ':')) devname = buf; else ttyname = buf; /* * Decode str into name, index, options. */ if (ttyname && isdigit(str[0])) scnprintf(buf, sizeof(buf), "ttyS%s", str); else strscpy(buf, str); options = strchr(str, ','); if (options) *(options++) = 0; #ifdef __sparc__ if (!strcmp(str, "ttya")) strscpy(buf, "ttyS0"); if (!strcmp(str, "ttyb")) strscpy(buf, "ttyS1"); #endif for (s = buf; *s; s++) if ((ttyname && isdigit(*s)) || *s == ',') break; /* @idx will get defined when devname matches. */ if (devname) idx = -1; else idx = simple_strtoul(s, NULL, 10); *s = 0; __add_preferred_console(ttyname, idx, devname, options, brl_options, true); return 1; } __setup("console=", console_setup); /** * add_preferred_console - add a device to the list of preferred consoles. * @name: device name * @idx: device index * @options: options for this console * * The last preferred console added will be used for kernel messages * and stdin/out/err for init. Normally this is used by console_setup * above to handle user-supplied console arguments; however it can also * be used by arch-specific code either to override the user or more * commonly to provide a default console (ie from PROM variables) when * the user has not supplied one. */ int add_preferred_console(const char *name, const short idx, char *options) { return __add_preferred_console(name, idx, NULL, options, NULL, false); } /** * match_devname_and_update_preferred_console - Update a preferred console * when matching devname is found. * @devname: DEVNAME:0.0 style device name * @name: Name of the corresponding console driver, e.g. "ttyS" * @idx: Console index, e.g. port number. * * The function checks whether a device with the given @devname is * preferred via the console=DEVNAME:0.0 command line option. 
* It fills the missing console driver name and console index * so that a later register_console() call could find (match) * and enable this device. * * It might be used when a driver subsystem initializes particular * devices with already known DEVNAME:0.0 style names. And it * could predict which console driver name and index this device * would later get associated with. * * Return: 0 on success, negative error code on failure. */ int match_devname_and_update_preferred_console(const char *devname, const char *name, const short idx) { struct console_cmdline *c = console_cmdline; int i; if (!devname || !strlen(devname) || !name || !strlen(name) || idx < 0) return -EINVAL; for (i = 0; i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]); i++, c++) { if (!strcmp(devname, c->devname)) { pr_info("associate the preferred console \"%s\" with \"%s%d\"\n", devname, name, idx); strscpy(c->name, name); c->index = idx; return 0; } } return -ENOENT; } EXPORT_SYMBOL_GPL(match_devname_and_update_preferred_console); bool console_suspend_enabled = true; EXPORT_SYMBOL(console_suspend_enabled); static int __init console_suspend_disable(char *str) { console_suspend_enabled = false; return 1; } __setup("no_console_suspend", console_suspend_disable); module_param_named(console_suspend, console_suspend_enabled, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(console_suspend, "suspend console during suspend" " and hibernate operations"); static bool printk_console_no_auto_verbose; void console_verbose(void) { if (console_loglevel && !printk_console_no_auto_verbose) console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH; } EXPORT_SYMBOL_GPL(console_verbose); module_param_named(console_no_auto_verbose, printk_console_no_auto_verbose, bool, 0644); MODULE_PARM_DESC(console_no_auto_verbose, "Disable console loglevel raise to highest on oops/panic/etc"); /** * console_suspend_all - suspend the console subsystem * * This disables printk() while we go into suspend states */ void console_suspend_all(void) { struct console *con; if (!console_suspend_enabled) return; pr_info("Suspending console(s) (use no_console_suspend to debug)\n"); pr_flush(1000, true); console_list_lock(); for_each_console(con) console_srcu_write_flags(con, con->flags | CON_SUSPENDED); console_list_unlock(); /* * Ensure that all SRCU list walks have completed. All printing * contexts must be able to see that they are suspended so that it * is guaranteed that all printing has stopped when this function * completes. */ synchronize_srcu(&console_srcu); } void console_resume_all(void) { struct console_flush_type ft; struct console *con; if (!console_suspend_enabled) return; console_list_lock(); for_each_console(con) console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED); console_list_unlock(); /* * Ensure that all SRCU list walks have completed. All printing * contexts must be able to see they are no longer suspended so * that they are guaranteed to wake up and resume printing. */ synchronize_srcu(&console_srcu); printk_get_console_flush_type(&ft); if (ft.nbcon_offload) nbcon_kthreads_wake(); if (ft.legacy_offload) defer_console_output(); pr_flush(1000, true); } /** * console_cpu_notify - print deferred console messages after CPU hotplug * @cpu: unused * * If printk() is called from a CPU that is not online yet, the messages * will be printed on the console only if there are CON_ANYTIME consoles. * This function is called when a new CPU comes online (or fails to come * up) or goes offline. 
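 * Unless the CPU change is part of suspend/resume (cpuhp_tasks_frozen is
 * set), the callback simply flushes any pending records to the consoles.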
*/ static int console_cpu_notify(unsigned int cpu) { struct console_flush_type ft; if (!cpuhp_tasks_frozen) { printk_get_console_flush_type(&ft); if (ft.nbcon_atomic) nbcon_atomic_flush_pending(); if (ft.legacy_direct) { if (console_trylock()) console_unlock(); } } return 0; } /** * console_lock - block the console subsystem from printing * * Acquires a lock which guarantees that no consoles will * be in or enter their write() callback. * * Can sleep, returns nothing. */ void console_lock(void) { might_sleep(); /* On panic, the console_lock must be left to the panic cpu. */ while (other_cpu_in_panic()) msleep(1000); down_console_sem(); console_locked = 1; console_may_schedule = 1; } EXPORT_SYMBOL(console_lock); /** * console_trylock - try to block the console subsystem from printing * * Try to acquire a lock which guarantees that no consoles will * be in or enter their write() callback. * * returns 1 on success, and 0 on failure to acquire the lock. */ int console_trylock(void) { /* On panic, the console_lock must be left to the panic cpu. */ if (other_cpu_in_panic()) return 0; if (down_trylock_console_sem()) return 0; console_locked = 1; console_may_schedule = 0; return 1; } EXPORT_SYMBOL(console_trylock); int is_console_locked(void) { return console_locked; } EXPORT_SYMBOL(is_console_locked); static void __console_unlock(void) { console_locked = 0; up_console_sem(); } #ifdef CONFIG_PRINTK /* * Prepend the message in @pmsg->pbufs->outbuf. This is achieved by shifting * the existing message over and inserting the scratchbuf message. * * @pmsg is the original printk message. * @fmt is the printf format of the message which will prepend the existing one. * * If there is not enough space in @pmsg->pbufs->outbuf, the existing * message text will be sufficiently truncated. * * If @pmsg->pbufs->outbuf is modified, @pmsg->outbuf_len is updated. */ __printf(2, 3) static void console_prepend_message(struct printk_message *pmsg, const char *fmt, ...) { struct printk_buffers *pbufs = pmsg->pbufs; const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf); const size_t outbuf_sz = sizeof(pbufs->outbuf); char *scratchbuf = &pbufs->scratchbuf[0]; char *outbuf = &pbufs->outbuf[0]; va_list args; size_t len; va_start(args, fmt); len = vscnprintf(scratchbuf, scratchbuf_sz, fmt, args); va_end(args); /* * Make sure outbuf is sufficiently large before prepending. * Keep at least the prefix when the message must be truncated. * It is a rather theoretical problem when someone tries to * use a minimalist buffer. */ if (WARN_ON_ONCE(len + PRINTK_PREFIX_MAX >= outbuf_sz)) return; if (pmsg->outbuf_len + len >= outbuf_sz) { /* Truncate the message, but keep it terminated. */ pmsg->outbuf_len = outbuf_sz - (len + 1); outbuf[pmsg->outbuf_len] = 0; } memmove(outbuf + len, outbuf, pmsg->outbuf_len + 1); memcpy(outbuf, scratchbuf, len); pmsg->outbuf_len += len; } /* * Prepend the message in @pmsg->pbufs->outbuf with a "dropped message". * @pmsg->outbuf_len is updated appropriately. * * @pmsg is the printk message to prepend. * * @dropped is the dropped count to report in the dropped message. */ void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped) { console_prepend_message(pmsg, "** %lu printk messages dropped **\n", dropped); } /* * Prepend the message in @pmsg->pbufs->outbuf with a "replay message". * @pmsg->outbuf_len is updated appropriately. * * @pmsg is the printk message to prepend. 
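 *
 * The resulting output starts with a line such as:
 *
 *	** replaying previous printk message **
 *
 * followed by the original (possibly truncated) message text.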
 */
void console_prepend_replay(struct printk_message *pmsg)
{
	console_prepend_message(pmsg, "** replaying previous printk message **\n");
}

/*
 * Read and format the specified record (or a later record if the specified
 * record is not available).
 *
 * @pmsg will contain the formatted result. @pmsg->pbufs must point to a
 * struct printk_buffers.
 *
 * @seq is the record to read and format. If it is not available, the next
 * valid record is read.
 *
 * @is_extended specifies if the message should be formatted for extended
 * console output.
 *
 * @may_suppress specifies if records may be skipped based on loglevel.
 *
 * Returns false if no record is available. Otherwise true and all fields
 * of @pmsg are valid. (See the documentation of struct printk_message
 * for information about the @pmsg fields.)
 */
bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
			     bool is_extended, bool may_suppress)
{
	struct printk_buffers *pbufs = pmsg->pbufs;
	const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
	const size_t outbuf_sz = sizeof(pbufs->outbuf);
	char *scratchbuf = &pbufs->scratchbuf[0];
	char *outbuf = &pbufs->outbuf[0];
	struct printk_info info;
	struct printk_record r;
	size_t len = 0;
	bool force_con;

	/*
	 * Formatting extended messages requires a separate buffer, so use the
	 * scratch buffer to read in the ringbuffer text.
	 *
	 * Formatting normal messages is done in-place, so read the ringbuffer
	 * text directly into the output buffer.
	 */
	if (is_extended)
		prb_rec_init_rd(&r, &info, scratchbuf, scratchbuf_sz);
	else
		prb_rec_init_rd(&r, &info, outbuf, outbuf_sz);

	if (!prb_read_valid(prb, seq, &r))
		return false;

	pmsg->seq = r.info->seq;
	pmsg->dropped = r.info->seq - seq;
	force_con = r.info->flags & LOG_FORCE_CON;

	/*
	 * Skip records that are not forced to be printed on consoles and that
	 * have a level above the console loglevel.
	 */
	if (!force_con && may_suppress && suppress_message_printing(r.info->level))
		goto out;

	if (is_extended) {
		len = info_print_ext_header(outbuf, outbuf_sz, r.info);
		len += msg_print_ext_body(outbuf + len, outbuf_sz - len,
					  &r.text_buf[0], r.info->text_len,
					  &r.info->dev_info);
	} else {
		len = record_print_text(&r,
					console_msg_format & MSG_FORMAT_SYSLOG,
					printk_time);
	}
out:
	pmsg->outbuf_len = len;
	return true;
}

/*
 * Legacy console printing from printk() caller context does not respect
 * raw_spinlock/spinlock nesting. For !PREEMPT_RT the lockdep warning is a
 * false positive. For PREEMPT_RT the false positive condition does not
 * occur.
 *
 * This map is used to temporarily establish LD_WAIT_SLEEP context for the
 * console write() callback when legacy printing to avoid false positive
 * lockdep complaints, thus allowing lockdep to continue to function for
 * real issues.
 */
#ifdef CONFIG_PREEMPT_RT
static inline void printk_legacy_allow_spinlock_enter(void) { }
static inline void printk_legacy_allow_spinlock_exit(void) { }
#else
static DEFINE_WAIT_OVERRIDE_MAP(printk_legacy_map, LD_WAIT_SLEEP);

static inline void printk_legacy_allow_spinlock_enter(void)
{
	lock_map_acquire_try(&printk_legacy_map);
}

static inline void printk_legacy_allow_spinlock_exit(void)
{
	lock_map_release(&printk_legacy_map);
}
#endif /* CONFIG_PREEMPT_RT */

/*
 * Used as the printk buffers for non-panic, serialized console printing.
 * This is for legacy (!CON_NBCON) as well as all boot (CON_BOOT) consoles.
 * Its usage requires the console_lock held.
 */
struct printk_buffers printk_shared_pbufs;

/*
 * Print one record for the given console.
The record printed is whatever * record is the next available record for the given console. * * @handover will be set to true if a printk waiter has taken over the * console_lock, in which case the caller is no longer holding both the * console_lock and the SRCU read lock. Otherwise it is set to false. * * @cookie is the cookie from the SRCU read lock. * * Returns false if the given console has no next record to print, otherwise * true. * * Requires the console_lock and the SRCU read lock. */ static bool console_emit_next_record(struct console *con, bool *handover, int cookie) { bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED; char *outbuf = &printk_shared_pbufs.outbuf[0]; struct printk_message pmsg = { .pbufs = &printk_shared_pbufs, }; unsigned long flags; *handover = false; if (!printk_get_next_message(&pmsg, con->seq, is_extended, true)) return false; con->dropped += pmsg.dropped; /* Skip messages of formatted length 0. */ if (pmsg.outbuf_len == 0) { con->seq = pmsg.seq + 1; goto skip; } if (con->dropped && !is_extended) { console_prepend_dropped(&pmsg, con->dropped); con->dropped = 0; } /* Write everything out to the hardware. */ if (force_legacy_kthread() && !panic_in_progress()) { /* * With forced threading this function is in a task context * (either legacy kthread or get_init_console_seq()). There * is no need for concern about printk reentrance, handovers, * or lockdep complaints. */ con->write(con, outbuf, pmsg.outbuf_len); con->seq = pmsg.seq + 1; } else { /* * While actively printing out messages, if another printk() * were to occur on another CPU, it may wait for this one to * finish. This task can not be preempted if there is a * waiter waiting to take over. * * Interrupts are disabled because the hand over to a waiter * must not be interrupted until the hand over is completed * (@console_waiter is cleared). */ printk_safe_enter_irqsave(flags); console_lock_spinning_enable(); /* Do not trace print latency. */ stop_critical_timings(); printk_legacy_allow_spinlock_enter(); con->write(con, outbuf, pmsg.outbuf_len); printk_legacy_allow_spinlock_exit(); start_critical_timings(); con->seq = pmsg.seq + 1; *handover = console_lock_spinning_disable_and_check(cookie); printk_safe_exit_irqrestore(flags); } skip: return true; } #else static bool console_emit_next_record(struct console *con, bool *handover, int cookie) { *handover = false; return false; } static inline void printk_kthreads_check_locked(void) { } #endif /* CONFIG_PRINTK */ /* * Print out all remaining records to all consoles. * * @do_cond_resched is set by the caller. It can be true only in schedulable * context. * * @next_seq is set to the sequence number after the last available record. * The value is valid only when this function returns true. It means that all * usable consoles are completely flushed. * * @handover will be set to true if a printk waiter has taken over the * console_lock, in which case the caller is no longer holding the * console_lock. Otherwise it is set to false. * * Returns true when there was at least one usable console and all messages * were flushed to all usable consoles. A returned false informs the caller * that everything was not flushed (either there were no usable consoles or * another context has taken over printing or it is a panic situation and this * is not the panic CPU). Regardless the reason, the caller should assume it * is not useful to immediately try again. * * Requires the console_lock. 
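 *
 * Called from __console_flush_and_unlock() and console_flush_on_panic().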
*/ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover) { struct console_flush_type ft; bool any_usable = false; struct console *con; bool any_progress; int cookie; *next_seq = 0; *handover = false; do { any_progress = false; printk_get_console_flush_type(&ft); cookie = console_srcu_read_lock(); for_each_console_srcu(con) { short flags = console_srcu_read_flags(con); u64 printk_seq; bool progress; /* * console_flush_all() is only responsible for nbcon * consoles when the nbcon consoles cannot print via * their atomic or threaded flushing. */ if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload)) continue; if (!console_is_usable(con, flags, !do_cond_resched)) continue; any_usable = true; if (flags & CON_NBCON) { progress = nbcon_legacy_emit_next_record(con, handover, cookie, !do_cond_resched); printk_seq = nbcon_seq_read(con); } else { progress = console_emit_next_record(con, handover, cookie); printk_seq = con->seq; } /* * If a handover has occurred, the SRCU read lock * is already released. */ if (*handover) return false; /* Track the next of the highest seq flushed. */ if (printk_seq > *next_seq) *next_seq = printk_seq; if (!progress) continue; any_progress = true; /* Allow panic_cpu to take over the consoles safely. */ if (other_cpu_in_panic()) goto abandon; if (do_cond_resched) cond_resched(); } console_srcu_read_unlock(cookie); } while (any_progress); return any_usable; abandon: console_srcu_read_unlock(cookie); return false; } static void __console_flush_and_unlock(void) { bool do_cond_resched; bool handover; bool flushed; u64 next_seq; /* * Console drivers are called with interrupts disabled, so * @console_may_schedule should be cleared before; however, we may * end up dumping a lot of lines, for example, if called from * console registration path, and should invoke cond_resched() * between lines if allowable. Not doing so can cause a very long * scheduling stall on a slow console leading to RCU stall and * softlockup warnings which exacerbate the issue with more * messages practically incapacitating the system. Therefore, create * a local to use for the printing loop. */ do_cond_resched = console_may_schedule; do { console_may_schedule = 0; flushed = console_flush_all(do_cond_resched, &next_seq, &handover); if (!handover) __console_unlock(); /* * Abort if there was a failure to flush all messages to all * usable consoles. Either it is not possible to flush (in * which case it would be an infinite loop of retrying) or * another context has taken over printing. */ if (!flushed) break; /* * Some context may have added new records after * console_flush_all() but before unlocking the console. * Re-check if there is a new record to flush. If the trylock * fails, another context is already handling the printing. */ } while (prb_read_valid(prb, next_seq, NULL) && console_trylock()); } /** * console_unlock - unblock the legacy console subsystem from printing * * Releases the console_lock which the caller holds to block printing of * the legacy console subsystem. * * While the console_lock was held, console output may have been buffered * by printk(). If this is the case, console_unlock() emits the output on * legacy consoles prior to releasing the lock. * * console_unlock(); may be called from any context. 
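 *
 * Typical usage is a plain lock/unlock pair; records stored in the
 * meantime are emitted to the legacy consoles on the way out:
 *
 *	console_lock();
 *	... inspect or modify console state ...
 *	console_unlock();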
*/ void console_unlock(void) { struct console_flush_type ft; printk_get_console_flush_type(&ft); if (ft.legacy_direct) __console_flush_and_unlock(); else __console_unlock(); } EXPORT_SYMBOL(console_unlock); /** * console_conditional_schedule - yield the CPU if required * * If the console code is currently allowed to sleep, and * if this CPU should yield the CPU to another task, do * so here. * * Must be called within console_lock();. */ void __sched console_conditional_schedule(void) { if (console_may_schedule) cond_resched(); } EXPORT_SYMBOL(console_conditional_schedule); void console_unblank(void) { bool found_unblank = false; struct console *c; int cookie; /* * First check if there are any consoles implementing the unblank() * callback. If not, there is no reason to continue and take the * console lock, which in particular can be dangerous if * @oops_in_progress is set. */ cookie = console_srcu_read_lock(); for_each_console_srcu(c) { short flags = console_srcu_read_flags(c); if (flags & CON_SUSPENDED) continue; if ((flags & CON_ENABLED) && c->unblank) { found_unblank = true; break; } } console_srcu_read_unlock(cookie); if (!found_unblank) return; /* * Stop console printing because the unblank() callback may * assume the console is not within its write() callback. * * If @oops_in_progress is set, this may be an atomic context. * In that case, attempt a trylock as best-effort. */ if (oops_in_progress) { /* Semaphores are not NMI-safe. */ if (in_nmi()) return; /* * Attempting to trylock the console lock can deadlock * if another CPU was stopped while modifying the * semaphore. "Hope and pray" that this is not the * current situation. */ if (down_trylock_console_sem() != 0) return; } else console_lock(); console_locked = 1; console_may_schedule = 0; cookie = console_srcu_read_lock(); for_each_console_srcu(c) { short flags = console_srcu_read_flags(c); if (flags & CON_SUSPENDED) continue; if ((flags & CON_ENABLED) && c->unblank) c->unblank(); } console_srcu_read_unlock(cookie); console_unlock(); if (!oops_in_progress) pr_flush(1000, true); } /* * Rewind all consoles to the oldest available record. * * IMPORTANT: The function is safe only when called under * console_lock(). It is not enforced because * it is used as a best effort in panic(). */ static void __console_rewind_all(void) { struct console *c; short flags; int cookie; u64 seq; seq = prb_first_valid_seq(prb); cookie = console_srcu_read_lock(); for_each_console_srcu(c) { flags = console_srcu_read_flags(c); if (flags & CON_NBCON) { nbcon_seq_force(c, seq); } else { /* * This assignment is safe only when called under * console_lock(). On panic, legacy consoles are * only best effort. */ c->seq = seq; } } console_srcu_read_unlock(cookie); } /** * console_flush_on_panic - flush console content on panic * @mode: flush all messages in buffer or just the pending ones * * Immediately output all pending messages no matter what. */ void console_flush_on_panic(enum con_flush_mode mode) { struct console_flush_type ft; bool handover; u64 next_seq; /* * Ignore the console lock and flush out the messages. Attempting a * trylock would not be useful because: * * - if it is contended, it must be ignored anyway * - console_lock() and console_trylock() block and fail * respectively in panic for non-panic CPUs * - semaphores are not NMI-safe */ /* * If another context is holding the console lock, * @console_may_schedule might be set. Clear it so that * this context does not call cond_resched() while flushing. 
*/ console_may_schedule = 0; if (mode == CONSOLE_REPLAY_ALL) __console_rewind_all(); printk_get_console_flush_type(&ft); if (ft.nbcon_atomic) nbcon_atomic_flush_pending(); /* Flush legacy consoles once allowed, even when dangerous. */ if (legacy_allow_panic_sync) console_flush_all(false, &next_seq, &handover); } /* * Return the console tty driver structure and its associated index */ struct tty_driver *console_device(int *index) { struct console *c; struct tty_driver *driver = NULL; int cookie; /* * Take console_lock to serialize device() callback with * other console operations. For example, fg_console is * modified under console_lock when switching vt. */ console_lock(); cookie = console_srcu_read_lock(); for_each_console_srcu(c) { if (!c->device) continue; driver = c->device(c, index); if (driver) break; } console_srcu_read_unlock(cookie); console_unlock(); return driver; } /* * Prevent further output on the passed console device so that (for example) * serial drivers can suspend console output before suspending a port, and can * re-enable output afterwards. */ void console_suspend(struct console *console) { __pr_flush(console, 1000, true); console_list_lock(); console_srcu_write_flags(console, console->flags & ~CON_ENABLED); console_list_unlock(); /* * Ensure that all SRCU list walks have completed. All contexts must * be able to see that this console is disabled so that (for example) * the caller can suspend the port without risk of another context * using the port. */ synchronize_srcu(&console_srcu); } EXPORT_SYMBOL(console_suspend); void console_resume(struct console *console) { struct console_flush_type ft; bool is_nbcon; console_list_lock(); console_srcu_write_flags(console, console->flags | CON_ENABLED); is_nbcon = console->flags & CON_NBCON; console_list_unlock(); /* * Ensure that all SRCU list walks have completed. The related * printing context must be able to see it is enabled so that * it is guaranteed to wake up and resume printing. */ synchronize_srcu(&console_srcu); printk_get_console_flush_type(&ft); if (is_nbcon && ft.nbcon_offload) nbcon_kthread_wake(console); else if (ft.legacy_offload) defer_console_output(); __pr_flush(console, 1000, true); } EXPORT_SYMBOL(console_resume); #ifdef CONFIG_PRINTK static int unregister_console_locked(struct console *console); /* True when system boot is far enough to create printer threads. */ static bool printk_kthreads_ready __ro_after_init; static struct task_struct *printk_legacy_kthread; static bool legacy_kthread_should_wakeup(void) { struct console_flush_type ft; struct console *con; bool ret = false; int cookie; if (kthread_should_stop()) return true; printk_get_console_flush_type(&ft); cookie = console_srcu_read_lock(); for_each_console_srcu(con) { short flags = console_srcu_read_flags(con); u64 printk_seq; /* * The legacy printer thread is only responsible for nbcon * consoles when the nbcon consoles cannot print via their * atomic or threaded flushing. */ if ((flags & CON_NBCON) && (ft.nbcon_atomic || ft.nbcon_offload)) continue; if (!console_is_usable(con, flags, false)) continue; if (flags & CON_NBCON) { printk_seq = nbcon_seq_read(con); } else { /* * It is safe to read @seq because only this * thread context updates @seq. 
*/ printk_seq = con->seq; } if (prb_read_valid(prb, printk_seq, NULL)) { ret = true; break; } } console_srcu_read_unlock(cookie); return ret; } static int legacy_kthread_func(void *unused) { for (;;) { wait_event_interruptible(legacy_wait, legacy_kthread_should_wakeup()); if (kthread_should_stop()) break; console_lock(); __console_flush_and_unlock(); } return 0; } static bool legacy_kthread_create(void) { struct task_struct *kt; lockdep_assert_console_list_lock_held(); kt = kthread_run(legacy_kthread_func, NULL, "pr/legacy"); if (WARN_ON(IS_ERR(kt))) { pr_err("failed to start legacy printing thread\n"); return false; } printk_legacy_kthread = kt; /* * It is important that console printing threads are scheduled * shortly after a printk call and with generous runtime budgets. */ sched_set_normal(printk_legacy_kthread, -20); return true; } /** * printk_kthreads_shutdown - shutdown all threaded printers * * On system shutdown all threaded printers are stopped. This allows printk * to transition back to atomic printing, thus providing a robust mechanism * for the final shutdown/reboot messages to be output. */ static void printk_kthreads_shutdown(void) { struct console *con; console_list_lock(); if (printk_kthreads_running) { printk_kthreads_running = false; for_each_console(con) { if (con->flags & CON_NBCON) nbcon_kthread_stop(con); } /* * The threads may have been stopped while printing a * backlog. Flush any records left over. */ nbcon_atomic_flush_pending(); } console_list_unlock(); } static struct syscore_ops printk_syscore_ops = { .shutdown = printk_kthreads_shutdown, }; /* * If appropriate, start nbcon kthreads and set @printk_kthreads_running. * If any kthreads fail to start, those consoles are unregistered. * * Must be called under console_list_lock(). */ static void printk_kthreads_check_locked(void) { struct hlist_node *tmp; struct console *con; lockdep_assert_console_list_lock_held(); if (!printk_kthreads_ready) return; if (have_legacy_console || have_boot_console) { if (!printk_legacy_kthread && force_legacy_kthread() && !legacy_kthread_create()) { /* * All legacy consoles must be unregistered. If there * are any nbcon consoles, they will set up their own * kthread. */ hlist_for_each_entry_safe(con, tmp, &console_list, node) { if (con->flags & CON_NBCON) continue; unregister_console_locked(con); } } } else if (printk_legacy_kthread) { kthread_stop(printk_legacy_kthread); printk_legacy_kthread = NULL; } /* * Printer threads cannot be started as long as any boot console is * registered because there is no way to synchronize the hardware * registers between boot console code and regular console code. * It can only be known that there will be no new boot consoles when * an nbcon console is registered. */ if (have_boot_console || !have_nbcon_console) { /* Clear flag in case all nbcon consoles unregistered. 
*/ printk_kthreads_running = false; return; } if (printk_kthreads_running) return; hlist_for_each_entry_safe(con, tmp, &console_list, node) { if (!(con->flags & CON_NBCON)) continue; if (!nbcon_kthread_create(con)) unregister_console_locked(con); } printk_kthreads_running = true; } static int __init printk_set_kthreads_ready(void) { register_syscore_ops(&printk_syscore_ops); console_list_lock(); printk_kthreads_ready = true; printk_kthreads_check_locked(); console_list_unlock(); return 0; } early_initcall(printk_set_kthreads_ready); #endif /* CONFIG_PRINTK */ static int __read_mostly keep_bootcon; static int __init keep_bootcon_setup(char *str) { keep_bootcon = 1; pr_info("debug: skip boot console de-registration.\n"); return 0; } early_param("keep_bootcon", keep_bootcon_setup); static int console_call_setup(struct console *newcon, char *options) { int err; if (!newcon->setup) return 0; /* Synchronize with possible boot console. */ console_lock(); err = newcon->setup(newcon, options); console_unlock(); return err; } /* * This is called by register_console() to try to match * the newly registered console with any of the ones selected * by either the command line or add_preferred_console() and * setup/enable it. * * Care need to be taken with consoles that are statically * enabled such as netconsole */ static int try_enable_preferred_console(struct console *newcon, bool user_specified) { struct console_cmdline *c; int i, err; for (i = 0, c = console_cmdline; i < MAX_CMDLINECONSOLES && (c->name[0] || c->devname[0]); i++, c++) { /* Console not yet initialized? */ if (!c->name[0]) continue; if (c->user_specified != user_specified) continue; if (!newcon->match || newcon->match(newcon, c->name, c->index, c->options) != 0) { /* default matching */ BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name)); if (strcmp(c->name, newcon->name) != 0) continue; if (newcon->index >= 0 && newcon->index != c->index) continue; if (newcon->index < 0) newcon->index = c->index; if (_braille_register_console(newcon, c)) return 0; err = console_call_setup(newcon, c->options); if (err) return err; } newcon->flags |= CON_ENABLED; if (i == preferred_console) newcon->flags |= CON_CONSDEV; return 0; } /* * Some consoles, such as pstore and netconsole, can be enabled even * without matching. Accept the pre-enabled consoles only when match() * and setup() had a chance to be called. */ if (newcon->flags & CON_ENABLED && c->user_specified == user_specified) return 0; return -ENOENT; } /* Try to enable the console unconditionally */ static void try_enable_default_console(struct console *newcon) { if (newcon->index < 0) newcon->index = 0; if (console_call_setup(newcon, NULL) != 0) return; newcon->flags |= CON_ENABLED; if (newcon->device) newcon->flags |= CON_CONSDEV; } /* Return the starting sequence number for a newly registered console. */ static u64 get_init_console_seq(struct console *newcon, bool bootcon_registered) { struct console *con; bool handover; u64 init_seq; if (newcon->flags & (CON_PRINTBUFFER | CON_BOOT)) { /* Get a consistent copy of @syslog_seq. */ mutex_lock(&syslog_lock); init_seq = syslog_seq; mutex_unlock(&syslog_lock); } else { /* Begin with next message added to ringbuffer. */ init_seq = prb_next_seq(prb); /* * If any enabled boot consoles are due to be unregistered * shortly, some may not be caught up and may be the same * device as @newcon. 
Since it is not known which boot console * is the same device, flush all consoles and, if necessary, * start with the message of the enabled boot console that is * the furthest behind. */ if (bootcon_registered && !keep_bootcon) { /* * Hold the console_lock to stop console printing and * guarantee safe access to console->seq. */ console_lock(); /* * Flush all consoles and set the console to start at * the next unprinted sequence number. */ if (!console_flush_all(true, &init_seq, &handover)) { /* * Flushing failed. Just choose the lowest * sequence of the enabled boot consoles. */ /* * If there was a handover, this context no * longer holds the console_lock. */ if (handover) console_lock(); init_seq = prb_next_seq(prb); for_each_console(con) { u64 seq; if (!(con->flags & CON_BOOT) || !(con->flags & CON_ENABLED)) { continue; } if (con->flags & CON_NBCON) seq = nbcon_seq_read(con); else seq = con->seq; if (seq < init_seq) init_seq = seq; } } console_unlock(); } } return init_seq; } #define console_first() \ hlist_entry(console_list.first, struct console, node) static int unregister_console_locked(struct console *console); /* * The console driver calls this routine during kernel initialization * to register the console printing procedure with printk() and to * print any messages that were printed by the kernel before the * console driver was initialized. * * This can happen pretty early during the boot process (because of * early_printk) - sometimes before setup_arch() completes - be careful * of what kernel features are used - they may not be initialised yet. * * There are two types of consoles - bootconsoles (early_printk) and * "real" consoles (everything which is not a bootconsole) which are * handled differently. * - Any number of bootconsoles can be registered at any time. * - As soon as a "real" console is registered, all bootconsoles * will be unregistered automatically. * - Once a "real" console is registered, any attempt to register a * bootconsoles will be rejected */ void register_console(struct console *newcon) { bool use_device_lock = (newcon->flags & CON_NBCON) && newcon->write_atomic; bool bootcon_registered = false; bool realcon_registered = false; struct console *con; unsigned long flags; u64 init_seq; int err; console_list_lock(); for_each_console(con) { if (WARN(con == newcon, "console '%s%d' already registered\n", con->name, con->index)) { goto unlock; } if (con->flags & CON_BOOT) bootcon_registered = true; else realcon_registered = true; } /* Do not register boot consoles when there already is a real one. */ if ((newcon->flags & CON_BOOT) && realcon_registered) { pr_info("Too late to register bootconsole %s%d\n", newcon->name, newcon->index); goto unlock; } if (newcon->flags & CON_NBCON) { /* * Ensure the nbcon console buffers can be allocated * before modifying any global data. */ if (!nbcon_alloc(newcon)) goto unlock; } /* * See if we want to enable this console driver by default. * * Nope when a console is preferred by the command line, device * tree, or SPCR. * * The first real console with tty binding (driver) wins. More * consoles might get enabled before the right one is found. * * Note that a console with tty binding will have CON_CONSDEV * flag set and will be first in the list. 
*/ if (preferred_console < 0) { if (hlist_empty(&console_list) || !console_first()->device || console_first()->flags & CON_BOOT) { try_enable_default_console(newcon); } } /* See if this console matches one we selected on the command line */ err = try_enable_preferred_console(newcon, true); /* If not, try to match against the platform default(s) */ if (err == -ENOENT) err = try_enable_preferred_console(newcon, false); /* printk() messages are not printed to the Braille console. */ if (err || newcon->flags & CON_BRL) { if (newcon->flags & CON_NBCON) nbcon_free(newcon); goto unlock; } /* * If we have a bootconsole, and are switching to a real console, * don't print everything out again, since when the boot console, and * the real console are the same physical device, it's annoying to * see the beginning boot messages twice */ if (bootcon_registered && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) { newcon->flags &= ~CON_PRINTBUFFER; } newcon->dropped = 0; init_seq = get_init_console_seq(newcon, bootcon_registered); if (newcon->flags & CON_NBCON) { have_nbcon_console = true; nbcon_seq_force(newcon, init_seq); } else { have_legacy_console = true; newcon->seq = init_seq; } if (newcon->flags & CON_BOOT) have_boot_console = true; /* * If another context is actively using the hardware of this new * console, it will not be aware of the nbcon synchronization. This * is a risk that two contexts could access the hardware * simultaneously if this new console is used for atomic printing * and the other context is still using the hardware. * * Use the driver synchronization to ensure that the hardware is not * in use while this new console transitions to being registered. */ if (use_device_lock) newcon->device_lock(newcon, &flags); /* * Put this console in the list - keep the * preferred driver at the head of the list. */ if (hlist_empty(&console_list)) { /* Ensure CON_CONSDEV is always set for the head. */ newcon->flags |= CON_CONSDEV; hlist_add_head_rcu(&newcon->node, &console_list); } else if (newcon->flags & CON_CONSDEV) { /* Only the new head can have CON_CONSDEV set. */ console_srcu_write_flags(console_first(), console_first()->flags & ~CON_CONSDEV); hlist_add_head_rcu(&newcon->node, &console_list); } else { hlist_add_behind_rcu(&newcon->node, console_list.first); } /* * No need to synchronize SRCU here! The caller does not rely * on all contexts being able to see the new console before * register_console() completes. */ /* This new console is now registered. */ if (use_device_lock) newcon->device_unlock(newcon, flags); console_sysfs_notify(); /* * By unregistering the bootconsoles after we enable the real console * we get the "console xxx enabled" message on all the consoles - * boot consoles, real consoles, etc - this is to ensure that end * users know there might be something in the kernel's log buffer that * went to the bootconsole (that they do not see on the real console) */ con_printk(KERN_INFO, newcon, "enabled\n"); if (bootcon_registered && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) && !keep_bootcon) { struct hlist_node *tmp; hlist_for_each_entry_safe(con, tmp, &console_list, node) { if (con->flags & CON_BOOT) unregister_console_locked(con); } } /* Changed console list, may require printer threads to start/stop. */ printk_kthreads_check_locked(); unlock: console_list_unlock(); } EXPORT_SYMBOL(register_console); /* Must be called under console_list_lock(). 
*/ static int unregister_console_locked(struct console *console) { bool use_device_lock = (console->flags & CON_NBCON) && console->write_atomic; bool found_legacy_con = false; bool found_nbcon_con = false; bool found_boot_con = false; unsigned long flags; struct console *c; int res; lockdep_assert_console_list_lock_held(); con_printk(KERN_INFO, console, "disabled\n"); res = _braille_unregister_console(console); if (res < 0) return res; if (res > 0) return 0; if (!console_is_registered_locked(console)) res = -ENODEV; else if (console_is_usable(console, console->flags, true)) __pr_flush(console, 1000, true); /* Disable it unconditionally */ console_srcu_write_flags(console, console->flags & ~CON_ENABLED); if (res < 0) return res; /* * Use the driver synchronization to ensure that the hardware is not * in use while this console transitions to being unregistered. */ if (use_device_lock) console->device_lock(console, &flags); hlist_del_init_rcu(&console->node); if (use_device_lock) console->device_unlock(console, flags); /* * <HISTORICAL> * If this isn't the last console and it has CON_CONSDEV set, we * need to set it on the next preferred console. * </HISTORICAL> * * The above makes no sense as there is no guarantee that the next * console has any device attached. Oh well.... */ if (!hlist_empty(&console_list) && console->flags & CON_CONSDEV) console_srcu_write_flags(console_first(), console_first()->flags | CON_CONSDEV); /* * Ensure that all SRCU list walks have completed. All contexts * must not be able to see this console in the list so that any * exit/cleanup routines can be performed safely. */ synchronize_srcu(&console_srcu); if (console->flags & CON_NBCON) nbcon_free(console); console_sysfs_notify(); if (console->exit) res = console->exit(console); /* * With this console gone, the global flags tracking registered * console types may have changed. Update them. */ for_each_console(c) { if (c->flags & CON_BOOT) found_boot_con = true; if (c->flags & CON_NBCON) found_nbcon_con = true; else found_legacy_con = true; } if (!found_boot_con) have_boot_console = found_boot_con; if (!found_legacy_con) have_legacy_console = found_legacy_con; if (!found_nbcon_con) have_nbcon_console = found_nbcon_con; /* Changed console list, may require printer threads to start/stop. */ printk_kthreads_check_locked(); return res; } int unregister_console(struct console *console) { int res; console_list_lock(); res = unregister_console_locked(console); console_list_unlock(); return res; } EXPORT_SYMBOL(unregister_console); /** * console_force_preferred_locked - force a registered console preferred * @con: The registered console to force preferred. * * Must be called under console_list_lock(). */ void console_force_preferred_locked(struct console *con) { struct console *cur_pref_con; if (!console_is_registered_locked(con)) return; cur_pref_con = console_first(); /* Already preferred? */ if (cur_pref_con == con) return; /* * Delete, but do not re-initialize the entry. This allows the console * to continue to appear registered (via any hlist_unhashed_lockless() * checks), even though it was briefly removed from the console list. */ hlist_del_rcu(&con->node); /* * Ensure that all SRCU list walks have completed so that the console * can be added to the beginning of the console list and its forward * list pointer can be re-initialized. */ synchronize_srcu(&console_srcu); con->flags |= CON_CONSDEV; WARN_ON(!con->device); /* Only the new head can have CON_CONSDEV set. 
*/ console_srcu_write_flags(cur_pref_con, cur_pref_con->flags & ~CON_CONSDEV); hlist_add_head_rcu(&con->node, &console_list); } EXPORT_SYMBOL(console_force_preferred_locked); /* * Initialize the console device. This is called *early*, so * we can't necessarily depend on lots of kernel help here. * Just do some early initializations, and do the complex setup * later. */ void __init console_init(void) { int ret; initcall_t call; initcall_entry_t *ce; #ifdef CONFIG_NULL_TTY_DEFAULT_CONSOLE if (!console_set_on_cmdline) add_preferred_console("ttynull", 0, NULL); #endif /* Setup the default TTY line discipline. */ n_tty_init(); /* * set up the console device so that later boot sequences can * inform about problems etc.. */ ce = __con_initcall_start; trace_initcall_level("console"); while (ce < __con_initcall_end) { call = initcall_from_entry(ce); trace_initcall_start(call); ret = call(); trace_initcall_finish(call, ret); ce++; } } /* * Some boot consoles access data that is in the init section and which will * be discarded after the initcalls have been run. To make sure that no code * will access this data, unregister the boot consoles in a late initcall. * * If for some reason, such as deferred probe or the driver being a loadable * module, the real console hasn't registered yet at this point, there will * be a brief interval in which no messages are logged to the console, which * makes it difficult to diagnose problems that occur during this time. * * To mitigate this problem somewhat, only unregister consoles whose memory * intersects with the init section. Note that all other boot consoles will * get unregistered when the real preferred console is registered. */ static int __init printk_late_init(void) { struct hlist_node *tmp; struct console *con; int ret; console_list_lock(); hlist_for_each_entry_safe(con, tmp, &console_list, node) { if (!(con->flags & CON_BOOT)) continue; /* Check addresses that might be used for enabled consoles. */ if (init_section_intersects(con, sizeof(*con)) || init_section_contains(con->write, 0) || init_section_contains(con->read, 0) || init_section_contains(con->device, 0) || init_section_contains(con->unblank, 0) || init_section_contains(con->data, 0)) { /* * Please, consider moving the reported consoles out * of the init section. */ pr_warn("bootconsole [%s%d] uses init memory and must be disabled even before the real one is ready\n", con->name, con->index); unregister_console_locked(con); } } console_list_unlock(); ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL, console_cpu_notify); WARN_ON(ret < 0); ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "printk:online", console_cpu_notify, NULL); WARN_ON(ret < 0); printk_sysctl_init(); return 0; } late_initcall(printk_late_init); #if defined CONFIG_PRINTK /* If @con is specified, only wait for that console. Otherwise wait for all. */ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { unsigned long timeout_jiffies = msecs_to_jiffies(timeout_ms); unsigned long remaining_jiffies = timeout_jiffies; struct console_flush_type ft; struct console *c; u64 last_diff = 0; u64 printk_seq; short flags; int cookie; u64 diff; u64 seq; /* Sorry, pr_flush() will not work this early. */ if (system_state < SYSTEM_SCHEDULING) return false; might_sleep(); seq = prb_next_reserve_seq(prb); /* Flush the consoles so that records up to @seq are printed. 
*/ printk_get_console_flush_type(&ft); if (ft.nbcon_atomic) nbcon_atomic_flush_pending(); if (ft.legacy_direct) { console_lock(); console_unlock(); } for (;;) { unsigned long begin_jiffies; unsigned long slept_jiffies; diff = 0; /* * Hold the console_lock to guarantee safe access to * console->seq. Releasing console_lock flushes more * records in case @seq is still not printed on all * usable consoles. * * Holding the console_lock is not necessary if there * are no legacy or boot consoles. However, such a * console could register at any time. Always hold the * console_lock as a precaution rather than * synchronizing against register_console(). */ console_lock(); cookie = console_srcu_read_lock(); for_each_console_srcu(c) { if (con && con != c) continue; flags = console_srcu_read_flags(c); /* * If consoles are not usable, it cannot be expected * that they make forward progress, so only increment * @diff for usable consoles. */ if (!console_is_usable(c, flags, true) && !console_is_usable(c, flags, false)) { continue; } if (flags & CON_NBCON) { printk_seq = nbcon_seq_read(c); } else { printk_seq = c->seq; } if (printk_seq < seq) diff += seq - printk_seq; } console_srcu_read_unlock(cookie); if (diff != last_diff && reset_on_progress) remaining_jiffies = timeout_jiffies; console_unlock(); /* Note: @diff is 0 if there are no usable consoles. */ if (diff == 0 || remaining_jiffies == 0) break; /* msleep(1) might sleep much longer. Check time by jiffies. */ begin_jiffies = jiffies; msleep(1); slept_jiffies = jiffies - begin_jiffies; remaining_jiffies -= min(slept_jiffies, remaining_jiffies); last_diff = diff; } return (diff == 0); } /** * pr_flush() - Wait for printing threads to catch up. * * @timeout_ms: The maximum time (in ms) to wait. * @reset_on_progress: Reset the timeout if forward progress is seen. * * A value of 0 for @timeout_ms means no waiting will occur. A value of -1 * represents infinite waiting. * * If @reset_on_progress is true, the timeout will be reset whenever any * printer has been seen to make some forward progress. * * Context: Process context. May sleep while acquiring console lock. * Return: true if all usable printers are caught up. */ bool pr_flush(int timeout_ms, bool reset_on_progress) { return __pr_flush(NULL, timeout_ms, reset_on_progress); } /* * Delayed printk version, for scheduler-internal messages: */ #define PRINTK_PENDING_WAKEUP 0x01 #define PRINTK_PENDING_OUTPUT 0x02 static DEFINE_PER_CPU(int, printk_pending); static void wake_up_klogd_work_func(struct irq_work *irq_work) { int pending = this_cpu_xchg(printk_pending, 0); if (pending & PRINTK_PENDING_OUTPUT) { if (force_legacy_kthread()) { if (printk_legacy_kthread) wake_up_interruptible(&legacy_wait); } else { if (console_trylock()) console_unlock(); } } if (pending & PRINTK_PENDING_WAKEUP) wake_up_interruptible(&log_wait); } static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = IRQ_WORK_INIT_LAZY(wake_up_klogd_work_func); static void __wake_up_klogd(int val) { if (!printk_percpu_data_ready()) return; preempt_disable(); /* * Guarantee any new records can be seen by tasks preparing to wait * before this context checks if the wait queue is empty. * * The full memory barrier within wq_has_sleeper() pairs with the full * memory barrier within set_current_state() of * prepare_to_wait_event(), which is called after ___wait_event() adds * the waiter but before it has checked the wait condition. * * This pairs with devkmsg_read:A and syslog_print:A. 
*/ if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */ (val & PRINTK_PENDING_OUTPUT)) { this_cpu_or(printk_pending, val); irq_work_queue(this_cpu_ptr(&wake_up_klogd_work)); } preempt_enable(); } /** * wake_up_klogd - Wake kernel logging daemon * * Use this function when new records have been added to the ringbuffer * and the console printing of those records has already occurred or is * known to be handled by some other context. This function will only * wake the logging daemon. * * Context: Any context. */ void wake_up_klogd(void) { __wake_up_klogd(PRINTK_PENDING_WAKEUP); } /** * defer_console_output - Wake kernel logging daemon and trigger * console printing in a deferred context * * Use this function when new records have been added to the ringbuffer, * this context is responsible for console printing those records, but * the current context is not allowed to perform the console printing. * Trigger an irq_work context to perform the console printing. This * function also wakes the logging daemon. * * Context: Any context. */ void defer_console_output(void) { /* * New messages may have been added directly to the ringbuffer * using vprintk_store(), so wake any waiters as well. */ __wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT); } void printk_trigger_flush(void) { defer_console_output(); } int vprintk_deferred(const char *fmt, va_list args) { return vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args); } int _printk_deferred(const char *fmt, ...) { va_list args; int r; va_start(args, fmt); r = vprintk_deferred(fmt, args); va_end(args); return r; } /* * printk rate limiting, lifted from the networking subsystem. * * This enforces a rate limit: not more than 10 kernel messages * every 5s to make a denial-of-service attack impossible. */ DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10); int __printk_ratelimit(const char *func) { return ___ratelimit(&printk_ratelimit_state, func); } EXPORT_SYMBOL(__printk_ratelimit); /** * printk_timed_ratelimit - caller-controlled printk ratelimiting * @caller_jiffies: pointer to caller's state * @interval_msecs: minimum interval between prints * * printk_timed_ratelimit() returns true if more than @interval_msecs * milliseconds have elapsed since the last time printk_timed_ratelimit() * returned true. */ bool printk_timed_ratelimit(unsigned long *caller_jiffies, unsigned int interval_msecs) { unsigned long elapsed = jiffies - *caller_jiffies; if (*caller_jiffies && elapsed <= msecs_to_jiffies(interval_msecs)) return false; *caller_jiffies = jiffies; return true; } EXPORT_SYMBOL(printk_timed_ratelimit); static DEFINE_SPINLOCK(dump_list_lock); static LIST_HEAD(dump_list); /** * kmsg_dump_register - register a kernel log dumper. * @dumper: pointer to the kmsg_dumper structure * * Adds a kernel log dumper to the system. The dump callback in the * structure will be called when the kernel oopses or panics and must be * set. Returns zero on success and %-EINVAL or %-EBUSY otherwise. */ int kmsg_dump_register(struct kmsg_dumper *dumper) { unsigned long flags; int err = -EBUSY; /* The dump callback needs to be set */ if (!dumper->dump) return -EINVAL; spin_lock_irqsave(&dump_list_lock, flags); /* Don't allow registering multiple times */ if (!dumper->registered) { dumper->registered = 1; list_add_tail_rcu(&dumper->list, &dump_list); err = 0; } spin_unlock_irqrestore(&dump_list_lock, flags); return err; } EXPORT_SYMBOL_GPL(kmsg_dump_register); /** * kmsg_dump_unregister - unregister a kmsg dumper. 
* @dumper: pointer to the kmsg_dumper structure * * Removes a dump device from the system. Returns zero on success and * %-EINVAL otherwise. */ int kmsg_dump_unregister(struct kmsg_dumper *dumper) { unsigned long flags; int err = -EINVAL; spin_lock_irqsave(&dump_list_lock, flags); if (dumper->registered) { dumper->registered = 0; list_del_rcu(&dumper->list); err = 0; } spin_unlock_irqrestore(&dump_list_lock, flags); synchronize_rcu(); return err; } EXPORT_SYMBOL_GPL(kmsg_dump_unregister); static bool always_kmsg_dump; module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR); const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason) { switch (reason) { case KMSG_DUMP_PANIC: return "Panic"; case KMSG_DUMP_OOPS: return "Oops"; case KMSG_DUMP_EMERG: return "Emergency"; case KMSG_DUMP_SHUTDOWN: return "Shutdown"; default: return "Unknown"; } } EXPORT_SYMBOL_GPL(kmsg_dump_reason_str); /** * kmsg_dump_desc - dump kernel log to kernel message dumpers. * @reason: the reason (oops, panic etc) for dumping * @desc: a short string to describe what caused the panic or oops. Can be NULL * if no additional description is available. * * Call each of the registered dumper's dump() callback, which can * retrieve the kmsg records with kmsg_dump_get_line() or * kmsg_dump_get_buffer(). */ void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc) { struct kmsg_dumper *dumper; struct kmsg_dump_detail detail = { .reason = reason, .description = desc}; rcu_read_lock(); list_for_each_entry_rcu(dumper, &dump_list, list) { enum kmsg_dump_reason max_reason = dumper->max_reason; /* * If client has not provided a specific max_reason, default * to KMSG_DUMP_OOPS, unless always_kmsg_dump was set. */ if (max_reason == KMSG_DUMP_UNDEF) { max_reason = always_kmsg_dump ? KMSG_DUMP_MAX : KMSG_DUMP_OOPS; } if (reason > max_reason) continue; /* invoke dumper which will iterate over records */ dumper->dump(dumper, &detail); } rcu_read_unlock(); } /** * kmsg_dump_get_line - retrieve one kmsg log line * @iter: kmsg dump iterator * @syslog: include the "<4>" prefixes * @line: buffer to copy the line to * @size: maximum size of the buffer * @len: length of line placed into buffer * * Start at the beginning of the kmsg buffer, with the oldest kmsg * record, and copy one record into the provided buffer. * * Consecutive calls will return the next available record moving * towards the end of the buffer with the youngest messages. * * A return value of FALSE indicates that there are no more records to * read. */ bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog, char *line, size_t size, size_t *len) { u64 min_seq = latched_seq_read_nolock(&clear_seq); struct printk_info info; unsigned int line_count; struct printk_record r; size_t l = 0; bool ret = false; if (iter->cur_seq < min_seq) iter->cur_seq = min_seq; prb_rec_init_rd(&r, &info, line, size); /* Read text or count text lines? 
*/ if (line) { if (!prb_read_valid(prb, iter->cur_seq, &r)) goto out; l = record_print_text(&r, syslog, printk_time); } else { if (!prb_read_valid_info(prb, iter->cur_seq, &info, &line_count)) { goto out; } l = get_record_print_text_size(&info, line_count, syslog, printk_time); } iter->cur_seq = r.info->seq + 1; ret = true; out: if (len) *len = l; return ret; } EXPORT_SYMBOL_GPL(kmsg_dump_get_line); /** * kmsg_dump_get_buffer - copy kmsg log lines * @iter: kmsg dump iterator * @syslog: include the "<4>" prefixes * @buf: buffer to copy the line to * @size: maximum size of the buffer * @len_out: length of line placed into buffer * * Start at the end of the kmsg buffer and fill the provided buffer * with as many of the *youngest* kmsg records that fit into it. * If the buffer is large enough, all available kmsg records will be * copied with a single call. * * Consecutive calls will fill the buffer with the next block of * available older records, not including the earlier retrieved ones. * * A return value of FALSE indicates that there are no more records to * read. */ bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog, char *buf, size_t size, size_t *len_out) { u64 min_seq = latched_seq_read_nolock(&clear_seq); struct printk_info info; struct printk_record r; u64 seq; u64 next_seq; size_t len = 0; bool ret = false; bool time = printk_time; if (!buf || !size) goto out; if (iter->cur_seq < min_seq) iter->cur_seq = min_seq; if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) { if (info.seq != iter->cur_seq) { /* messages are gone, move to first available one */ iter->cur_seq = info.seq; } } /* last entry */ if (iter->cur_seq >= iter->next_seq) goto out; /* * Find first record that fits, including all following records, * into the user-provided buffer for this dump. Pass in size-1 * because this function (by way of record_print_text()) will * not write more than size-1 bytes of text into @buf. */ seq = find_first_fitting_seq(iter->cur_seq, iter->next_seq, size - 1, syslog, time); /* * Next kmsg_dump_get_buffer() invocation will dump block of * older records stored right before this one. */ next_seq = seq; prb_rec_init_rd(&r, &info, buf, size); prb_for_each_record(seq, prb, seq, &r) { if (r.info->seq >= iter->next_seq) break; len += record_print_text(&r, syslog, time); /* Adjust record to store to remaining buffer space. */ prb_rec_init_rd(&r, &info, buf + len, size - len); } iter->next_seq = next_seq; ret = true; out: if (len_out) *len_out = len; return ret; } EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer); /** * kmsg_dump_rewind - reset the iterator * @iter: kmsg dump iterator * * Reset the dumper's iterator so that kmsg_dump_get_line() and * kmsg_dump_get_buffer() can be called again and used multiple * times within the same dumper.dump() callback. */ void kmsg_dump_rewind(struct kmsg_dump_iter *iter) { iter->cur_seq = latched_seq_read_nolock(&clear_seq); iter->next_seq = prb_next_seq(prb); } EXPORT_SYMBOL_GPL(kmsg_dump_rewind); /** * console_try_replay_all - try to replay kernel log on consoles * * Try to obtain lock on console subsystem and replay all * available records in printk buffer on the consoles. * Does nothing if lock is not obtained. * * Context: Any, except for NMI. 
*/ void console_try_replay_all(void) { struct console_flush_type ft; printk_get_console_flush_type(&ft); if (console_trylock()) { __console_rewind_all(); if (ft.nbcon_atomic) nbcon_atomic_flush_pending(); if (ft.nbcon_offload) nbcon_kthreads_wake(); if (ft.legacy_offload) defer_console_output(); /* Consoles are flushed as part of console_unlock(). */ console_unlock(); } } #endif #ifdef CONFIG_SMP static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1); static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0); bool is_printk_cpu_sync_owner(void) { return (atomic_read(&printk_cpu_sync_owner) == raw_smp_processor_id()); } /** * __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant * spinning lock is not owned by any CPU. * * Context: Any context. */ void __printk_cpu_sync_wait(void) { do { cpu_relax(); } while (atomic_read(&printk_cpu_sync_owner) != -1); } EXPORT_SYMBOL(__printk_cpu_sync_wait); /** * __printk_cpu_sync_try_get() - Try to acquire the printk cpu-reentrant * spinning lock. * * If no processor has the lock, the calling processor takes the lock and * becomes the owner. If the calling processor is already the owner of the * lock, this function succeeds immediately. * * Context: Any context. Expects interrupts to be disabled. * Return: 1 on success, otherwise 0. */ int __printk_cpu_sync_try_get(void) { int cpu; int old; cpu = smp_processor_id(); /* * Guarantee loads and stores from this CPU when it is the lock owner * are _not_ visible to the previous lock owner. This pairs with * __printk_cpu_sync_put:B. * * Memory barrier involvement: * * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B, * then __printk_cpu_sync_put:A can never read from * __printk_cpu_sync_try_get:B. * * Relies on: * * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B * of the previous CPU * matching * ACQUIRE from __printk_cpu_sync_try_get:A to * __printk_cpu_sync_try_get:B of this CPU */ old = atomic_cmpxchg_acquire(&printk_cpu_sync_owner, -1, cpu); /* LMM(__printk_cpu_sync_try_get:A) */ if (old == -1) { /* * This CPU is now the owner and begins loading/storing * data: LMM(__printk_cpu_sync_try_get:B) */ return 1; } else if (old == cpu) { /* This CPU is already the owner. */ atomic_inc(&printk_cpu_sync_nested); return 1; } return 0; } EXPORT_SYMBOL(__printk_cpu_sync_try_get); /** * __printk_cpu_sync_put() - Release the printk cpu-reentrant spinning lock. * * The calling processor must be the owner of the lock. * * Context: Any context. Expects interrupts to be disabled. */ void __printk_cpu_sync_put(void) { if (atomic_read(&printk_cpu_sync_nested)) { atomic_dec(&printk_cpu_sync_nested); return; } /* * This CPU is finished loading/storing data: * LMM(__printk_cpu_sync_put:A) */ /* * Guarantee loads and stores from this CPU when it was the * lock owner are visible to the next lock owner. This pairs * with __printk_cpu_sync_try_get:A. * * Memory barrier involvement: * * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B, * then __printk_cpu_sync_try_get:B reads from __printk_cpu_sync_put:A. * * Relies on: * * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B * of this CPU * matching * ACQUIRE from __printk_cpu_sync_try_get:A to * __printk_cpu_sync_try_get:B of the next CPU */ atomic_set_release(&printk_cpu_sync_owner, -1); /* LMM(__printk_cpu_sync_put:B) */ } EXPORT_SYMBOL(__printk_cpu_sync_put); #endif /* CONFIG_SMP */
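/*
 * Illustrative usage sketch (not part of printk.c): callers normally take the
 * cpu-reentrant lock above through the printk_cpu_sync_get_irqsave() /
 * printk_cpu_sync_put_irqrestore() helpers declared in <linux/printk.h>.
 * dump_my_state() and the printed values are hypothetical; the point is only
 * to show how grouped output is kept from interleaving across CPUs.
 */
#include <linux/printk.h>

static void dump_my_state(void)
{
	unsigned long flags;

	/* Spin until this CPU owns the lock; reentrant on the owning CPU. */
	printk_cpu_sync_get_irqsave(flags);

	pr_info("my-state: step 1\n");
	pr_info("my-state: step 2\n");

	/* Release the lock and restore the saved interrupt state. */
	printk_cpu_sync_put_irqrestore(flags);
}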
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PROJID_H
#define _LINUX_PROJID_H

/*
 * A set of types for the internal kernel types representing project ids.
 *
 * The types defined in this header allow distinguishing which project ids in
 * the kernel are values used by userspace and which project id values are
 * the internal kernel values. With the addition of user namespaces the values
 * can be different. Using the type system makes it possible for the compiler
 * to detect when we overlook these differences.
 *
 */
#include <linux/types.h>

struct user_namespace;
extern struct user_namespace init_user_ns;

typedef __kernel_uid32_t projid_t;

typedef struct {
	projid_t val;
} kprojid_t;

static inline projid_t __kprojid_val(kprojid_t projid)
{
	return projid.val;
}

#define KPROJIDT_INIT(value) (kprojid_t){ value }

#define INVALID_PROJID KPROJIDT_INIT(-1)
#define OVERFLOW_PROJID 65534

static inline bool projid_eq(kprojid_t left, kprojid_t right)
{
	return __kprojid_val(left) == __kprojid_val(right);
}

static inline bool projid_lt(kprojid_t left, kprojid_t right)
{
	return __kprojid_val(left) < __kprojid_val(right);
}

static inline bool projid_valid(kprojid_t projid)
{
	return !projid_eq(projid, INVALID_PROJID);
}

#ifdef CONFIG_USER_NS

extern kprojid_t make_kprojid(struct user_namespace *from, projid_t projid);

extern projid_t from_kprojid(struct user_namespace *to, kprojid_t projid);
extern projid_t from_kprojid_munged(struct user_namespace *to, kprojid_t projid);

static inline bool kprojid_has_mapping(struct user_namespace *ns, kprojid_t projid)
{
	return from_kprojid(ns, projid) != (projid_t)-1;
}

#else

static inline kprojid_t make_kprojid(struct user_namespace *from, projid_t projid)
{
	return KPROJIDT_INIT(projid);
}

static inline projid_t from_kprojid(struct user_namespace *to, kprojid_t kprojid)
{
	return __kprojid_val(kprojid);
}

static inline projid_t from_kprojid_munged(struct user_namespace *to, kprojid_t kprojid)
{
	projid_t projid = from_kprojid(to, kprojid);
	if (projid == (projid_t)-1)
		projid = OVERFLOW_PROJID;
	return projid;
}

static inline bool kprojid_has_mapping(struct user_namespace *ns, kprojid_t projid)
{
	return true;
}

#endif /* CONFIG_USER_NS */

#endif /* _LINUX_PROJID_H */
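/*
 * Illustrative usage sketch (not part of projid.h): convert a userspace
 * project id into the kernel-internal kprojid_t and back, assuming
 * init_user_ns as the namespace. example_projid_roundtrip() is hypothetical.
 */
#include <linux/projid.h>

static projid_t example_projid_roundtrip(projid_t id)
{
	/* Map the userspace value into the kernel-internal type. */
	kprojid_t kprojid = make_kprojid(&init_user_ns, id);

	/* make_kprojid() yields INVALID_PROJID when there is no mapping. */
	if (!projid_valid(kprojid))
		return OVERFLOW_PROJID;

	/*
	 * from_kprojid_munged() converts back to a userspace value,
	 * substituting OVERFLOW_PROJID when no mapping exists.
	 */
	return from_kprojid_munged(&init_user_ns, kprojid);
}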
// SPDX-License-Identifier: GPL-2.0-only /* * hid-cp2112.c - Silicon Labs HID USB to SMBus master bridge * Copyright (c) 2013,2014 Uplogix, Inc. * David Barksdale <dbarksdale@uplogix.com> */ /* * The Silicon Labs CP2112 chip is a USB HID device which provides an * SMBus controller for talking to slave devices and 8 GPIO pins. The * host communicates with the CP2112 via raw HID reports.
* * Data Sheet: * https://www.silabs.com/Support%20Documents/TechnicalDocs/CP2112.pdf * Programming Interface Specification: * https://www.silabs.com/documents/public/application-notes/an495-cp2112-interface-specification.pdf */ #include <linux/bitops.h> #include <linux/cleanup.h> #include <linux/gpio/driver.h> #include <linux/hid.h> #include <linux/hidraw.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/nls.h> #include <linux/string_choices.h> #include <linux/usb/ch9.h> #include "hid-ids.h" #define CP2112_REPORT_MAX_LENGTH 64 #define CP2112_GPIO_CONFIG_LENGTH 5 #define CP2112_GPIO_GET_LENGTH 2 #define CP2112_GPIO_SET_LENGTH 3 #define CP2112_GPIO_MAX_GPIO 8 #define CP2112_GPIO_ALL_GPIO_MASK GENMASK(7, 0) enum { CP2112_GPIO_CONFIG = 0x02, CP2112_GPIO_GET = 0x03, CP2112_GPIO_SET = 0x04, CP2112_GET_VERSION_INFO = 0x05, CP2112_SMBUS_CONFIG = 0x06, CP2112_DATA_READ_REQUEST = 0x10, CP2112_DATA_WRITE_READ_REQUEST = 0x11, CP2112_DATA_READ_FORCE_SEND = 0x12, CP2112_DATA_READ_RESPONSE = 0x13, CP2112_DATA_WRITE_REQUEST = 0x14, CP2112_TRANSFER_STATUS_REQUEST = 0x15, CP2112_TRANSFER_STATUS_RESPONSE = 0x16, CP2112_CANCEL_TRANSFER = 0x17, CP2112_LOCK_BYTE = 0x20, CP2112_USB_CONFIG = 0x21, CP2112_MANUFACTURER_STRING = 0x22, CP2112_PRODUCT_STRING = 0x23, CP2112_SERIAL_STRING = 0x24, }; enum { STATUS0_IDLE = 0x00, STATUS0_BUSY = 0x01, STATUS0_COMPLETE = 0x02, STATUS0_ERROR = 0x03, }; enum { STATUS1_TIMEOUT_NACK = 0x00, STATUS1_TIMEOUT_BUS = 0x01, STATUS1_ARBITRATION_LOST = 0x02, STATUS1_READ_INCOMPLETE = 0x03, STATUS1_WRITE_INCOMPLETE = 0x04, STATUS1_SUCCESS = 0x05, }; struct cp2112_smbus_config_report { u8 report; /* CP2112_SMBUS_CONFIG */ __be32 clock_speed; /* Hz */ u8 device_address; /* Stored in the upper 7 bits */ u8 auto_send_read; /* 1 = enabled, 0 = disabled */ __be16 write_timeout; /* ms, 0 = no timeout */ __be16 read_timeout; /* ms, 0 = no timeout */ u8 scl_low_timeout; /* 1 = enabled, 0 = disabled */ __be16 retry_time; /* # of retries, 0 = no limit */ } __packed; struct cp2112_usb_config_report { u8 report; /* CP2112_USB_CONFIG */ __le16 vid; /* Vendor ID */ __le16 pid; /* Product ID */ u8 max_power; /* Power requested in 2mA units */ u8 power_mode; /* 0x00 = bus powered 0x01 = self powered & regulator off 0x02 = self powered & regulator on */ u8 release_major; u8 release_minor; u8 mask; /* What fields to program */ } __packed; struct cp2112_read_req_report { u8 report; /* CP2112_DATA_READ_REQUEST */ u8 slave_address; __be16 length; } __packed; struct cp2112_write_read_req_report { u8 report; /* CP2112_DATA_WRITE_READ_REQUEST */ u8 slave_address; __be16 length; u8 target_address_length; u8 target_address[16]; } __packed; struct cp2112_write_req_report { u8 report; /* CP2112_DATA_WRITE_REQUEST */ u8 slave_address; u8 length; u8 data[61]; } __packed; struct cp2112_force_read_report { u8 report; /* CP2112_DATA_READ_FORCE_SEND */ __be16 length; } __packed; struct cp2112_xfer_status_report { u8 report; /* CP2112_TRANSFER_STATUS_RESPONSE */ u8 status0; /* STATUS0_* */ u8 status1; /* STATUS1_* */ __be16 retries; __be16 length; } __packed; struct cp2112_string_report { u8 dummy; /* force .string to be aligned */ struct_group_attr(contents, __packed, u8 report; /* CP2112_*_STRING */ u8 length; /* length in bytes of everything after .report */ u8 type; /* USB_DT_STRING */ wchar_t string[30]; /* UTF16_LITTLE_ENDIAN string */ ); } __packed; /* Number of times to request transfer status before giving up waiting for a transfer to complete. 
This may need to be changed if SMBUS clock, retries, or read/write/scl_low timeout settings are changed. */ static const int XFER_STATUS_RETRIES = 10; /* Time in ms to wait for a CP2112_DATA_READ_RESPONSE or CP2112_TRANSFER_STATUS_RESPONSE. */ static const int RESPONSE_TIMEOUT = 50; static const struct hid_device_id cp2112_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) }, { } }; MODULE_DEVICE_TABLE(hid, cp2112_devices); struct cp2112_device { struct i2c_adapter adap; struct hid_device *hdev; wait_queue_head_t wait; u8 read_data[61]; u8 read_length; u8 hwversion; int xfer_status; atomic_t read_avail; atomic_t xfer_avail; struct gpio_chip gc; u8 *in_out_buffer; struct mutex lock; bool gpio_poll; struct delayed_work gpio_poll_worker; unsigned long irq_mask; u8 gpio_prev_state; }; static int gpio_push_pull = CP2112_GPIO_ALL_GPIO_MASK; module_param(gpio_push_pull, int, 0644); MODULE_PARM_DESC(gpio_push_pull, "GPIO push-pull configuration bitmask"); static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset) { struct cp2112_device *dev = gpiochip_get_data(chip); struct hid_device *hdev = dev->hdev; u8 *buf = dev->in_out_buffer; int ret; guard(mutex)(&dev->lock); ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret != CP2112_GPIO_CONFIG_LENGTH) { hid_err(hdev, "error requesting GPIO config: %d\n", ret); if (ret >= 0) ret = -EIO; return ret; } buf[1] &= ~BIT(offset); buf[2] = gpio_push_pull; ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret != CP2112_GPIO_CONFIG_LENGTH) { hid_err(hdev, "error setting GPIO config: %d\n", ret); if (ret >= 0) ret = -EIO; return ret; } return 0; } static int cp2112_gpio_set_unlocked(struct cp2112_device *dev, unsigned int offset, int value) { struct hid_device *hdev = dev->hdev; u8 *buf = dev->in_out_buffer; int ret; buf[0] = CP2112_GPIO_SET; buf[1] = value ? CP2112_GPIO_ALL_GPIO_MASK : 0; buf[2] = BIT(offset); ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf, CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret < 0) hid_err(hdev, "error setting GPIO values: %d\n", ret); return ret; } static int cp2112_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) { struct cp2112_device *dev = gpiochip_get_data(chip); guard(mutex)(&dev->lock); return cp2112_gpio_set_unlocked(dev, offset, value); } static int cp2112_gpio_get_all(struct gpio_chip *chip) { struct cp2112_device *dev = gpiochip_get_data(chip); struct hid_device *hdev = dev->hdev; u8 *buf = dev->in_out_buffer; int ret; guard(mutex)(&dev->lock); ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret != CP2112_GPIO_GET_LENGTH) { hid_err(hdev, "error requesting GPIO values: %d\n", ret); return ret < 0 ? 
ret : -EIO; } return buf[1]; } static int cp2112_gpio_get(struct gpio_chip *chip, unsigned int offset) { int ret; ret = cp2112_gpio_get_all(chip); if (ret < 0) return ret; return (ret >> offset) & 1; } static int cp2112_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value) { struct cp2112_device *dev = gpiochip_get_data(chip); struct hid_device *hdev = dev->hdev; u8 *buf = dev->in_out_buffer; int ret; guard(mutex)(&dev->lock); ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret != CP2112_GPIO_CONFIG_LENGTH) { hid_err(hdev, "error requesting GPIO config: %d\n", ret); return ret < 0 ? ret : -EIO; } buf[1] |= 1 << offset; buf[2] = gpio_push_pull; ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret < 0) { hid_err(hdev, "error setting GPIO config: %d\n", ret); return ret; } /* * Set gpio value when output direction is already set, * as specified in AN495, Rev. 0.2, cpt. 4.4 */ cp2112_gpio_set_unlocked(dev, offset, value); return 0; } static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number, u8 *data, size_t count, unsigned char report_type) { u8 *buf; int ret; buf = kmalloc(count, GFP_KERNEL); if (!buf) return -ENOMEM; ret = hid_hw_raw_request(hdev, report_number, buf, count, report_type, HID_REQ_GET_REPORT); memcpy(data, buf, count); kfree(buf); return ret; } static int cp2112_hid_output(struct hid_device *hdev, u8 *data, size_t count, unsigned char report_type) { u8 *buf; int ret; buf = kmemdup(data, count, GFP_KERNEL); if (!buf) return -ENOMEM; if (report_type == HID_OUTPUT_REPORT) ret = hid_hw_output_report(hdev, buf, count); else ret = hid_hw_raw_request(hdev, buf[0], buf, count, report_type, HID_REQ_SET_REPORT); kfree(buf); return ret; } static int cp2112_wait(struct cp2112_device *dev, atomic_t *avail) { int ret = 0; /* We have sent either a CP2112_TRANSFER_STATUS_REQUEST or a * CP2112_DATA_READ_FORCE_SEND and we are waiting for the response to * come in cp2112_raw_event or timeout. There will only be one of these * in flight at any one time. The timeout is extremely large and is a * last resort if the CP2112 has died. If we do timeout we don't expect * to receive the response which would cause data races, it's not like * we can do anything about it anyway. 
*/ ret = wait_event_interruptible_timeout(dev->wait, atomic_read(avail), msecs_to_jiffies(RESPONSE_TIMEOUT)); if (-ERESTARTSYS == ret) return ret; if (!ret) return -ETIMEDOUT; atomic_set(avail, 0); return 0; } static int cp2112_xfer_status(struct cp2112_device *dev) { struct hid_device *hdev = dev->hdev; u8 buf[2]; int ret; buf[0] = CP2112_TRANSFER_STATUS_REQUEST; buf[1] = 0x01; atomic_set(&dev->xfer_avail, 0); ret = cp2112_hid_output(hdev, buf, 2, HID_OUTPUT_REPORT); if (ret < 0) { hid_warn(hdev, "Error requesting status: %d\n", ret); return ret; } ret = cp2112_wait(dev, &dev->xfer_avail); if (ret) return ret; return dev->xfer_status; } static int cp2112_read(struct cp2112_device *dev, u8 *data, size_t size) { struct hid_device *hdev = dev->hdev; struct cp2112_force_read_report report; int ret; if (size > sizeof(dev->read_data)) size = sizeof(dev->read_data); report.report = CP2112_DATA_READ_FORCE_SEND; report.length = cpu_to_be16(size); atomic_set(&dev->read_avail, 0); ret = cp2112_hid_output(hdev, &report.report, sizeof(report), HID_OUTPUT_REPORT); if (ret < 0) { hid_warn(hdev, "Error requesting data: %d\n", ret); return ret; } ret = cp2112_wait(dev, &dev->read_avail); if (ret) return ret; hid_dbg(hdev, "read %d of %zd bytes requested\n", dev->read_length, size); if (size > dev->read_length) size = dev->read_length; memcpy(data, dev->read_data, size); return dev->read_length; } static int cp2112_read_req(void *buf, u8 slave_address, u16 length) { struct cp2112_read_req_report *report = buf; if (length < 1 || length > 512) return -EINVAL; report->report = CP2112_DATA_READ_REQUEST; report->slave_address = slave_address << 1; report->length = cpu_to_be16(length); return sizeof(*report); } static int cp2112_write_read_req(void *buf, u8 slave_address, u16 length, u8 command, u8 *data, u8 data_length) { struct cp2112_write_read_req_report *report = buf; if (length < 1 || length > 512 || data_length > sizeof(report->target_address) - 1) return -EINVAL; report->report = CP2112_DATA_WRITE_READ_REQUEST; report->slave_address = slave_address << 1; report->length = cpu_to_be16(length); report->target_address_length = data_length + 1; report->target_address[0] = command; memcpy(&report->target_address[1], data, data_length); return data_length + 6; } static int cp2112_write_req(void *buf, u8 slave_address, u8 command, u8 *data, u8 data_length) { struct cp2112_write_req_report *report = buf; if (data_length > sizeof(report->data) - 1) return -EINVAL; report->report = CP2112_DATA_WRITE_REQUEST; report->slave_address = slave_address << 1; report->length = data_length + 1; report->data[0] = command; memcpy(&report->data[1], data, data_length); return data_length + 4; } static int cp2112_i2c_write_req(void *buf, u8 slave_address, u8 *data, u8 data_length) { struct cp2112_write_req_report *report = buf; if (data_length > sizeof(report->data)) return -EINVAL; report->report = CP2112_DATA_WRITE_REQUEST; report->slave_address = slave_address << 1; report->length = data_length; memcpy(report->data, data, data_length); return data_length + 3; } static int cp2112_i2c_write_read_req(void *buf, u8 slave_address, u8 *addr, int addr_length, int read_length) { struct cp2112_write_read_req_report *report = buf; if (read_length < 1 || read_length > 512 || addr_length > sizeof(report->target_address)) return -EINVAL; report->report = CP2112_DATA_WRITE_READ_REQUEST; report->slave_address = slave_address << 1; report->length = cpu_to_be16(read_length); report->target_address_length = addr_length; 
memcpy(report->target_address, addr, addr_length); return addr_length + 5; } static int cp2112_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct cp2112_device *dev = (struct cp2112_device *)adap->algo_data; struct hid_device *hdev = dev->hdev; u8 buf[64]; ssize_t count; ssize_t read_length = 0; u8 *read_buf = NULL; unsigned int retries; int ret; hid_dbg(hdev, "I2C %d messages\n", num); if (num == 1) { hid_dbg(hdev, "I2C %s %#04x len %d\n", str_read_write(msgs->flags & I2C_M_RD), msgs->addr, msgs->len); if (msgs->flags & I2C_M_RD) { read_length = msgs->len; read_buf = msgs->buf; count = cp2112_read_req(buf, msgs->addr, msgs->len); } else { count = cp2112_i2c_write_req(buf, msgs->addr, msgs->buf, msgs->len); } if (count < 0) return count; } else if (dev->hwversion > 1 && /* no repeated start in rev 1 */ num == 2 && msgs[0].addr == msgs[1].addr && !(msgs[0].flags & I2C_M_RD) && (msgs[1].flags & I2C_M_RD)) { hid_dbg(hdev, "I2C write-read %#04x wlen %d rlen %d\n", msgs[0].addr, msgs[0].len, msgs[1].len); read_length = msgs[1].len; read_buf = msgs[1].buf; count = cp2112_i2c_write_read_req(buf, msgs[0].addr, msgs[0].buf, msgs[0].len, msgs[1].len); if (count < 0) return count; } else { hid_err(hdev, "Multi-message I2C transactions not supported\n"); return -EOPNOTSUPP; } ret = hid_hw_power(hdev, PM_HINT_FULLON); if (ret < 0) { hid_err(hdev, "power management error: %d\n", ret); return ret; } ret = cp2112_hid_output(hdev, buf, count, HID_OUTPUT_REPORT); if (ret < 0) { hid_warn(hdev, "Error starting transaction: %d\n", ret); goto power_normal; } for (retries = 0; retries < XFER_STATUS_RETRIES; ++retries) { ret = cp2112_xfer_status(dev); if (-EBUSY == ret) continue; if (ret < 0) goto power_normal; break; } if (XFER_STATUS_RETRIES <= retries) { hid_warn(hdev, "Transfer timed out, cancelling.\n"); buf[0] = CP2112_CANCEL_TRANSFER; buf[1] = 0x01; ret = cp2112_hid_output(hdev, buf, 2, HID_OUTPUT_REPORT); if (ret < 0) hid_warn(hdev, "Error cancelling transaction: %d\n", ret); ret = -ETIMEDOUT; goto power_normal; } for (count = 0; count < read_length;) { ret = cp2112_read(dev, read_buf + count, read_length - count); if (ret < 0) goto power_normal; if (ret == 0) { hid_err(hdev, "read returned 0\n"); ret = -EIO; goto power_normal; } count += ret; if (count > read_length) { /* * The hardware returned too much data. * This is mostly harmless because cp2112_read() * has a limit check so didn't overrun our * buffer. Nevertheless, we return an error * because something is seriously wrong and * it shouldn't go unnoticed. 
*/ hid_err(hdev, "long read: %d > %zd\n", ret, read_length - count + ret); ret = -EIO; goto power_normal; } } /* return the number of transferred messages */ ret = num; power_normal: hid_hw_power(hdev, PM_HINT_NORMAL); hid_dbg(hdev, "I2C transfer finished: %d\n", ret); return ret; } static int cp2112_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { struct cp2112_device *dev = (struct cp2112_device *)adap->algo_data; struct hid_device *hdev = dev->hdev; u8 buf[64]; __le16 word; ssize_t count; size_t read_length = 0; unsigned int retries; int ret; hid_dbg(hdev, "%s addr 0x%x flags 0x%x cmd 0x%x size %d\n", str_write_read(read_write == I2C_SMBUS_WRITE), addr, flags, command, size); switch (size) { case I2C_SMBUS_BYTE: read_length = 1; if (I2C_SMBUS_READ == read_write) count = cp2112_read_req(buf, addr, read_length); else count = cp2112_write_req(buf, addr, command, NULL, 0); break; case I2C_SMBUS_BYTE_DATA: read_length = 1; if (I2C_SMBUS_READ == read_write) count = cp2112_write_read_req(buf, addr, read_length, command, NULL, 0); else count = cp2112_write_req(buf, addr, command, &data->byte, 1); break; case I2C_SMBUS_WORD_DATA: read_length = 2; word = cpu_to_le16(data->word); if (I2C_SMBUS_READ == read_write) count = cp2112_write_read_req(buf, addr, read_length, command, NULL, 0); else count = cp2112_write_req(buf, addr, command, (u8 *)&word, 2); break; case I2C_SMBUS_PROC_CALL: size = I2C_SMBUS_WORD_DATA; read_write = I2C_SMBUS_READ; read_length = 2; word = cpu_to_le16(data->word); count = cp2112_write_read_req(buf, addr, read_length, command, (u8 *)&word, 2); break; case I2C_SMBUS_I2C_BLOCK_DATA: if (read_write == I2C_SMBUS_READ) { read_length = data->block[0]; count = cp2112_write_read_req(buf, addr, read_length, command, NULL, 0); } else { count = cp2112_write_req(buf, addr, command, data->block + 1, data->block[0]); } break; case I2C_SMBUS_BLOCK_DATA: if (I2C_SMBUS_READ == read_write) { count = cp2112_write_read_req(buf, addr, I2C_SMBUS_BLOCK_MAX, command, NULL, 0); } else { count = cp2112_write_req(buf, addr, command, data->block, data->block[0] + 1); } break; case I2C_SMBUS_BLOCK_PROC_CALL: size = I2C_SMBUS_BLOCK_DATA; read_write = I2C_SMBUS_READ; count = cp2112_write_read_req(buf, addr, I2C_SMBUS_BLOCK_MAX, command, data->block, data->block[0] + 1); break; default: hid_warn(hdev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } if (count < 0) return count; ret = hid_hw_power(hdev, PM_HINT_FULLON); if (ret < 0) { hid_err(hdev, "power management error: %d\n", ret); return ret; } ret = cp2112_hid_output(hdev, buf, count, HID_OUTPUT_REPORT); if (ret < 0) { hid_warn(hdev, "Error starting transaction: %d\n", ret); goto power_normal; } for (retries = 0; retries < XFER_STATUS_RETRIES; ++retries) { ret = cp2112_xfer_status(dev); if (-EBUSY == ret) continue; if (ret < 0) goto power_normal; break; } if (XFER_STATUS_RETRIES <= retries) { hid_warn(hdev, "Transfer timed out, cancelling.\n"); buf[0] = CP2112_CANCEL_TRANSFER; buf[1] = 0x01; ret = cp2112_hid_output(hdev, buf, 2, HID_OUTPUT_REPORT); if (ret < 0) hid_warn(hdev, "Error cancelling transaction: %d\n", ret); ret = -ETIMEDOUT; goto power_normal; } if (I2C_SMBUS_WRITE == read_write) { ret = 0; goto power_normal; } if (I2C_SMBUS_BLOCK_DATA == size) read_length = ret; ret = cp2112_read(dev, buf, read_length); if (ret < 0) goto power_normal; if (ret != read_length) { hid_warn(hdev, "short read: %d < %zd\n", ret, read_length); ret = -EIO; goto 
power_normal; } switch (size) { case I2C_SMBUS_BYTE: case I2C_SMBUS_BYTE_DATA: data->byte = buf[0]; break; case I2C_SMBUS_WORD_DATA: data->word = le16_to_cpup((__le16 *)buf); break; case I2C_SMBUS_I2C_BLOCK_DATA: if (read_length > I2C_SMBUS_BLOCK_MAX) { ret = -EINVAL; goto power_normal; } memcpy(data->block + 1, buf, read_length); break; case I2C_SMBUS_BLOCK_DATA: if (read_length > I2C_SMBUS_BLOCK_MAX) { ret = -EPROTO; goto power_normal; } memcpy(data->block, buf, read_length); break; } ret = 0; power_normal: hid_hw_power(hdev, PM_HINT_NORMAL); hid_dbg(hdev, "transfer finished: %d\n", ret); return ret; } static u32 cp2112_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_I2C_BLOCK | I2C_FUNC_SMBUS_PROC_CALL | I2C_FUNC_SMBUS_BLOCK_PROC_CALL; } static const struct i2c_algorithm smbus_algorithm = { .master_xfer = cp2112_i2c_xfer, .smbus_xfer = cp2112_xfer, .functionality = cp2112_functionality, }; static int cp2112_get_usb_config(struct hid_device *hdev, struct cp2112_usb_config_report *cfg) { int ret; ret = cp2112_hid_get(hdev, CP2112_USB_CONFIG, (u8 *)cfg, sizeof(*cfg), HID_FEATURE_REPORT); if (ret != sizeof(*cfg)) { hid_err(hdev, "error reading usb config: %d\n", ret); if (ret < 0) return ret; return -EIO; } return 0; } static int cp2112_set_usb_config(struct hid_device *hdev, struct cp2112_usb_config_report *cfg) { int ret; if (WARN_ON(cfg->report != CP2112_USB_CONFIG)) return -EINVAL; ret = cp2112_hid_output(hdev, (u8 *)cfg, sizeof(*cfg), HID_FEATURE_REPORT); if (ret != sizeof(*cfg)) { hid_err(hdev, "error writing usb config: %d\n", ret); if (ret < 0) return ret; return -EIO; } return 0; } static void chmod_sysfs_attrs(struct hid_device *hdev); #define CP2112_CONFIG_ATTR(name, store, format, ...) 
\ static ssize_t name##_store(struct device *kdev, \ struct device_attribute *attr, const char *buf, \ size_t count) \ { \ struct hid_device *hdev = to_hid_device(kdev); \ struct cp2112_usb_config_report cfg; \ int ret = cp2112_get_usb_config(hdev, &cfg); \ if (ret) \ return ret; \ store; \ ret = cp2112_set_usb_config(hdev, &cfg); \ if (ret) \ return ret; \ chmod_sysfs_attrs(hdev); \ return count; \ } \ static ssize_t name##_show(struct device *kdev, \ struct device_attribute *attr, char *buf) \ { \ struct hid_device *hdev = to_hid_device(kdev); \ struct cp2112_usb_config_report cfg; \ int ret = cp2112_get_usb_config(hdev, &cfg); \ if (ret) \ return ret; \ return sysfs_emit(buf, format, ##__VA_ARGS__); \ } \ static DEVICE_ATTR_RW(name); CP2112_CONFIG_ATTR(vendor_id, ({ u16 vid; if (sscanf(buf, "%hi", &vid) != 1) return -EINVAL; cfg.vid = cpu_to_le16(vid); cfg.mask = 0x01; }), "0x%04x\n", le16_to_cpu(cfg.vid)); CP2112_CONFIG_ATTR(product_id, ({ u16 pid; if (sscanf(buf, "%hi", &pid) != 1) return -EINVAL; cfg.pid = cpu_to_le16(pid); cfg.mask = 0x02; }), "0x%04x\n", le16_to_cpu(cfg.pid)); CP2112_CONFIG_ATTR(max_power, ({ int mA; if (sscanf(buf, "%i", &mA) != 1) return -EINVAL; cfg.max_power = (mA + 1) / 2; cfg.mask = 0x04; }), "%u mA\n", cfg.max_power * 2); CP2112_CONFIG_ATTR(power_mode, ({ if (sscanf(buf, "%hhi", &cfg.power_mode) != 1) return -EINVAL; cfg.mask = 0x08; }), "%u\n", cfg.power_mode); CP2112_CONFIG_ATTR(release_version, ({ if (sscanf(buf, "%hhi.%hhi", &cfg.release_major, &cfg.release_minor) != 2) return -EINVAL; cfg.mask = 0x10; }), "%u.%u\n", cfg.release_major, cfg.release_minor); #undef CP2112_CONFIG_ATTR static ssize_t pstr_store(struct device *kdev, struct device_attribute *kattr, const char *buf, size_t count, int number) { struct hid_device *hdev = to_hid_device(kdev); struct cp2112_string_report report; int ret; memset(&report, 0, sizeof(report)); ret = utf8s_to_utf16s(buf, count, UTF16_LITTLE_ENDIAN, report.string, ARRAY_SIZE(report.string)); report.report = number; report.length = ret * sizeof(report.string[0]) + 2; report.type = USB_DT_STRING; ret = cp2112_hid_output(hdev, &report.report, report.length + 1, HID_FEATURE_REPORT); if (ret != report.length + 1) { hid_err(hdev, "error writing %s string: %d\n", kattr->attr.name, ret); if (ret < 0) return ret; return -EIO; } chmod_sysfs_attrs(hdev); return count; } static ssize_t pstr_show(struct device *kdev, struct device_attribute *kattr, char *buf, int number) { struct hid_device *hdev = to_hid_device(kdev); struct cp2112_string_report report; u8 length; int ret; ret = cp2112_hid_get(hdev, number, (u8 *)&report.contents, sizeof(report.contents), HID_FEATURE_REPORT); if (ret < 3) { hid_err(hdev, "error reading %s string: %d\n", kattr->attr.name, ret); if (ret < 0) return ret; return -EIO; } if (report.length < 2) { hid_err(hdev, "invalid %s string length: %d\n", kattr->attr.name, report.length); return -EIO; } length = report.length > ret - 1 ? 
ret - 1 : report.length; length = (length - 2) / sizeof(report.string[0]); ret = utf16s_to_utf8s(report.string, length, UTF16_LITTLE_ENDIAN, buf, PAGE_SIZE - 1); buf[ret++] = '\n'; return ret; } #define CP2112_PSTR_ATTR(name, _report) \ static ssize_t name##_store(struct device *kdev, struct device_attribute *kattr, \ const char *buf, size_t count) \ { \ return pstr_store(kdev, kattr, buf, count, _report); \ } \ static ssize_t name##_show(struct device *kdev, struct device_attribute *kattr, char *buf) \ { \ return pstr_show(kdev, kattr, buf, _report); \ } \ static DEVICE_ATTR_RW(name); CP2112_PSTR_ATTR(manufacturer, CP2112_MANUFACTURER_STRING); CP2112_PSTR_ATTR(product, CP2112_PRODUCT_STRING); CP2112_PSTR_ATTR(serial, CP2112_SERIAL_STRING); #undef CP2112_PSTR_ATTR static const struct attribute_group cp2112_attr_group = { .attrs = (struct attribute *[]){ &dev_attr_vendor_id.attr, &dev_attr_product_id.attr, &dev_attr_max_power.attr, &dev_attr_power_mode.attr, &dev_attr_release_version.attr, &dev_attr_manufacturer.attr, &dev_attr_product.attr, &dev_attr_serial.attr, NULL } }; /* Chmoding our sysfs attributes is simply a way to expose which fields in the * PROM have already been programmed. We do not depend on this preventing * writing to these attributes since the CP2112 will simply ignore writes to * already-programmed fields. This is why there is no sense in fixing this * racy behaviour. */ static void chmod_sysfs_attrs(struct hid_device *hdev) { struct attribute **attr; u8 buf[2]; int ret; ret = cp2112_hid_get(hdev, CP2112_LOCK_BYTE, buf, sizeof(buf), HID_FEATURE_REPORT); if (ret != sizeof(buf)) { hid_err(hdev, "error reading lock byte: %d\n", ret); return; } for (attr = cp2112_attr_group.attrs; *attr; ++attr) { umode_t mode = (buf[1] & 1) ? 0644 : 0444; ret = sysfs_chmod_file(&hdev->dev.kobj, *attr, mode); if (ret < 0) hid_err(hdev, "error chmoding sysfs file %s\n", (*attr)->name); buf[1] >>= 1; } } static void cp2112_gpio_irq_ack(struct irq_data *d) { } static void cp2112_gpio_irq_mask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct cp2112_device *dev = gpiochip_get_data(gc); irq_hw_number_t hwirq = irqd_to_hwirq(d); __clear_bit(hwirq, &dev->irq_mask); gpiochip_disable_irq(gc, hwirq); } static void cp2112_gpio_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct cp2112_device *dev = gpiochip_get_data(gc); irq_hw_number_t hwirq = irqd_to_hwirq(d); gpiochip_enable_irq(gc, hwirq); __set_bit(hwirq, &dev->irq_mask); } static void cp2112_gpio_poll_callback(struct work_struct *work) { struct cp2112_device *dev = container_of(work, struct cp2112_device, gpio_poll_worker.work); u8 gpio_mask; u32 irq_type; int irq, virq, ret; ret = cp2112_gpio_get_all(&dev->gc); if (ret == -ENODEV) /* the hardware has been disconnected */ return; if (ret < 0) goto exit; gpio_mask = ret; for_each_set_bit(virq, &dev->irq_mask, CP2112_GPIO_MAX_GPIO) { irq = irq_find_mapping(dev->gc.irq.domain, virq); if (!irq) continue; irq_type = irq_get_trigger_type(irq); if (!irq_type) continue; if (gpio_mask & BIT(virq)) { /* Level High */ if (irq_type & IRQ_TYPE_LEVEL_HIGH) handle_nested_irq(irq); if ((irq_type & IRQ_TYPE_EDGE_RISING) && !(dev->gpio_prev_state & BIT(virq))) handle_nested_irq(irq); } else { /* Level Low */ if (irq_type & IRQ_TYPE_LEVEL_LOW) handle_nested_irq(irq); if ((irq_type & IRQ_TYPE_EDGE_FALLING) && (dev->gpio_prev_state & BIT(virq))) handle_nested_irq(irq); } } dev->gpio_prev_state = gpio_mask; exit: if (dev->gpio_poll) 
schedule_delayed_work(&dev->gpio_poll_worker, 10); } static unsigned int cp2112_gpio_irq_startup(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct cp2112_device *dev = gpiochip_get_data(gc); if (!dev->gpio_poll) { dev->gpio_poll = true; schedule_delayed_work(&dev->gpio_poll_worker, 0); } cp2112_gpio_irq_unmask(d); return 0; } static void cp2112_gpio_irq_shutdown(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct cp2112_device *dev = gpiochip_get_data(gc); cp2112_gpio_irq_mask(d); if (!dev->irq_mask) { dev->gpio_poll = false; cancel_delayed_work_sync(&dev->gpio_poll_worker); } } static int cp2112_gpio_irq_type(struct irq_data *d, unsigned int type) { return 0; } static const struct irq_chip cp2112_gpio_irqchip = { .name = "cp2112-gpio", .irq_startup = cp2112_gpio_irq_startup, .irq_shutdown = cp2112_gpio_irq_shutdown, .irq_ack = cp2112_gpio_irq_ack, .irq_mask = cp2112_gpio_irq_mask, .irq_unmask = cp2112_gpio_irq_unmask, .irq_set_type = cp2112_gpio_irq_type, .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE, GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct cp2112_device *dev; u8 buf[3]; struct cp2112_smbus_config_report config; struct gpio_irq_chip *girq; int ret; dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->in_out_buffer = devm_kzalloc(&hdev->dev, CP2112_REPORT_MAX_LENGTH, GFP_KERNEL); if (!dev->in_out_buffer) return -ENOMEM; ret = devm_mutex_init(&hdev->dev, &dev->lock); if (ret) { hid_err(hdev, "mutex init failed\n"); return ret; } ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); return ret; } ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW); if (ret) { hid_err(hdev, "hw start failed\n"); return ret; } ret = hid_hw_open(hdev); if (ret) { hid_err(hdev, "hw open failed\n"); goto err_hid_stop; } ret = hid_hw_power(hdev, PM_HINT_FULLON); if (ret < 0) { hid_err(hdev, "power management error: %d\n", ret); goto err_hid_close; } ret = cp2112_hid_get(hdev, CP2112_GET_VERSION_INFO, buf, sizeof(buf), HID_FEATURE_REPORT); if (ret != sizeof(buf)) { hid_err(hdev, "error requesting version\n"); if (ret >= 0) ret = -EIO; goto err_power_normal; } hid_info(hdev, "Part Number: 0x%02X Device Version: 0x%02X\n", buf[1], buf[2]); ret = cp2112_hid_get(hdev, CP2112_SMBUS_CONFIG, (u8 *)&config, sizeof(config), HID_FEATURE_REPORT); if (ret != sizeof(config)) { hid_err(hdev, "error requesting SMBus config\n"); if (ret >= 0) ret = -EIO; goto err_power_normal; } config.retry_time = cpu_to_be16(1); ret = cp2112_hid_output(hdev, (u8 *)&config, sizeof(config), HID_FEATURE_REPORT); if (ret != sizeof(config)) { hid_err(hdev, "error setting SMBus config\n"); if (ret >= 0) ret = -EIO; goto err_power_normal; } hid_set_drvdata(hdev, (void *)dev); dev->hdev = hdev; dev->adap.owner = THIS_MODULE; dev->adap.class = I2C_CLASS_HWMON; dev->adap.algo = &smbus_algorithm; dev->adap.algo_data = dev; dev->adap.dev.parent = &hdev->dev; snprintf(dev->adap.name, sizeof(dev->adap.name), "CP2112 SMBus Bridge on hidraw%d", ((struct hidraw *)hdev->hidraw)->minor); dev->hwversion = buf[2]; init_waitqueue_head(&dev->wait); hid_device_io_start(hdev); ret = i2c_add_adapter(&dev->adap); hid_device_io_stop(hdev); if (ret) { hid_err(hdev, "error registering i2c adapter\n"); goto err_power_normal; } hid_dbg(hdev, "adapter registered\n"); dev->gc.label = "cp2112_gpio"; dev->gc.direction_input = cp2112_gpio_direction_input; dev->gc.direction_output = 
cp2112_gpio_direction_output; dev->gc.set_rv = cp2112_gpio_set; dev->gc.get = cp2112_gpio_get; dev->gc.base = -1; dev->gc.ngpio = CP2112_GPIO_MAX_GPIO; dev->gc.can_sleep = 1; dev->gc.parent = &hdev->dev; girq = &dev->gc.irq; gpio_irq_chip_set_chip(girq, &cp2112_gpio_irqchip); /* The event comes from the outside so no parent handler */ girq->parent_handler = NULL; girq->num_parents = 0; girq->parents = NULL; girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_simple_irq; girq->threaded = true; INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback); ret = gpiochip_add_data(&dev->gc, dev); if (ret < 0) { hid_err(hdev, "error registering gpio chip\n"); goto err_free_i2c; } ret = sysfs_create_group(&hdev->dev.kobj, &cp2112_attr_group); if (ret < 0) { hid_err(hdev, "error creating sysfs attrs\n"); goto err_gpiochip_remove; } chmod_sysfs_attrs(hdev); hid_hw_power(hdev, PM_HINT_NORMAL); return ret; err_gpiochip_remove: gpiochip_remove(&dev->gc); err_free_i2c: i2c_del_adapter(&dev->adap); err_power_normal: hid_hw_power(hdev, PM_HINT_NORMAL); err_hid_close: hid_hw_close(hdev); err_hid_stop: hid_hw_stop(hdev); return ret; } static void cp2112_remove(struct hid_device *hdev) { struct cp2112_device *dev = hid_get_drvdata(hdev); sysfs_remove_group(&hdev->dev.kobj, &cp2112_attr_group); i2c_del_adapter(&dev->adap); if (dev->gpio_poll) { dev->gpio_poll = false; cancel_delayed_work_sync(&dev->gpio_poll_worker); } gpiochip_remove(&dev->gc); /* i2c_del_adapter has finished removing all i2c devices from our * adapter. Well behaved devices should no longer call our cp2112_xfer * and should have waited for any pending calls to finish. It has also * waited for device_unregister(&adap->dev) to complete. Therefore we * can safely free our struct cp2112_device. */ hid_hw_close(hdev); hid_hw_stop(hdev); } static int cp2112_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct cp2112_device *dev = hid_get_drvdata(hdev); struct cp2112_xfer_status_report *xfer = (void *)data; switch (data[0]) { case CP2112_TRANSFER_STATUS_RESPONSE: hid_dbg(hdev, "xfer status: %02x %02x %04x %04x\n", xfer->status0, xfer->status1, be16_to_cpu(xfer->retries), be16_to_cpu(xfer->length)); switch (xfer->status0) { case STATUS0_IDLE: dev->xfer_status = -EAGAIN; break; case STATUS0_BUSY: dev->xfer_status = -EBUSY; break; case STATUS0_COMPLETE: dev->xfer_status = be16_to_cpu(xfer->length); break; case STATUS0_ERROR: switch (xfer->status1) { case STATUS1_TIMEOUT_NACK: case STATUS1_TIMEOUT_BUS: dev->xfer_status = -ETIMEDOUT; break; default: dev->xfer_status = -EIO; break; } break; default: dev->xfer_status = -EINVAL; break; } atomic_set(&dev->xfer_avail, 1); break; case CP2112_DATA_READ_RESPONSE: hid_dbg(hdev, "read response: %02x %02x\n", data[1], data[2]); dev->read_length = data[2]; if (dev->read_length > sizeof(dev->read_data)) dev->read_length = sizeof(dev->read_data); memcpy(dev->read_data, &data[3], dev->read_length); atomic_set(&dev->read_avail, 1); break; default: hid_err(hdev, "unknown report\n"); return 0; } wake_up_interruptible(&dev->wait); return 1; } static struct hid_driver cp2112_driver = { .name = "cp2112", .id_table = cp2112_devices, .probe = cp2112_probe, .remove = cp2112_remove, .raw_event = cp2112_raw_event, }; module_hid_driver(cp2112_driver); MODULE_DESCRIPTION("Silicon Labs HID USB to SMBus master bridge"); MODULE_AUTHOR("David Barksdale <dbarksdale@uplogix.com>"); MODULE_LICENSE("GPL");
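The adapter registered in cp2112_probe() above is an ordinary Linux I2C adapter, so the single-message and write-then-read paths of cp2112_i2c_xfer() can be exercised from user space through i2c-dev. Below is a minimal sketch of the write-then-read case; the /dev/i2c-3 node and the 0x50 slave address are assumptions chosen for illustration, not values taken from the driver, and on revision 1 hardware (no repeated start) the same message pair is rejected.

/*
 * Illustrative only: one write message followed by one read message to the
 * same address, which cp2112_i2c_xfer() maps onto a single CP2112
 * "data write read request" report. Node path and slave address are assumed.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

int main(void)
{
	unsigned char reg = 0x00, data[4] = { 0 };
	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0,        .len = 1,            .buf = &reg },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = sizeof(data), .buf = data },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };
	int fd = open("/dev/i2c-3", O_RDWR);	/* adapter node created for the CP2112 (assumed) */

	if (fd < 0 || ioctl(fd, I2C_RDWR, &xfer) < 0) {
		perror("cp2112 write+read");
		return 1;
	}
	printf("read: %02x %02x %02x %02x\n", data[0], data[1], data[2], data[3]);
	close(fd);
	return 0;
}

Arbitrary multi-message transactions are rejected with -EOPNOTSUPP, so this two-message pair (write then read, same address) is the only combined form the driver accepts.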
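The remainder of the listing is the Afatech AF9035 DVB USB driver. Its control traffic is framed by af9035_ctrl_msg() below: byte 0 carries a length, bytes 1-3 the mailbox, command and sequence number, the payload follows, and a big-endian checksum computed by af9035_checksum() closes the frame. The sketch below mirrors that framing on the host side; it assumes the caller supplies cmd, mbox and seq and keeps the payload within the buffer, and the helper names are hypothetical.

/*
 * Illustrative framing only; mirrors the layout built in af9035_ctrl_msg().
 * The checksum skips byte 0, alternates high/low byte, and is then inverted.
 */
#include <stdint.h>
#include <string.h>

static uint16_t af9035_style_checksum(const uint8_t *buf, size_t len)
{
	uint16_t sum = 0;
	size_t i;

	for (i = 1; i < len; i++)
		sum += (i % 2) ? buf[i] << 8 : buf[i];
	return ~sum;
}

/* Returns the total frame length written to out[] (header + payload + checksum). */
static size_t frame_request(uint8_t *out, uint8_t mbox, uint8_t cmd, uint8_t seq,
			    const uint8_t *payload, uint8_t plen)
{
	uint16_t csum;

	out[0] = 4 + plen + 2 - 1;	/* REQ_HDR_LEN + wlen + CHECKSUM_LEN - 1 */
	out[1] = mbox;
	out[2] = cmd;
	out[3] = seq;
	memcpy(&out[4], payload, plen);
	csum = af9035_style_checksum(out, out[0] - 1);
	out[out[0] - 1] = csum >> 8;
	out[out[0]] = csum & 0xff;
	return out[0] + 1;
}

This is also the layout that the buffer overflow check at the top of af9035_ctrl_msg() guards: wlen may not exceed BUF_LEN minus the four header bytes and the two checksum bytes.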
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Afatech AF9035 DVB USB driver
 *
 * Copyright (C) 2009 Antti Palosaari <crope@iki.fi>
 * Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
 */

#include "af9035.h"

/* Max transfer size done by I2C transfer functions */
#define MAX_XFER_SIZE  64

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

static u16 af9035_checksum(const u8 *buf, size_t len)
{
	size_t i;
	u16 checksum = 0;

	for (i = 1; i < len; i++) {
		if (i % 2)
			checksum += buf[i] << 8;
		else
			checksum += buf[i];
	}
	checksum = ~checksum;

	return checksum;
}

static int af9035_ctrl_msg(struct dvb_usb_device *d, struct usb_req *req)
{
#define REQ_HDR_LEN 4 /* send header size */
#define ACK_HDR_LEN 3 /* receive header size */
#define
CHECKSUM_LEN 2 #define USB_TIMEOUT 2000 struct state *state = d_to_priv(d); struct usb_interface *intf = d->intf; int ret, wlen, rlen; u16 checksum, tmp_checksum; mutex_lock(&d->usb_mutex); /* buffer overflow check */ if (req->wlen > (BUF_LEN - REQ_HDR_LEN - CHECKSUM_LEN) || req->rlen > (BUF_LEN - ACK_HDR_LEN - CHECKSUM_LEN)) { dev_err(&intf->dev, "too much data wlen=%d rlen=%d\n", req->wlen, req->rlen); ret = -EINVAL; goto exit; } state->buf[0] = REQ_HDR_LEN + req->wlen + CHECKSUM_LEN - 1; state->buf[1] = req->mbox; state->buf[2] = req->cmd; state->buf[3] = state->seq++; memcpy(&state->buf[REQ_HDR_LEN], req->wbuf, req->wlen); wlen = REQ_HDR_LEN + req->wlen + CHECKSUM_LEN; rlen = ACK_HDR_LEN + req->rlen + CHECKSUM_LEN; /* calc and add checksum */ checksum = af9035_checksum(state->buf, state->buf[0] - 1); state->buf[state->buf[0] - 1] = (checksum >> 8); state->buf[state->buf[0] - 0] = (checksum & 0xff); /* no ack for these packets */ if (req->cmd == CMD_FW_DL) rlen = 0; ret = dvb_usbv2_generic_rw_locked(d, state->buf, wlen, state->buf, rlen); if (ret) goto exit; /* no ack for those packets */ if (req->cmd == CMD_FW_DL) goto exit; /* verify checksum */ checksum = af9035_checksum(state->buf, rlen - 2); tmp_checksum = (state->buf[rlen - 2] << 8) | state->buf[rlen - 1]; if (tmp_checksum != checksum) { dev_err(&intf->dev, "command=%02x checksum mismatch (%04x != %04x)\n", req->cmd, tmp_checksum, checksum); ret = -EIO; goto exit; } /* check status */ if (state->buf[2]) { /* fw returns status 1 when IR code was not received */ if (req->cmd == CMD_IR_GET || state->buf[2] == 1) { ret = 1; goto exit; } dev_dbg(&intf->dev, "command=%02x failed fw error=%d\n", req->cmd, state->buf[2]); ret = -EIO; goto exit; } /* read request, copy returned data to return buf */ if (req->rlen) memcpy(req->rbuf, &state->buf[ACK_HDR_LEN], req->rlen); exit: mutex_unlock(&d->usb_mutex); return ret; } /* write multiple registers */ static int af9035_wr_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len) { struct usb_interface *intf = d->intf; u8 wbuf[MAX_XFER_SIZE]; u8 mbox = (reg >> 16) & 0xff; struct usb_req req = { CMD_MEM_WR, mbox, 6 + len, wbuf, 0, NULL }; if (6 + len > sizeof(wbuf)) { dev_warn(&intf->dev, "i2c wr: len=%d is too big!\n", len); return -EOPNOTSUPP; } wbuf[0] = len; wbuf[1] = 2; wbuf[2] = 0; wbuf[3] = 0; wbuf[4] = (reg >> 8) & 0xff; wbuf[5] = (reg >> 0) & 0xff; memcpy(&wbuf[6], val, len); return af9035_ctrl_msg(d, &req); } /* read multiple registers */ static int af9035_rd_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len) { u8 wbuf[] = { len, 2, 0, 0, (reg >> 8) & 0xff, reg & 0xff }; u8 mbox = (reg >> 16) & 0xff; struct usb_req req = { CMD_MEM_RD, mbox, sizeof(wbuf), wbuf, len, val }; return af9035_ctrl_msg(d, &req); } /* write single register */ static int af9035_wr_reg(struct dvb_usb_device *d, u32 reg, u8 val) { return af9035_wr_regs(d, reg, &val, 1); } /* read single register */ static int af9035_rd_reg(struct dvb_usb_device *d, u32 reg, u8 *val) { return af9035_rd_regs(d, reg, val, 1); } /* write single register with mask */ static int af9035_wr_reg_mask(struct dvb_usb_device *d, u32 reg, u8 val, u8 mask) { int ret; u8 tmp; /* no need for read if whole reg is written */ if (mask != 0xff) { ret = af9035_rd_regs(d, reg, &tmp, 1); if (ret) return ret; val &= mask; tmp &= ~mask; val |= tmp; } return af9035_wr_regs(d, reg, &val, 1); } static int af9035_add_i2c_dev(struct dvb_usb_device *d, const char *type, u8 addr, void *platform_data, struct i2c_adapter *adapter) { int ret, num; struct 
state *state = d_to_priv(d); struct usb_interface *intf = d->intf; struct i2c_client *client; struct i2c_board_info board_info = { .addr = addr, .platform_data = platform_data, }; strscpy(board_info.type, type, I2C_NAME_SIZE); /* find first free client */ for (num = 0; num < AF9035_I2C_CLIENT_MAX; num++) { if (state->i2c_client[num] == NULL) break; } dev_dbg(&intf->dev, "num=%d\n", num); if (num == AF9035_I2C_CLIENT_MAX) { dev_err(&intf->dev, "I2C client out of index\n"); ret = -ENODEV; goto err; } request_module("%s", board_info.type); /* register I2C device */ client = i2c_new_client_device(adapter, &board_info); if (!i2c_client_has_driver(client)) { dev_err(&intf->dev, "failed to bind i2c device to %s driver\n", type); ret = -ENODEV; goto err; } /* increase I2C driver usage count */ if (!try_module_get(client->dev.driver->owner)) { i2c_unregister_device(client); ret = -ENODEV; goto err; } state->i2c_client[num] = client; return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static void af9035_del_i2c_dev(struct dvb_usb_device *d) { int num; struct state *state = d_to_priv(d); struct usb_interface *intf = d->intf; struct i2c_client *client; /* find last used client */ num = AF9035_I2C_CLIENT_MAX; while (num--) { if (state->i2c_client[num] != NULL) break; } dev_dbg(&intf->dev, "num=%d\n", num); if (num == -1) { dev_err(&intf->dev, "I2C client out of index\n"); goto err; } client = state->i2c_client[num]; /* decrease I2C driver usage count */ module_put(client->dev.driver->owner); /* unregister I2C device */ i2c_unregister_device(client); state->i2c_client[num] = NULL; return; err: dev_dbg(&intf->dev, "failed\n"); } static int af9035_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); struct state *state = d_to_priv(d); int ret; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; /* * AF9035 I2C sub header is 5 bytes long. Meaning of those bytes are: * 0: data len * 1: I2C addr << 1 * 2: reg addr len * byte 3 and 4 can be used as reg addr * 3: reg addr MSB * used when reg addr len is set to 2 * 4: reg addr LSB * used when reg addr len is set to 1 or 2 * * For the simplify we do not use register addr at all. * NOTE: As a firmware knows tuner type there is very small possibility * there could be some tuner I2C hacks done by firmware and this may * lead problems if firmware expects those bytes are used. * * TODO: Here is few hacks. AF9035 chip integrates AF9033 demodulator. * IT9135 chip integrates AF9033 demodulator and RF tuner. For dual * tuner devices, there is also external AF9033 demodulator connected * via external I2C bus. All AF9033 demod I2C traffic, both single and * dual tuner configuration, is covered by firmware - actual USB IO * looks just like a memory access. * In case of IT913x chip, there is own tuner driver. It is implemented * currently as a I2C driver, even tuner IP block is likely build * directly into the demodulator memory space and there is no own I2C * bus. I2C subsystem does not allow register multiple devices to same * bus, having same slave address. Due to that we reuse demod address, * shifted by one bit, on that case. 
* * For IT930x we use a different command and the sub header is * different as well: * 0: data len * 1: I2C bus (0x03 seems to be only value used) * 2: I2C addr << 1 */ #define AF9035_IS_I2C_XFER_WRITE_READ(_msg, _num) \ (_num == 2 && !(_msg[0].flags & I2C_M_RD) && (_msg[1].flags & I2C_M_RD)) #define AF9035_IS_I2C_XFER_WRITE(_msg, _num) \ (_num == 1 && !(_msg[0].flags & I2C_M_RD)) #define AF9035_IS_I2C_XFER_READ(_msg, _num) \ (_num == 1 && (_msg[0].flags & I2C_M_RD)) if (AF9035_IS_I2C_XFER_WRITE_READ(msg, num)) { if (msg[0].len > 40 || msg[1].len > 40) { /* TODO: correct limits > 40 */ ret = -EOPNOTSUPP; } else if ((msg[0].addr == state->af9033_i2c_addr[0]) || (msg[0].addr == state->af9033_i2c_addr[1])) { /* demod access via firmware interface */ u32 reg; if (msg[0].len < 3 || msg[1].len < 1) { ret = -EOPNOTSUPP; goto unlock; } reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 | msg[0].buf[2]; if (msg[0].addr == state->af9033_i2c_addr[1]) reg |= 0x100000; ret = af9035_rd_regs(d, reg, &msg[1].buf[0], msg[1].len); } else if (state->no_read) { memset(msg[1].buf, 0, msg[1].len); ret = 0; } else { /* I2C write + read */ u8 buf[MAX_XFER_SIZE]; struct usb_req req = { CMD_I2C_RD, 0, 5 + msg[0].len, buf, msg[1].len, msg[1].buf }; if (state->chip_type == 0x9306) { req.cmd = CMD_GENERIC_I2C_RD; req.wlen = 3 + msg[0].len; } req.mbox |= ((msg[0].addr & 0x80) >> 3); buf[0] = msg[1].len; if (state->chip_type == 0x9306) { buf[1] = 0x03; /* I2C bus */ buf[2] = msg[0].addr << 1; memcpy(&buf[3], msg[0].buf, msg[0].len); } else { buf[1] = msg[0].addr << 1; buf[3] = 0x00; /* reg addr MSB */ buf[4] = 0x00; /* reg addr LSB */ /* Keep prev behavior for write req len > 2*/ if (msg[0].len > 2) { buf[2] = 0x00; /* reg addr len */ memcpy(&buf[5], msg[0].buf, msg[0].len); /* Use reg addr fields if write req len <= 2 */ } else { req.wlen = 5; buf[2] = msg[0].len; if (msg[0].len == 2) { buf[3] = msg[0].buf[0]; buf[4] = msg[0].buf[1]; } else if (msg[0].len == 1) { buf[4] = msg[0].buf[0]; } } } ret = af9035_ctrl_msg(d, &req); } } else if (AF9035_IS_I2C_XFER_WRITE(msg, num)) { if (msg[0].len > 40) { /* TODO: correct limits > 40 */ ret = -EOPNOTSUPP; } else if ((msg[0].addr == state->af9033_i2c_addr[0]) || (msg[0].addr == state->af9033_i2c_addr[1])) { /* demod access via firmware interface */ u32 reg; if (msg[0].len < 3) { ret = -EOPNOTSUPP; goto unlock; } reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 | msg[0].buf[2]; if (msg[0].addr == state->af9033_i2c_addr[1]) reg |= 0x100000; ret = af9035_wr_regs(d, reg, &msg[0].buf[3], msg[0].len - 3); } else { /* I2C write */ u8 buf[MAX_XFER_SIZE]; struct usb_req req = { CMD_I2C_WR, 0, 5 + msg[0].len, buf, 0, NULL }; if (state->chip_type == 0x9306) { req.cmd = CMD_GENERIC_I2C_WR; req.wlen = 3 + msg[0].len; } req.mbox |= ((msg[0].addr & 0x80) >> 3); buf[0] = msg[0].len; if (state->chip_type == 0x9306) { buf[1] = 0x03; /* I2C bus */ buf[2] = msg[0].addr << 1; memcpy(&buf[3], msg[0].buf, msg[0].len); } else { buf[1] = msg[0].addr << 1; buf[2] = 0x00; /* reg addr len */ buf[3] = 0x00; /* reg addr MSB */ buf[4] = 0x00; /* reg addr LSB */ memcpy(&buf[5], msg[0].buf, msg[0].len); } ret = af9035_ctrl_msg(d, &req); } } else if (AF9035_IS_I2C_XFER_READ(msg, num)) { if (msg[0].len > 40) { /* TODO: correct limits > 40 */ ret = -EOPNOTSUPP; } else if (state->no_read) { memset(msg[0].buf, 0, msg[0].len); ret = 0; } else { /* I2C read */ u8 buf[5]; struct usb_req req = { CMD_I2C_RD, 0, sizeof(buf), buf, msg[0].len, msg[0].buf }; if (state->chip_type == 0x9306) { req.cmd = CMD_GENERIC_I2C_RD; req.wlen 
= 3; } req.mbox |= ((msg[0].addr & 0x80) >> 3); buf[0] = msg[0].len; if (state->chip_type == 0x9306) { buf[1] = 0x03; /* I2C bus */ buf[2] = msg[0].addr << 1; } else { buf[1] = msg[0].addr << 1; buf[2] = 0x00; /* reg addr len */ buf[3] = 0x00; /* reg addr MSB */ buf[4] = 0x00; /* reg addr LSB */ } ret = af9035_ctrl_msg(d, &req); } } else { /* * We support only three kind of I2C transactions: * 1) 1 x write + 1 x read (repeated start) * 2) 1 x write * 3) 1 x read */ ret = -EOPNOTSUPP; } unlock: mutex_unlock(&d->i2c_mutex); if (ret < 0) return ret; else return num; } static u32 af9035_i2c_functionality(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static const struct i2c_algorithm af9035_i2c_algo = { .master_xfer = af9035_i2c_master_xfer, .functionality = af9035_i2c_functionality, }; static int af9035_identify_state(struct dvb_usb_device *d, const char **name) { struct state *state = d_to_priv(d); struct usb_interface *intf = d->intf; int ret, i, ts_mode_invalid; unsigned int utmp, eeprom_addr; u8 tmp; u8 wbuf[1] = { 1 }; u8 rbuf[4]; struct usb_req req = { CMD_FW_QUERYINFO, 0, sizeof(wbuf), wbuf, sizeof(rbuf), rbuf }; ret = af9035_rd_regs(d, 0x1222, rbuf, 3); if (ret < 0) goto err; state->chip_version = rbuf[0]; state->chip_type = rbuf[2] << 8 | rbuf[1] << 0; ret = af9035_rd_reg(d, 0x384f, &state->prechip_version); if (ret < 0) goto err; dev_info(&intf->dev, "prechip_version=%02x chip_version=%02x chip_type=%04x\n", state->prechip_version, state->chip_version, state->chip_type); if (state->chip_type == 0x9135) { if (state->chip_version == 0x02) { *name = AF9035_FIRMWARE_IT9135_V2; utmp = 0x00461d; } else { *name = AF9035_FIRMWARE_IT9135_V1; utmp = 0x00461b; } /* Check if eeprom exists */ ret = af9035_rd_reg(d, utmp, &tmp); if (ret < 0) goto err; if (tmp == 0x00) { dev_dbg(&intf->dev, "no eeprom\n"); state->no_eeprom = true; goto check_firmware_status; } eeprom_addr = EEPROM_BASE_IT9135; } else if (state->chip_type == 0x9306) { *name = AF9035_FIRMWARE_IT9303; state->no_eeprom = true; goto check_firmware_status; } else { *name = AF9035_FIRMWARE_AF9035; eeprom_addr = EEPROM_BASE_AF9035; } /* Read and store eeprom */ for (i = 0; i < 256; i += 32) { ret = af9035_rd_regs(d, eeprom_addr + i, &state->eeprom[i], 32); if (ret < 0) goto err; } dev_dbg(&intf->dev, "eeprom dump:\n"); for (i = 0; i < 256; i += 16) dev_dbg(&intf->dev, "%*ph\n", 16, &state->eeprom[i]); /* check for dual tuner mode */ tmp = state->eeprom[EEPROM_TS_MODE]; ts_mode_invalid = 0; switch (tmp) { case 0: break; case 1: case 3: state->dual_mode = true; break; case 5: if (state->chip_type != 0x9135 && state->chip_type != 0x9306) state->dual_mode = true; /* AF9035 */ else ts_mode_invalid = 1; break; default: ts_mode_invalid = 1; } dev_dbg(&intf->dev, "ts mode=%d dual mode=%d\n", tmp, state->dual_mode); if (ts_mode_invalid) dev_info(&intf->dev, "ts mode=%d not supported, defaulting to single tuner mode!", tmp); check_firmware_status: ret = af9035_ctrl_msg(d, &req); if (ret < 0) goto err; dev_dbg(&intf->dev, "reply=%*ph\n", 4, rbuf); if (rbuf[0] || rbuf[1] || rbuf[2] || rbuf[3]) ret = WARM; else ret = COLD; return ret; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static int af9035_download_firmware_old(struct dvb_usb_device *d, const struct firmware *fw) { struct usb_interface *intf = d->intf; int ret, i, j, len; u8 wbuf[1]; struct usb_req req = { 0, 0, 0, NULL, 0, NULL }; struct usb_req req_fw_dl = { CMD_FW_DL, 0, 0, wbuf, 0, NULL }; u8 hdr_core; u16 hdr_addr, hdr_data_len, hdr_checksum; #define MAX_DATA 58 
#define HDR_SIZE 7 /* * Thanks to Daniel Glöckner <daniel-gl@gmx.net> about that info! * * byte 0: MCS 51 core * There are two inside the AF9035 (1=Link and 2=OFDM) with separate * address spaces * byte 1-2: Big endian destination address * byte 3-4: Big endian number of data bytes following the header * byte 5-6: Big endian header checksum, apparently ignored by the chip * Calculated as ~(h[0]*256+h[1]+h[2]*256+h[3]+h[4]*256) */ for (i = fw->size; i > HDR_SIZE;) { hdr_core = fw->data[fw->size - i + 0]; hdr_addr = fw->data[fw->size - i + 1] << 8; hdr_addr |= fw->data[fw->size - i + 2] << 0; hdr_data_len = fw->data[fw->size - i + 3] << 8; hdr_data_len |= fw->data[fw->size - i + 4] << 0; hdr_checksum = fw->data[fw->size - i + 5] << 8; hdr_checksum |= fw->data[fw->size - i + 6] << 0; dev_dbg(&intf->dev, "core=%d addr=%04x data_len=%d checksum=%04x\n", hdr_core, hdr_addr, hdr_data_len, hdr_checksum); if (((hdr_core != 1) && (hdr_core != 2)) || (hdr_data_len > i)) { dev_dbg(&intf->dev, "bad firmware\n"); break; } /* download begin packet */ req.cmd = CMD_FW_DL_BEGIN; ret = af9035_ctrl_msg(d, &req); if (ret < 0) goto err; /* download firmware packet(s) */ for (j = HDR_SIZE + hdr_data_len; j > 0; j -= MAX_DATA) { len = j; if (len > MAX_DATA) len = MAX_DATA; req_fw_dl.wlen = len; req_fw_dl.wbuf = (u8 *) &fw->data[fw->size - i + HDR_SIZE + hdr_data_len - j]; ret = af9035_ctrl_msg(d, &req_fw_dl); if (ret < 0) goto err; } /* download end packet */ req.cmd = CMD_FW_DL_END; ret = af9035_ctrl_msg(d, &req); if (ret < 0) goto err; i -= hdr_data_len + HDR_SIZE; dev_dbg(&intf->dev, "data uploaded=%zu\n", fw->size - i); } /* print warn if firmware is bad, continue and see what happens */ if (i) dev_warn(&intf->dev, "bad firmware\n"); return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static int af9035_download_firmware_new(struct dvb_usb_device *d, const struct firmware *fw) { struct usb_interface *intf = d->intf; int ret, i, i_prev; struct usb_req req_fw_dl = { CMD_FW_SCATTER_WR, 0, 0, NULL, 0, NULL }; #define HDR_SIZE 7 /* * There seems to be following firmware header. Meaning of bytes 0-3 * is unknown. * * 0: 3 * 1: 0, 1 * 2: 0 * 3: 1, 2, 3 * 4: addr MSB * 5: addr LSB * 6: count of data bytes ? */ for (i = HDR_SIZE, i_prev = 0; i <= fw->size; i++) { if (i == fw->size || (fw->data[i + 0] == 0x03 && (fw->data[i + 1] == 0x00 || fw->data[i + 1] == 0x01) && fw->data[i + 2] == 0x00)) { req_fw_dl.wlen = i - i_prev; req_fw_dl.wbuf = (u8 *) &fw->data[i_prev]; i_prev = i; ret = af9035_ctrl_msg(d, &req_fw_dl); if (ret < 0) goto err; dev_dbg(&intf->dev, "data uploaded=%d\n", i); } } return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static int af9035_download_firmware(struct dvb_usb_device *d, const struct firmware *fw) { struct usb_interface *intf = d->intf; struct state *state = d_to_priv(d); int ret; u8 wbuf[1]; u8 rbuf[4]; u8 tmp; struct usb_req req = { 0, 0, 0, NULL, 0, NULL }; struct usb_req req_fw_ver = { CMD_FW_QUERYINFO, 0, 1, wbuf, 4, rbuf }; dev_dbg(&intf->dev, "\n"); /* * In case of dual tuner configuration we need to do some extra * initialization in order to download firmware to slave demod too, * which is done by master demod. * Master feeds also clock and controls power via GPIO. 
*/ if (state->dual_mode) { /* configure gpioh1, reset & power slave demod */ ret = af9035_wr_reg_mask(d, 0x00d8b0, 0x01, 0x01); if (ret < 0) goto err; ret = af9035_wr_reg_mask(d, 0x00d8b1, 0x01, 0x01); if (ret < 0) goto err; ret = af9035_wr_reg_mask(d, 0x00d8af, 0x00, 0x01); if (ret < 0) goto err; usleep_range(10000, 50000); ret = af9035_wr_reg_mask(d, 0x00d8af, 0x01, 0x01); if (ret < 0) goto err; /* tell the slave I2C address */ tmp = state->eeprom[EEPROM_2ND_DEMOD_ADDR]; /* Use default I2C address if eeprom has no address set */ if (!tmp) tmp = 0x1d << 1; /* 8-bit format used by chip */ if ((state->chip_type == 0x9135) || (state->chip_type == 0x9306)) { ret = af9035_wr_reg(d, 0x004bfb, tmp); if (ret < 0) goto err; } else { ret = af9035_wr_reg(d, 0x00417f, tmp); if (ret < 0) goto err; /* enable clock out */ ret = af9035_wr_reg_mask(d, 0x00d81a, 0x01, 0x01); if (ret < 0) goto err; } } if (fw->data[0] == 0x01) ret = af9035_download_firmware_old(d, fw); else ret = af9035_download_firmware_new(d, fw); if (ret < 0) goto err; /* firmware loaded, request boot */ req.cmd = CMD_FW_BOOT; ret = af9035_ctrl_msg(d, &req); if (ret < 0) goto err; /* ensure firmware starts */ wbuf[0] = 1; ret = af9035_ctrl_msg(d, &req_fw_ver); if (ret < 0) goto err; if (!(rbuf[0] || rbuf[1] || rbuf[2] || rbuf[3])) { dev_err(&intf->dev, "firmware did not run\n"); ret = -ENODEV; goto err; } dev_info(&intf->dev, "firmware version=%d.%d.%d.%d", rbuf[0], rbuf[1], rbuf[2], rbuf[3]); return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static int af9035_read_config(struct dvb_usb_device *d) { struct usb_interface *intf = d->intf; struct state *state = d_to_priv(d); int ret, i; u8 tmp; u16 tmp16; /* Demod I2C address */ state->af9033_i2c_addr[0] = 0x1c; state->af9033_i2c_addr[1] = 0x1d; state->af9033_config[0].adc_multiplier = AF9033_ADC_MULTIPLIER_2X; state->af9033_config[1].adc_multiplier = AF9033_ADC_MULTIPLIER_2X; state->af9033_config[0].ts_mode = AF9033_TS_MODE_USB; state->af9033_config[1].ts_mode = AF9033_TS_MODE_SERIAL; state->it930x_addresses = 0; if (state->chip_type == 0x9135) { /* feed clock for integrated RF tuner */ state->af9033_config[0].dyn0_clk = true; state->af9033_config[1].dyn0_clk = true; if (state->chip_version == 0x02) { state->af9033_config[0].tuner = AF9033_TUNER_IT9135_60; state->af9033_config[1].tuner = AF9033_TUNER_IT9135_60; } else { state->af9033_config[0].tuner = AF9033_TUNER_IT9135_38; state->af9033_config[1].tuner = AF9033_TUNER_IT9135_38; } if (state->no_eeprom) { /* Remote controller to NEC polling by default */ state->ir_mode = 0x05; state->ir_type = 0x00; goto skip_eeprom; } } else if (state->chip_type == 0x9306) { /* * IT930x is an USB bridge, only single demod-single tuner * configurations seen so far. */ if ((le16_to_cpu(d->udev->descriptor.idVendor) == USB_VID_AVERMEDIA) && (le16_to_cpu(d->udev->descriptor.idProduct) == USB_PID_AVERMEDIA_TD310)) { state->it930x_addresses = 1; /* TD310 RC works with NEC defaults */ state->ir_mode = 0x05; state->ir_type = 0x00; } return 0; } /* Remote controller */ state->ir_mode = state->eeprom[EEPROM_IR_MODE]; state->ir_type = state->eeprom[EEPROM_IR_TYPE]; if (state->dual_mode) { /* Read 2nd demodulator I2C address. 
8-bit format on eeprom */ tmp = state->eeprom[EEPROM_2ND_DEMOD_ADDR]; if (tmp) state->af9033_i2c_addr[1] = tmp >> 1; dev_dbg(&intf->dev, "2nd demod I2C addr=%02x\n", state->af9033_i2c_addr[1]); } for (i = 0; i < state->dual_mode + 1; i++) { unsigned int eeprom_offset = 0; /* tuner */ tmp = state->eeprom[EEPROM_1_TUNER_ID + eeprom_offset]; dev_dbg(&intf->dev, "[%d]tuner=%02x\n", i, tmp); /* tuner sanity check */ if (state->chip_type == 0x9135) { if (state->chip_version == 0x02) { /* IT9135 BX (v2) */ switch (tmp) { case AF9033_TUNER_IT9135_60: case AF9033_TUNER_IT9135_61: case AF9033_TUNER_IT9135_62: state->af9033_config[i].tuner = tmp; break; } } else { /* IT9135 AX (v1) */ switch (tmp) { case AF9033_TUNER_IT9135_38: case AF9033_TUNER_IT9135_51: case AF9033_TUNER_IT9135_52: state->af9033_config[i].tuner = tmp; break; } } } else { /* AF9035 */ state->af9033_config[i].tuner = tmp; } if (state->af9033_config[i].tuner != tmp) { dev_info(&intf->dev, "[%d] overriding tuner from %02x to %02x\n", i, tmp, state->af9033_config[i].tuner); } switch (state->af9033_config[i].tuner) { case AF9033_TUNER_TUA9001: case AF9033_TUNER_FC0011: case AF9033_TUNER_MXL5007T: case AF9033_TUNER_TDA18218: case AF9033_TUNER_FC2580: case AF9033_TUNER_FC0012: state->af9033_config[i].spec_inv = 1; break; case AF9033_TUNER_IT9135_38: case AF9033_TUNER_IT9135_51: case AF9033_TUNER_IT9135_52: case AF9033_TUNER_IT9135_60: case AF9033_TUNER_IT9135_61: case AF9033_TUNER_IT9135_62: break; default: dev_warn(&intf->dev, "tuner id=%02x not supported, please report!", tmp); } /* disable dual mode if driver does not support it */ if (i == 1) switch (state->af9033_config[i].tuner) { case AF9033_TUNER_FC0012: case AF9033_TUNER_IT9135_38: case AF9033_TUNER_IT9135_51: case AF9033_TUNER_IT9135_52: case AF9033_TUNER_IT9135_60: case AF9033_TUNER_IT9135_61: case AF9033_TUNER_IT9135_62: case AF9033_TUNER_MXL5007T: break; default: state->dual_mode = false; dev_info(&intf->dev, "driver does not support 2nd tuner and will disable it"); } /* tuner IF frequency */ tmp = state->eeprom[EEPROM_1_IF_L + eeprom_offset]; tmp16 = tmp << 0; tmp = state->eeprom[EEPROM_1_IF_H + eeprom_offset]; tmp16 |= tmp << 8; dev_dbg(&intf->dev, "[%d]IF=%d\n", i, tmp16); eeprom_offset += 0x10; /* shift for the 2nd tuner params */ } skip_eeprom: /* get demod clock */ ret = af9035_rd_reg(d, 0x00d800, &tmp); if (ret < 0) goto err; tmp = (tmp >> 0) & 0x0f; for (i = 0; i < ARRAY_SIZE(state->af9033_config); i++) { if (state->chip_type == 0x9135) state->af9033_config[i].clock = clock_lut_it9135[tmp]; else state->af9033_config[i].clock = clock_lut_af9035[tmp]; } state->no_read = false; /* Some MXL5007T devices cannot properly handle tuner I2C read ops. */ if (state->af9033_config[0].tuner == AF9033_TUNER_MXL5007T && le16_to_cpu(d->udev->descriptor.idVendor) == USB_VID_AVERMEDIA) switch (le16_to_cpu(d->udev->descriptor.idProduct)) { case USB_PID_AVERMEDIA_A867: case USB_PID_AVERMEDIA_TWINSTAR: dev_info(&intf->dev, "Device may have issues with I2C read operations. 
Enabling fix.\n"); state->no_read = true; break; } return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static int af9035_tua9001_tuner_callback(struct dvb_usb_device *d, int cmd, int arg) { struct usb_interface *intf = d->intf; int ret; u8 val; dev_dbg(&intf->dev, "cmd=%d arg=%d\n", cmd, arg); /* * CEN always enabled by hardware wiring * RESETN GPIOT3 * RXEN GPIOT2 */ switch (cmd) { case TUA9001_CMD_RESETN: if (arg) val = 0x00; else val = 0x01; ret = af9035_wr_reg_mask(d, 0x00d8e7, val, 0x01); if (ret < 0) goto err; break; case TUA9001_CMD_RXEN: if (arg) val = 0x01; else val = 0x00; ret = af9035_wr_reg_mask(d, 0x00d8eb, val, 0x01); if (ret < 0) goto err; break; } return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static int af9035_fc0011_tuner_callback(struct dvb_usb_device *d, int cmd, int arg) { struct usb_interface *intf = d->intf; int ret; switch (cmd) { case FC0011_FE_CALLBACK_POWER: /* Tuner enable */ ret = af9035_wr_reg_mask(d, 0xd8eb, 1, 1); if (ret < 0) goto err; ret = af9035_wr_reg_mask(d, 0xd8ec, 1, 1); if (ret < 0) goto err; ret = af9035_wr_reg_mask(d, 0xd8ed, 1, 1); if (ret < 0) goto err; /* LED */ ret = af9035_wr_reg_mask(d, 0xd8d0, 1, 1); if (ret < 0) goto err; ret = af9035_wr_reg_mask(d, 0xd8d1, 1, 1); if (ret < 0) goto err; usleep_range(10000, 50000); break; case FC0011_FE_CALLBACK_RESET: ret = af9035_wr_reg(d, 0xd8e9, 1); if (ret < 0) goto err; ret = af9035_wr_reg(d, 0xd8e8, 1); if (ret < 0) goto err; ret = af9035_wr_reg(d, 0xd8e7, 1); if (ret < 0) goto err; usleep_range(10000, 20000); ret = af9035_wr_reg(d, 0xd8e7, 0); if (ret < 0) goto err; usleep_range(10000, 20000); break; default: ret = -EINVAL; goto err; } return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static int af9035_tuner_callback(struct dvb_usb_device *d, int cmd, int arg) { struct state *state = d_to_priv(d); switch (state->af9033_config[0].tuner) { case AF9033_TUNER_FC0011: return af9035_fc0011_tuner_callback(d, cmd, arg); case AF9033_TUNER_TUA9001: return af9035_tua9001_tuner_callback(d, cmd, arg); default: break; } return 0; } static int af9035_frontend_callback(void *adapter_priv, int component, int cmd, int arg) { struct i2c_adapter *adap = adapter_priv; struct dvb_usb_device *d = i2c_get_adapdata(adap); struct usb_interface *intf = d->intf; dev_dbg(&intf->dev, "component=%d cmd=%d arg=%d\n", component, cmd, arg); switch (component) { case DVB_FRONTEND_COMPONENT_TUNER: return af9035_tuner_callback(d, cmd, arg); default: break; } return 0; } static int af9035_get_adapter_count(struct dvb_usb_device *d) { struct state *state = d_to_priv(d); return state->dual_mode + 1; } static int af9035_frontend_attach(struct dvb_usb_adapter *adap) { struct state *state = adap_to_priv(adap); struct dvb_usb_device *d = adap_to_d(adap); struct usb_interface *intf = d->intf; int ret; dev_dbg(&intf->dev, "adap->id=%d\n", adap->id); if (!state->af9033_config[adap->id].tuner) { /* unsupported tuner */ ret = -ENODEV; goto err; } state->af9033_config[adap->id].fe = &adap->fe[0]; state->af9033_config[adap->id].ops = &state->ops; ret = af9035_add_i2c_dev(d, "af9033", state->af9033_i2c_addr[adap->id], &state->af9033_config[adap->id], &d->i2c_adap); if (ret) goto err; if (adap->fe[0] == NULL) { ret = -ENODEV; goto err; } /* disable I2C-gate */ adap->fe[0]->ops.i2c_gate_ctrl = NULL; adap->fe[0]->callback = af9035_frontend_callback; return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } /* * The I2C speed register is calculated with: * I2C speed register = 
(1000000000 / (24.4 * 16 * I2C_speed)) * * The default speed register for it930x is 7, with means a * speed of ~366 kbps */ #define I2C_SPEED_366K 7 static int it930x_frontend_attach(struct dvb_usb_adapter *adap) { struct state *state = adap_to_priv(adap); struct dvb_usb_device *d = adap_to_d(adap); struct usb_interface *intf = d->intf; int ret; struct si2168_config si2168_config; struct i2c_adapter *adapter; dev_dbg(&intf->dev, "adap->id=%d\n", adap->id); /* I2C master bus 2 clock speed 366k */ ret = af9035_wr_reg(d, 0x00f6a7, I2C_SPEED_366K); if (ret < 0) goto err; /* I2C master bus 1,3 clock speed 366k */ ret = af9035_wr_reg(d, 0x00f103, I2C_SPEED_366K); if (ret < 0) goto err; /* set gpio11 low */ ret = af9035_wr_reg_mask(d, 0xd8d4, 0x01, 0x01); if (ret < 0) goto err; ret = af9035_wr_reg_mask(d, 0xd8d5, 0x01, 0x01); if (ret < 0) goto err; ret = af9035_wr_reg_mask(d, 0xd8d3, 0x01, 0x01); if (ret < 0) goto err; /* Tuner enable using gpiot2_en, gpiot2_on and gpiot2_o (reset) */ ret = af9035_wr_reg_mask(d, 0xd8b8, 0x01, 0x01); if (ret < 0) goto err; ret = af9035_wr_reg_mask(d, 0xd8b9, 0x01, 0x01); if (ret < 0) goto err; ret = af9035_wr_reg_mask(d, 0xd8b7, 0x00, 0x01); if (ret < 0) goto err; msleep(200); ret = af9035_wr_reg_mask(d, 0xd8b7, 0x01, 0x01); if (ret < 0) goto err; memset(&si2168_config, 0, sizeof(si2168_config)); si2168_config.i2c_adapter = &adapter; si2168_config.fe = &adap->fe[0]; si2168_config.ts_mode = SI2168_TS_SERIAL; state->af9033_config[adap->id].fe = &adap->fe[0]; state->af9033_config[adap->id].ops = &state->ops; ret = af9035_add_i2c_dev(d, "si2168", it930x_addresses_table[state->it930x_addresses].frontend_i2c_addr, &si2168_config, &d->i2c_adap); if (ret) goto err; if (adap->fe[0] == NULL) { ret = -ENODEV; goto err; } state->i2c_adapter_demod = adapter; return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static int af9035_frontend_detach(struct dvb_usb_adapter *adap) { struct state *state = adap_to_priv(adap); struct dvb_usb_device *d = adap_to_d(adap); struct usb_interface *intf = d->intf; dev_dbg(&intf->dev, "adap->id=%d\n", adap->id); if (adap->id == 1) { if (state->i2c_client[1]) af9035_del_i2c_dev(d); } else if (adap->id == 0) { if (state->i2c_client[0]) af9035_del_i2c_dev(d); } return 0; } static const struct fc0011_config af9035_fc0011_config = { .i2c_address = 0x60, }; static struct mxl5007t_config af9035_mxl5007t_config[] = { { .xtal_freq_hz = MxL_XTAL_24_MHZ, .if_freq_hz = MxL_IF_4_57_MHZ, .invert_if = 0, .loop_thru_enable = 0, .clk_out_enable = 0, .clk_out_amp = MxL_CLKOUT_AMP_0_94V, }, { .xtal_freq_hz = MxL_XTAL_24_MHZ, .if_freq_hz = MxL_IF_4_57_MHZ, .invert_if = 0, .loop_thru_enable = 1, .clk_out_enable = 1, .clk_out_amp = MxL_CLKOUT_AMP_0_94V, } }; static struct tda18218_config af9035_tda18218_config = { .i2c_address = 0x60, .i2c_wr_max = 21, }; static const struct fc0012_config af9035_fc0012_config[] = { { .i2c_address = 0x63, .xtal_freq = FC_XTAL_36_MHZ, .dual_master = true, .loop_through = true, .clock_out = true, }, { .i2c_address = 0x63 | 0x80, /* I2C bus select hack */ .xtal_freq = FC_XTAL_36_MHZ, .dual_master = true, } }; static int af9035_tuner_attach(struct dvb_usb_adapter *adap) { struct state *state = adap_to_priv(adap); struct dvb_usb_device *d = adap_to_d(adap); struct usb_interface *intf = d->intf; int ret; struct dvb_frontend *fe; struct i2c_msg msg[1]; u8 tuner_addr; dev_dbg(&intf->dev, "adap->id=%d\n", adap->id); /* * XXX: Hack used in that function: we abuse unused I2C address bit [7] * to carry info about used I2C bus for 
dual tuner configuration. */ switch (state->af9033_config[adap->id].tuner) { case AF9033_TUNER_TUA9001: { struct tua9001_platform_data tua9001_pdata = { .dvb_frontend = adap->fe[0], }; /* * AF9035 gpiot3 = TUA9001 RESETN * AF9035 gpiot2 = TUA9001 RXEN */ /* configure gpiot2 and gpiot2 as output */ ret = af9035_wr_reg_mask(d, 0x00d8ec, 0x01, 0x01); if (ret < 0) goto err; ret = af9035_wr_reg_mask(d, 0x00d8ed, 0x01, 0x01); if (ret < 0) goto err; ret = af9035_wr_reg_mask(d, 0x00d8e8, 0x01, 0x01); if (ret < 0) goto err; ret = af9035_wr_reg_mask(d, 0x00d8e9, 0x01, 0x01); if (ret < 0) goto err; /* attach tuner */ ret = af9035_add_i2c_dev(d, "tua9001", 0x60, &tua9001_pdata, &d->i2c_adap); if (ret) goto err; fe = adap->fe[0]; break; } case AF9033_TUNER_FC0011: fe = dvb_attach(fc0011_attach, adap->fe[0], &d->i2c_adap, &af9035_fc0011_config); break; case AF9033_TUNER_MXL5007T: if (adap->id == 0) { ret = af9035_wr_reg(d, 0x00d8e0, 1); if (ret < 0) goto err; ret = af9035_wr_reg(d, 0x00d8e1, 1); if (ret < 0) goto err; ret = af9035_wr_reg(d, 0x00d8df, 0); if (ret < 0) goto err; msleep(30); ret = af9035_wr_reg(d, 0x00d8df, 1); if (ret < 0) goto err; msleep(300); ret = af9035_wr_reg(d, 0x00d8c0, 1); if (ret < 0) goto err; ret = af9035_wr_reg(d, 0x00d8c1, 1); if (ret < 0) goto err; ret = af9035_wr_reg(d, 0x00d8bf, 0); if (ret < 0) goto err; ret = af9035_wr_reg(d, 0x00d8b4, 1); if (ret < 0) goto err; ret = af9035_wr_reg(d, 0x00d8b5, 1); if (ret < 0) goto err; ret = af9035_wr_reg(d, 0x00d8b3, 1); if (ret < 0) goto err; tuner_addr = 0x60; } else { tuner_addr = 0x60 | 0x80; /* I2C bus hack */ } /* attach tuner */ fe = dvb_attach(mxl5007t_attach, adap->fe[0], &d->i2c_adap, tuner_addr, &af9035_mxl5007t_config[adap->id]); break; case AF9033_TUNER_TDA18218: /* attach tuner */ fe = dvb_attach(tda18218_attach, adap->fe[0], &d->i2c_adap, &af9035_tda18218_config); break; case AF9033_TUNER_FC2580: { struct fc2580_platform_data fc2580_pdata = { .dvb_frontend = adap->fe[0], }; /* Tuner enable using gpiot2_o, gpiot2_en and gpiot2_on */ ret = af9035_wr_reg_mask(d, 0xd8eb, 0x01, 0x01); if (ret < 0) goto err; ret = af9035_wr_reg_mask(d, 0xd8ec, 0x01, 0x01); if (ret < 0) goto err; ret = af9035_wr_reg_mask(d, 0xd8ed, 0x01, 0x01); if (ret < 0) goto err; usleep_range(10000, 50000); /* attach tuner */ ret = af9035_add_i2c_dev(d, "fc2580", 0x56, &fc2580_pdata, &d->i2c_adap); if (ret) goto err; fe = adap->fe[0]; break; } case AF9033_TUNER_FC0012: /* * AF9035 gpiot2 = FC0012 enable * XXX: there seems to be something on gpioh8 too, but on my * test I didn't find any difference. */ if (adap->id == 0) { /* configure gpiot2 as output and high */ ret = af9035_wr_reg_mask(d, 0xd8eb, 0x01, 0x01); if (ret < 0) goto err; ret = af9035_wr_reg_mask(d, 0xd8ec, 0x01, 0x01); if (ret < 0) goto err; ret = af9035_wr_reg_mask(d, 0xd8ed, 0x01, 0x01); if (ret < 0) goto err; } else { /* * FIXME: That belongs for the FC0012 driver. * Write 02 to FC0012 master tuner register 0d directly * in order to make slave tuner working. 
*/ msg[0].addr = 0x63; msg[0].flags = 0; msg[0].len = 2; msg[0].buf = "\x0d\x02"; ret = i2c_transfer(&d->i2c_adap, msg, 1); if (ret < 0) goto err; } usleep_range(10000, 50000); fe = dvb_attach(fc0012_attach, adap->fe[0], &d->i2c_adap, &af9035_fc0012_config[adap->id]); break; case AF9033_TUNER_IT9135_38: case AF9033_TUNER_IT9135_51: case AF9033_TUNER_IT9135_52: case AF9033_TUNER_IT9135_60: case AF9033_TUNER_IT9135_61: case AF9033_TUNER_IT9135_62: { struct platform_device *pdev; const char *name; struct it913x_platform_data it913x_pdata = { .regmap = state->af9033_config[adap->id].regmap, .fe = adap->fe[0], }; switch (state->af9033_config[adap->id].tuner) { case AF9033_TUNER_IT9135_38: case AF9033_TUNER_IT9135_51: case AF9033_TUNER_IT9135_52: name = "it9133ax-tuner"; break; case AF9033_TUNER_IT9135_60: case AF9033_TUNER_IT9135_61: case AF9033_TUNER_IT9135_62: name = "it9133bx-tuner"; break; default: ret = -ENODEV; goto err; } if (state->dual_mode) { if (adap->id == 0) it913x_pdata.role = IT913X_ROLE_DUAL_MASTER; else it913x_pdata.role = IT913X_ROLE_DUAL_SLAVE; } else { it913x_pdata.role = IT913X_ROLE_SINGLE; } request_module("%s", "it913x"); pdev = platform_device_register_data(&d->intf->dev, name, PLATFORM_DEVID_AUTO, &it913x_pdata, sizeof(it913x_pdata)); if (IS_ERR(pdev) || !pdev->dev.driver) { ret = -ENODEV; goto err; } if (!try_module_get(pdev->dev.driver->owner)) { platform_device_unregister(pdev); ret = -ENODEV; goto err; } state->platform_device_tuner[adap->id] = pdev; fe = adap->fe[0]; break; } default: fe = NULL; } if (fe == NULL) { ret = -ENODEV; goto err; } return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static int it930x_tuner_attach(struct dvb_usb_adapter *adap) { struct state *state = adap_to_priv(adap); struct dvb_usb_device *d = adap_to_d(adap); struct usb_interface *intf = d->intf; int ret; struct si2157_config si2157_config; dev_dbg(&intf->dev, "adap->id=%d\n", adap->id); memset(&si2157_config, 0, sizeof(si2157_config)); si2157_config.fe = adap->fe[0]; /* * HACK: The Logilink VG0022A and TerraTec TC2 Stick have * a bug: when the si2157 firmware that came with the device * is replaced by a new one, the I2C transfers to the tuner * will return just 0xff. * * Probably, the vendor firmware has some patch specifically * designed for this device. So, we can't replace by the * generic firmware. The right solution would be to extract * the si2157 firmware from the original driver and ask the * driver to load the specifically designed firmware, but, * while we don't have that, the next best solution is to just * keep the original firmware at the device. 
*/ if ((le16_to_cpu(d->udev->descriptor.idVendor) == USB_VID_DEXATEK && le16_to_cpu(d->udev->descriptor.idProduct) == 0x0100) || (le16_to_cpu(d->udev->descriptor.idVendor) == USB_VID_TERRATEC && le16_to_cpu(d->udev->descriptor.idProduct) == USB_PID_TERRATEC_CINERGY_TC2_STICK)) si2157_config.dont_load_firmware = true; si2157_config.if_port = it930x_addresses_table[state->it930x_addresses].tuner_if_port; ret = af9035_add_i2c_dev(d, "si2157", it930x_addresses_table[state->it930x_addresses].tuner_i2c_addr, &si2157_config, state->i2c_adapter_demod); if (ret) goto err; return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static int it930x_tuner_detach(struct dvb_usb_adapter *adap) { struct state *state = adap_to_priv(adap); struct dvb_usb_device *d = adap_to_d(adap); struct usb_interface *intf = d->intf; dev_dbg(&intf->dev, "adap->id=%d\n", adap->id); if (adap->id == 1) { if (state->i2c_client[3]) af9035_del_i2c_dev(d); } else if (adap->id == 0) { if (state->i2c_client[1]) af9035_del_i2c_dev(d); } return 0; } static int af9035_tuner_detach(struct dvb_usb_adapter *adap) { struct state *state = adap_to_priv(adap); struct dvb_usb_device *d = adap_to_d(adap); struct usb_interface *intf = d->intf; dev_dbg(&intf->dev, "adap->id=%d\n", adap->id); switch (state->af9033_config[adap->id].tuner) { case AF9033_TUNER_TUA9001: case AF9033_TUNER_FC2580: if (adap->id == 1) { if (state->i2c_client[3]) af9035_del_i2c_dev(d); } else if (adap->id == 0) { if (state->i2c_client[1]) af9035_del_i2c_dev(d); } break; case AF9033_TUNER_IT9135_38: case AF9033_TUNER_IT9135_51: case AF9033_TUNER_IT9135_52: case AF9033_TUNER_IT9135_60: case AF9033_TUNER_IT9135_61: case AF9033_TUNER_IT9135_62: { struct platform_device *pdev; pdev = state->platform_device_tuner[adap->id]; if (pdev) { module_put(pdev->dev.driver->owner); platform_device_unregister(pdev); } break; } } return 0; } static int af9035_init(struct dvb_usb_device *d) { struct state *state = d_to_priv(d); struct usb_interface *intf = d->intf; int ret, i; u16 frame_size = (d->udev->speed == USB_SPEED_FULL ? 5 : 87) * 188 / 4; u8 packet_size = (d->udev->speed == USB_SPEED_FULL ? 64 : 512) / 4; struct reg_val_mask tab[] = { { 0x80f99d, 0x01, 0x01 }, { 0x80f9a4, 0x01, 0x01 }, { 0x00dd11, 0x00, 0x20 }, { 0x00dd11, 0x00, 0x40 }, { 0x00dd13, 0x00, 0x20 }, { 0x00dd13, 0x00, 0x40 }, { 0x00dd11, 0x20, 0x20 }, { 0x00dd88, (frame_size >> 0) & 0xff, 0xff}, { 0x00dd89, (frame_size >> 8) & 0xff, 0xff}, { 0x00dd0c, packet_size, 0xff}, { 0x00dd11, state->dual_mode << 6, 0x40 }, { 0x00dd8a, (frame_size >> 0) & 0xff, 0xff}, { 0x00dd8b, (frame_size >> 8) & 0xff, 0xff}, { 0x00dd0d, packet_size, 0xff }, { 0x80f9a3, state->dual_mode, 0x01 }, { 0x80f9cd, state->dual_mode, 0x01 }, { 0x80f99d, 0x00, 0x01 }, { 0x80f9a4, 0x00, 0x01 }, }; dev_dbg(&intf->dev, "USB speed=%d frame_size=%04x packet_size=%02x\n", d->udev->speed, frame_size, packet_size); /* init endpoints */ for (i = 0; i < ARRAY_SIZE(tab); i++) { ret = af9035_wr_reg_mask(d, tab[i].reg, tab[i].val, tab[i].mask); if (ret < 0) goto err; } return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static int it930x_init(struct dvb_usb_device *d) { struct state *state = d_to_priv(d); struct usb_interface *intf = d->intf; int ret, i; u16 frame_size = (d->udev->speed == USB_SPEED_FULL ? 5 : 816) * 188 / 4; u8 packet_size = (d->udev->speed == USB_SPEED_FULL ? 
64 : 512) / 4; struct reg_val_mask tab[] = { { 0x00da1a, 0x00, 0x01 }, /* ignore_sync_byte */ { 0x00f41f, 0x04, 0x04 }, /* dvbt_inten */ { 0x00da10, 0x00, 0x01 }, /* mpeg_full_speed */ { 0x00f41a, 0x01, 0x01 }, /* dvbt_en */ { 0x00da1d, 0x01, 0x01 }, /* mp2_sw_rst, reset EP4 */ { 0x00dd11, 0x00, 0x20 }, /* ep4_tx_en, disable EP4 */ { 0x00dd13, 0x00, 0x20 }, /* ep4_tx_nak, disable EP4 NAK */ { 0x00dd11, 0x20, 0x20 }, /* ep4_tx_en, enable EP4 */ { 0x00dd11, 0x00, 0x40 }, /* ep5_tx_en, disable EP5 */ { 0x00dd13, 0x00, 0x40 }, /* ep5_tx_nak, disable EP5 NAK */ { 0x00dd11, state->dual_mode << 6, 0x40 }, /* enable EP5 */ { 0x00dd88, (frame_size >> 0) & 0xff, 0xff}, { 0x00dd89, (frame_size >> 8) & 0xff, 0xff}, { 0x00dd0c, packet_size, 0xff}, { 0x00dd8a, (frame_size >> 0) & 0xff, 0xff}, { 0x00dd8b, (frame_size >> 8) & 0xff, 0xff}, { 0x00dd0d, packet_size, 0xff }, { 0x00da1d, 0x00, 0x01 }, /* mp2_sw_rst, disable */ { 0x00d833, 0x01, 0xff }, /* slew rate ctrl: slew rate boosts */ { 0x00d830, 0x00, 0xff }, /* Bit 0 of output driving control */ { 0x00d831, 0x01, 0xff }, /* Bit 1 of output driving control */ { 0x00d832, 0x00, 0xff }, /* Bit 2 of output driving control */ /* suspend gpio1 for TS-C */ { 0x00d8b0, 0x01, 0xff }, /* gpio1 */ { 0x00d8b1, 0x01, 0xff }, /* gpio1 */ { 0x00d8af, 0x00, 0xff }, /* gpio1 */ /* suspend gpio7 for TS-D */ { 0x00d8c4, 0x01, 0xff }, /* gpio7 */ { 0x00d8c5, 0x01, 0xff }, /* gpio7 */ { 0x00d8c3, 0x00, 0xff }, /* gpio7 */ /* suspend gpio13 for TS-B */ { 0x00d8dc, 0x01, 0xff }, /* gpio13 */ { 0x00d8dd, 0x01, 0xff }, /* gpio13 */ { 0x00d8db, 0x00, 0xff }, /* gpio13 */ /* suspend gpio14 for TS-E */ { 0x00d8e4, 0x01, 0xff }, /* gpio14 */ { 0x00d8e5, 0x01, 0xff }, /* gpio14 */ { 0x00d8e3, 0x00, 0xff }, /* gpio14 */ /* suspend gpio15 for TS-A */ { 0x00d8e8, 0x01, 0xff }, /* gpio15 */ { 0x00d8e9, 0x01, 0xff }, /* gpio15 */ { 0x00d8e7, 0x00, 0xff }, /* gpio15 */ { 0x00da58, 0x00, 0x01 }, /* ts_in_src, serial */ { 0x00da73, 0x01, 0xff }, /* ts0_aggre_mode */ { 0x00da78, 0x47, 0xff }, /* ts0_sync_byte */ { 0x00da4c, 0x01, 0xff }, /* ts0_en */ { 0x00da5a, 0x1f, 0xff }, /* ts_fail_ignore */ }; dev_dbg(&intf->dev, "USB speed=%d frame_size=%04x packet_size=%02x\n", d->udev->speed, frame_size, packet_size); /* init endpoints */ for (i = 0; i < ARRAY_SIZE(tab); i++) { ret = af9035_wr_reg_mask(d, tab[i].reg, tab[i].val, tab[i].mask); if (ret < 0) goto err; } return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } #if IS_ENABLED(CONFIG_RC_CORE) static int af9035_rc_query(struct dvb_usb_device *d) { struct usb_interface *intf = d->intf; int ret; enum rc_proto proto; u32 key; u8 buf[4]; struct usb_req req = { CMD_IR_GET, 0, 0, NULL, 4, buf }; ret = af9035_ctrl_msg(d, &req); if (ret == 1) return 0; else if (ret < 0) goto err; if ((buf[2] + buf[3]) == 0xff) { if ((buf[0] + buf[1]) == 0xff) { /* NEC standard 16bit */ key = RC_SCANCODE_NEC(buf[0], buf[2]); proto = RC_PROTO_NEC; } else { /* NEC extended 24bit */ key = RC_SCANCODE_NECX(buf[0] << 8 | buf[1], buf[2]); proto = RC_PROTO_NECX; } } else { /* NEC full code 32bit */ key = RC_SCANCODE_NEC32(buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3]); proto = RC_PROTO_NEC32; } dev_dbg(&intf->dev, "%*ph\n", 4, buf); rc_keydown(d->rc_dev, proto, key, 0); return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static int af9035_get_rc_config(struct dvb_usb_device *d, struct dvb_usb_rc *rc) { struct state *state = d_to_priv(d); struct usb_interface *intf = d->intf; dev_dbg(&intf->dev, "ir_mode=%02x ir_type=%02x\n", 
state->ir_mode, state->ir_type); /* don't activate rc if in HID mode or if not available */ if (state->ir_mode == 0x05) { switch (state->ir_type) { case 0: /* NEC */ default: rc->allowed_protos = RC_PROTO_BIT_NEC | RC_PROTO_BIT_NECX | RC_PROTO_BIT_NEC32; break; case 1: /* RC6 */ rc->allowed_protos = RC_PROTO_BIT_RC6_MCE; break; } rc->query = af9035_rc_query; rc->interval = 500; /* load empty to enable rc */ if (!rc->map_name) rc->map_name = RC_MAP_EMPTY; } return 0; } #else #define af9035_get_rc_config NULL #endif static int af9035_get_stream_config(struct dvb_frontend *fe, u8 *ts_type, struct usb_data_stream_properties *stream) { struct dvb_usb_device *d = fe_to_d(fe); struct usb_interface *intf = d->intf; dev_dbg(&intf->dev, "adap=%d\n", fe_to_adap(fe)->id); if (d->udev->speed == USB_SPEED_FULL) stream->u.bulk.buffersize = 5 * 188; return 0; } static int af9035_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff) { struct state *state = adap_to_priv(adap); return state->ops.pid_filter_ctrl(adap->fe[0], onoff); } static int af9035_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff) { struct state *state = adap_to_priv(adap); return state->ops.pid_filter(adap->fe[0], index, pid, onoff); } static int af9035_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); char manufacturer[sizeof("Afatech")]; memset(manufacturer, 0, sizeof(manufacturer)); usb_string(udev, udev->descriptor.iManufacturer, manufacturer, sizeof(manufacturer)); /* * There is two devices having same ID but different chipset. One uses * AF9015 and the other IT9135 chipset. Only difference seen on lsusb * is iManufacturer string. * * idVendor 0x0ccd TerraTec Electronic GmbH * idProduct 0x0099 * bcdDevice 2.00 * iManufacturer 1 Afatech * iProduct 2 DVB-T 2 * * idVendor 0x0ccd TerraTec Electronic GmbH * idProduct 0x0099 * bcdDevice 2.00 * iManufacturer 1 ITE Technologies, Inc. 
* iProduct 2 DVB-T TV Stick */ if ((le16_to_cpu(udev->descriptor.idVendor) == USB_VID_TERRATEC) && (le16_to_cpu(udev->descriptor.idProduct) == 0x0099)) { if (!strcmp("Afatech", manufacturer)) { dev_dbg(&udev->dev, "rejecting device\n"); return -ENODEV; } } return dvb_usbv2_probe(intf, id); } /* interface 0 is used by DVB-T receiver and interface 1 is for remote controller (HID) */ static const struct dvb_usb_device_properties af9035_props = { .driver_name = KBUILD_MODNAME, .owner = THIS_MODULE, .adapter_nr = adapter_nr, .size_of_priv = sizeof(struct state), .generic_bulk_ctrl_endpoint = 0x02, .generic_bulk_ctrl_endpoint_response = 0x81, .identify_state = af9035_identify_state, .download_firmware = af9035_download_firmware, .i2c_algo = &af9035_i2c_algo, .read_config = af9035_read_config, .frontend_attach = af9035_frontend_attach, .frontend_detach = af9035_frontend_detach, .tuner_attach = af9035_tuner_attach, .tuner_detach = af9035_tuner_detach, .init = af9035_init, .get_rc_config = af9035_get_rc_config, .get_stream_config = af9035_get_stream_config, .get_adapter_count = af9035_get_adapter_count, .adapter = { { .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter_ctrl = af9035_pid_filter_ctrl, .pid_filter = af9035_pid_filter, .stream = DVB_USB_STREAM_BULK(0x84, 6, 87 * 188), }, { .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .pid_filter_count = 32, .pid_filter_ctrl = af9035_pid_filter_ctrl, .pid_filter = af9035_pid_filter, .stream = DVB_USB_STREAM_BULK(0x85, 6, 87 * 188), }, }, }; static const struct dvb_usb_device_properties it930x_props = { .driver_name = KBUILD_MODNAME, .owner = THIS_MODULE, .adapter_nr = adapter_nr, .size_of_priv = sizeof(struct state), .generic_bulk_ctrl_endpoint = 0x02, .generic_bulk_ctrl_endpoint_response = 0x81, .identify_state = af9035_identify_state, .download_firmware = af9035_download_firmware, .i2c_algo = &af9035_i2c_algo, .read_config = af9035_read_config, .frontend_attach = it930x_frontend_attach, .frontend_detach = af9035_frontend_detach, .tuner_attach = it930x_tuner_attach, .tuner_detach = it930x_tuner_detach, .init = it930x_init, /* * dvb_usbv2_remote_init() calls rc_config() only for those devices * which have non-empty rc_map, so it's safe to enable it for every IT930x */ .get_rc_config = af9035_get_rc_config, .get_stream_config = af9035_get_stream_config, .get_adapter_count = af9035_get_adapter_count, .adapter = { { .stream = DVB_USB_STREAM_BULK(0x84, 4, 816 * 188), }, { .stream = DVB_USB_STREAM_BULK(0x85, 4, 816 * 188), }, }, }; static const struct usb_device_id af9035_id_table[] = { /* AF9035 devices */ { DVB_USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9035_9035, &af9035_props, "Afatech AF9035 reference design", NULL) }, { DVB_USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9035_1000, &af9035_props, "Afatech AF9035 reference design", NULL) }, { DVB_USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9035_1001, &af9035_props, "Afatech AF9035 reference design", NULL) }, { DVB_USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9035_1002, &af9035_props, "Afatech AF9035 reference design", NULL) }, { DVB_USB_DEVICE(USB_VID_AFATECH, USB_PID_AFATECH_AF9035_1003, &af9035_props, "Afatech AF9035 reference design", NULL) }, { DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_STICK, &af9035_props, "TerraTec Cinergy T Stick", NULL) }, { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835, &af9035_props, "AVerMedia AVerTV Volar HD/PRO (A835)", NULL) }, { 
DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_B835, &af9035_props, "AVerMedia AVerTV Volar HD/PRO (A835)", NULL) }, { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_1867, &af9035_props, "AVerMedia HD Volar (A867)", NULL) }, { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A867, &af9035_props, "AVerMedia HD Volar (A867)", NULL) }, { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_TWINSTAR, &af9035_props, "AVerMedia Twinstar (A825)", NULL) }, { DVB_USB_DEVICE(USB_VID_ASUS, USB_PID_ASUS_U3100MINI_PLUS, &af9035_props, "Asus U3100Mini Plus", NULL) }, { DVB_USB_DEVICE(USB_VID_TERRATEC, 0x00aa, &af9035_props, "TerraTec Cinergy T Stick (rev. 2)", NULL) }, { DVB_USB_DEVICE(USB_VID_AVERMEDIA, 0x0337, &af9035_props, "AVerMedia HD Volar (A867)", NULL) }, { DVB_USB_DEVICE(USB_VID_GTEK, USB_PID_EVOLVEO_XTRATV_STICK, &af9035_props, "EVOLVEO XtraTV stick", NULL) }, /* IT9135 devices */ { DVB_USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9135, &af9035_props, "ITE 9135 Generic", RC_MAP_IT913X_V1) }, { DVB_USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9135_9005, &af9035_props, "ITE 9135(9005) Generic", RC_MAP_IT913X_V2) }, { DVB_USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9135_9006, &af9035_props, "ITE 9135(9006) Generic", RC_MAP_IT913X_V1) }, { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_1835, &af9035_props, "Avermedia A835B(1835)", RC_MAP_IT913X_V2) }, { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_2835, &af9035_props, "Avermedia A835B(2835)", RC_MAP_IT913X_V2) }, { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_3835, &af9035_props, "Avermedia A835B(3835)", RC_MAP_IT913X_V2) }, { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_A835B_4835, &af9035_props, "Avermedia A835B(4835)", RC_MAP_IT913X_V2) }, { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_TD110, &af9035_props, "Avermedia AverTV Volar HD 2 (TD110)", RC_MAP_AVERMEDIA_RM_KS) }, { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_H335, &af9035_props, "Avermedia H335", RC_MAP_IT913X_V2) }, { DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_KWORLD_UB499_2T_T09, &af9035_props, "Kworld UB499-2T T09", RC_MAP_IT913X_V1) }, { DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_SVEON_STV22_IT9137, &af9035_props, "Sveon STV22 Dual DVB-T HDTV", RC_MAP_IT913X_V1) }, { DVB_USB_DEVICE(USB_VID_KWORLD_2, USB_PID_CTVDIGDUAL_V2, &af9035_props, "Digital Dual TV Receiver CTVDIGDUAL_V2", RC_MAP_IT913X_V1) }, { DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_T1, &af9035_props, "TerraTec T1", RC_MAP_IT913X_V1) }, /* XXX: that same ID [0ccd:0099] is used by af9015 driver too */ { DVB_USB_DEVICE(USB_VID_TERRATEC, 0x0099, &af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 
2)", NULL) }, { DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05, &af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) }, { DVB_USB_DEVICE(USB_VID_HAUPPAUGE, 0xf900, &af9035_props, "Hauppauge WinTV-MiniStick 2", NULL) }, { DVB_USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_78E, &af9035_props, "PCTV AndroiDTV (78e)", RC_MAP_IT913X_V1) }, { DVB_USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_79E, &af9035_props, "PCTV microStick (79e)", RC_MAP_IT913X_V2) }, /* IT930x devices */ { DVB_USB_DEVICE(USB_VID_ITETECH, USB_PID_ITETECH_IT9303, &it930x_props, "ITE 9303 Generic", NULL) }, { DVB_USB_DEVICE(USB_VID_AVERMEDIA, USB_PID_AVERMEDIA_TD310, &it930x_props, "AVerMedia TD310 DVB-T2", RC_MAP_AVERMEDIA_RM_KS) }, { DVB_USB_DEVICE(USB_VID_DEXATEK, 0x0100, &it930x_props, "Logilink VG0022A", NULL) }, { DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_TC2_STICK, &it930x_props, "TerraTec Cinergy TC2 Stick", NULL) }, { } }; MODULE_DEVICE_TABLE(usb, af9035_id_table); static struct usb_driver af9035_usb_driver = { .name = KBUILD_MODNAME, .id_table = af9035_id_table, .probe = af9035_probe, .disconnect = dvb_usbv2_disconnect, .suspend = dvb_usbv2_suspend, .resume = dvb_usbv2_resume, .reset_resume = dvb_usbv2_reset_resume, .no_dynamic_id = 1, .soft_unbind = 1, }; module_usb_driver(af9035_usb_driver); MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>"); MODULE_DESCRIPTION("Afatech AF9035 driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(AF9035_FIRMWARE_AF9035); MODULE_FIRMWARE(AF9035_FIRMWARE_IT9135_V1); MODULE_FIRMWARE(AF9035_FIRMWARE_IT9135_V2); MODULE_FIRMWARE(AF9035_FIRMWARE_IT9303);
// SPDX-License-Identifier: GPL-2.0-only /* * Simple USB RGB LED driver * * Copyright 2016 Heiner Kallweit <hkallweit1@gmail.com> * Based on drivers/hid/hid-thingm.c and * drivers/usb/misc/usbled.c */ #include <linux/hid.h> #include <linux/hidraw.h> #include <linux/leds.h> #include <linux/module.h> #include <linux/mutex.h> #include "hid-ids.h" enum hidled_report_type { RAW_REQUEST, OUTPUT_REPORT }; enum hidled_type { RISO_KAGAKU, DREAM_CHEEKY, THINGM, DELCOM, LUXAFOR, }; static unsigned const char riso_kagaku_tbl[] = { /* R+2G+4B -> riso kagaku color index */ [0] = 0, /* black */ [1] = 2, /* red */ [2] = 1, /* green */ [3] = 5, /* yellow */ [4] = 3, /* blue */ [5] = 6, /* magenta */ [6] = 4, /* cyan */ [7] = 7 /* white */ }; #define RISO_KAGAKU_IX(r, g, b) riso_kagaku_tbl[((r)?1:0)+((g)?2:0)+((b)?4:0)] union delcom_packet { __u8 data[8]; struct { __u8 major_cmd; __u8 minor_cmd; __u8 data_lsb; __u8 data_msb; } tx; struct { __u8 cmd; } rx; struct { __le16 family_code; __le16 security_code; __u8 fw_version; } fw; }; #define DELCOM_GREEN_LED 0 #define DELCOM_RED_LED 1 #define DELCOM_BLUE_LED 2 struct hidled_device; struct hidled_rgb; struct hidled_config { enum hidled_type type; const char *name; const char *short_name; enum led_brightness max_brightness; int num_leds; size_t report_size; enum hidled_report_type report_type; int (*init)(struct hidled_device *ldev); int (*write)(struct led_classdev *cdev, enum led_brightness br); }; struct hidled_led 
{ struct led_classdev cdev; struct hidled_rgb *rgb; char name[32]; }; struct hidled_rgb { struct hidled_device *ldev; struct hidled_led red; struct hidled_led green; struct hidled_led blue; u8 num; }; struct hidled_device { const struct hidled_config *config; struct hid_device *hdev; struct hidled_rgb *rgb; u8 *buf; struct mutex lock; }; #define MAX_REPORT_SIZE 16 #define to_hidled_led(arg) container_of(arg, struct hidled_led, cdev) static bool riso_kagaku_switch_green_blue; module_param(riso_kagaku_switch_green_blue, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(riso_kagaku_switch_green_blue, "switch green and blue RGB component for Riso Kagaku devices"); static int hidled_send(struct hidled_device *ldev, __u8 *buf) { int ret; mutex_lock(&ldev->lock); /* * buffer provided to hid_hw_raw_request must not be on the stack * and must not be part of a data structure */ memcpy(ldev->buf, buf, ldev->config->report_size); if (ldev->config->report_type == RAW_REQUEST) ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf, ldev->config->report_size, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); else if (ldev->config->report_type == OUTPUT_REPORT) ret = hid_hw_output_report(ldev->hdev, ldev->buf, ldev->config->report_size); else ret = -EINVAL; mutex_unlock(&ldev->lock); if (ret < 0) return ret; return ret == ldev->config->report_size ? 0 : -EMSGSIZE; } /* reading data is supported for report type RAW_REQUEST only */ static int hidled_recv(struct hidled_device *ldev, __u8 *buf) { int ret; if (ldev->config->report_type != RAW_REQUEST) return -EINVAL; mutex_lock(&ldev->lock); memcpy(ldev->buf, buf, ldev->config->report_size); ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf, ldev->config->report_size, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret < 0) goto err; ret = hid_hw_raw_request(ldev->hdev, buf[0], ldev->buf, ldev->config->report_size, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); memcpy(buf, ldev->buf, ldev->config->report_size); err: mutex_unlock(&ldev->lock); return ret < 0 ? 
ret : 0; } static u8 riso_kagaku_index(struct hidled_rgb *rgb) { enum led_brightness r, g, b; r = rgb->red.cdev.brightness; g = rgb->green.cdev.brightness; b = rgb->blue.cdev.brightness; if (riso_kagaku_switch_green_blue) return RISO_KAGAKU_IX(r, b, g); else return RISO_KAGAKU_IX(r, g, b); } static int riso_kagaku_write(struct led_classdev *cdev, enum led_brightness br) { struct hidled_led *led = to_hidled_led(cdev); struct hidled_rgb *rgb = led->rgb; __u8 buf[MAX_REPORT_SIZE] = {}; buf[1] = riso_kagaku_index(rgb); return hidled_send(rgb->ldev, buf); } static int dream_cheeky_write(struct led_classdev *cdev, enum led_brightness br) { struct hidled_led *led = to_hidled_led(cdev); struct hidled_rgb *rgb = led->rgb; __u8 buf[MAX_REPORT_SIZE] = {}; buf[1] = rgb->red.cdev.brightness; buf[2] = rgb->green.cdev.brightness; buf[3] = rgb->blue.cdev.brightness; buf[7] = 0x1a; buf[8] = 0x05; return hidled_send(rgb->ldev, buf); } static int dream_cheeky_init(struct hidled_device *ldev) { __u8 buf[MAX_REPORT_SIZE] = {}; /* Dream Cheeky magic */ buf[1] = 0x1f; buf[2] = 0x02; buf[4] = 0x5f; buf[7] = 0x1a; buf[8] = 0x03; return hidled_send(ldev, buf); } static int _thingm_write(struct led_classdev *cdev, enum led_brightness br, u8 offset) { struct hidled_led *led = to_hidled_led(cdev); __u8 buf[MAX_REPORT_SIZE] = { 1, 'c' }; buf[2] = led->rgb->red.cdev.brightness; buf[3] = led->rgb->green.cdev.brightness; buf[4] = led->rgb->blue.cdev.brightness; buf[7] = led->rgb->num + offset; return hidled_send(led->rgb->ldev, buf); } static int thingm_write_v1(struct led_classdev *cdev, enum led_brightness br) { return _thingm_write(cdev, br, 0); } static int thingm_write(struct led_classdev *cdev, enum led_brightness br) { return _thingm_write(cdev, br, 1); } static const struct hidled_config hidled_config_thingm_v1 = { .name = "ThingM blink(1) v1", .short_name = "thingm", .max_brightness = 255, .num_leds = 1, .report_size = 9, .report_type = RAW_REQUEST, .write = thingm_write_v1, }; static int thingm_init(struct hidled_device *ldev) { __u8 buf[MAX_REPORT_SIZE] = { 1, 'v' }; int ret; ret = hidled_recv(ldev, buf); if (ret) return ret; /* Check for firmware major version 1 */ if (buf[3] == '1') ldev->config = &hidled_config_thingm_v1; return 0; } static inline int delcom_get_lednum(const struct hidled_led *led) { if (led == &led->rgb->red) return DELCOM_RED_LED; else if (led == &led->rgb->green) return DELCOM_GREEN_LED; else return DELCOM_BLUE_LED; } static int delcom_enable_led(struct hidled_led *led) { union delcom_packet dp = { .tx.major_cmd = 101, .tx.minor_cmd = 12 }; dp.tx.data_lsb = 1 << delcom_get_lednum(led); dp.tx.data_msb = 0; return hidled_send(led->rgb->ldev, dp.data); } static int delcom_set_pwm(struct hidled_led *led) { union delcom_packet dp = { .tx.major_cmd = 101, .tx.minor_cmd = 34 }; dp.tx.data_lsb = delcom_get_lednum(led); dp.tx.data_msb = led->cdev.brightness; return hidled_send(led->rgb->ldev, dp.data); } static int delcom_write(struct led_classdev *cdev, enum led_brightness br) { struct hidled_led *led = to_hidled_led(cdev); int ret; /* * enable LED * We can't do this in the init function already because the device * is internally reset later. 
*/ ret = delcom_enable_led(led); if (ret) return ret; return delcom_set_pwm(led); } static int delcom_init(struct hidled_device *ldev) { union delcom_packet dp = { .rx.cmd = 104 }; int ret; ret = hidled_recv(ldev, dp.data); if (ret) return ret; /* * Several Delcom devices share the same USB VID/PID * Check for family id 2 for Visual Signal Indicator */ return le16_to_cpu(dp.fw.family_code) == 2 ? 0 : -ENODEV; } static int luxafor_write(struct led_classdev *cdev, enum led_brightness br) { struct hidled_led *led = to_hidled_led(cdev); __u8 buf[MAX_REPORT_SIZE] = { [1] = 1 }; buf[2] = led->rgb->num + 1; buf[3] = led->rgb->red.cdev.brightness; buf[4] = led->rgb->green.cdev.brightness; buf[5] = led->rgb->blue.cdev.brightness; return hidled_send(led->rgb->ldev, buf); } static const struct hidled_config hidled_configs[] = { { .type = RISO_KAGAKU, .name = "Riso Kagaku Webmail Notifier", .short_name = "riso_kagaku", .max_brightness = 1, .num_leds = 1, .report_size = 6, .report_type = OUTPUT_REPORT, .write = riso_kagaku_write, }, { .type = DREAM_CHEEKY, .name = "Dream Cheeky Webmail Notifier", .short_name = "dream_cheeky", .max_brightness = 63, .num_leds = 1, .report_size = 9, .report_type = RAW_REQUEST, .init = dream_cheeky_init, .write = dream_cheeky_write, }, { .type = THINGM, .name = "ThingM blink(1)", .short_name = "thingm", .max_brightness = 255, .num_leds = 2, .report_size = 9, .report_type = RAW_REQUEST, .init = thingm_init, .write = thingm_write, }, { .type = DELCOM, .name = "Delcom Visual Signal Indicator G2", .short_name = "delcom", .max_brightness = 100, .num_leds = 1, .report_size = 8, .report_type = RAW_REQUEST, .init = delcom_init, .write = delcom_write, }, { .type = LUXAFOR, .name = "Greynut Luxafor", .short_name = "luxafor", .max_brightness = 255, .num_leds = 6, .report_size = 9, .report_type = OUTPUT_REPORT, .write = luxafor_write, }, }; static int hidled_init_led(struct hidled_led *led, const char *color_name, struct hidled_rgb *rgb, unsigned int minor) { const struct hidled_config *config = rgb->ldev->config; if (config->num_leds > 1) snprintf(led->name, sizeof(led->name), "%s%u:%s:led%u", config->short_name, minor, color_name, rgb->num); else snprintf(led->name, sizeof(led->name), "%s%u:%s", config->short_name, minor, color_name); led->cdev.name = led->name; led->cdev.max_brightness = config->max_brightness; led->cdev.brightness_set_blocking = config->write; led->cdev.flags = LED_HW_PLUGGABLE; led->rgb = rgb; return devm_led_classdev_register(&rgb->ldev->hdev->dev, &led->cdev); } static int hidled_init_rgb(struct hidled_rgb *rgb, unsigned int minor) { int ret; /* Register the red diode */ ret = hidled_init_led(&rgb->red, "red", rgb, minor); if (ret) return ret; /* Register the green diode */ ret = hidled_init_led(&rgb->green, "green", rgb, minor); if (ret) return ret; /* Register the blue diode */ return hidled_init_led(&rgb->blue, "blue", rgb, minor); } static int hidled_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct hidled_device *ldev; unsigned int minor; int ret, i; ldev = devm_kzalloc(&hdev->dev, sizeof(*ldev), GFP_KERNEL); if (!ldev) return -ENOMEM; ldev->buf = devm_kmalloc(&hdev->dev, MAX_REPORT_SIZE, GFP_KERNEL); if (!ldev->buf) return -ENOMEM; ret = hid_parse(hdev); if (ret) return ret; ldev->hdev = hdev; mutex_init(&ldev->lock); for (i = 0; !ldev->config && i < ARRAY_SIZE(hidled_configs); i++) if (hidled_configs[i].type == id->driver_data) ldev->config = &hidled_configs[i]; if (!ldev->config) return -EINVAL; if (ldev->config->init) { ret = 
ldev->config->init(ldev); if (ret) return ret; } ldev->rgb = devm_kcalloc(&hdev->dev, ldev->config->num_leds, sizeof(struct hidled_rgb), GFP_KERNEL); if (!ldev->rgb) return -ENOMEM; ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW); if (ret) return ret; minor = ((struct hidraw *) hdev->hidraw)->minor; for (i = 0; i < ldev->config->num_leds; i++) { ldev->rgb[i].ldev = ldev; ldev->rgb[i].num = i; ret = hidled_init_rgb(&ldev->rgb[i], minor); if (ret) { hid_hw_stop(hdev); return ret; } } hid_info(hdev, "%s initialized\n", ldev->config->name); return 0; } static const struct hid_device_id hidled_table[] = { { HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL), .driver_data = RISO_KAGAKU }, { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN), .driver_data = DREAM_CHEEKY }, { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA), .driver_data = DREAM_CHEEKY }, { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1), .driver_data = THINGM }, { HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND), .driver_data = DELCOM }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR), .driver_data = LUXAFOR }, { } }; MODULE_DEVICE_TABLE(hid, hidled_table); static struct hid_driver hidled_driver = { .name = "hid-led", .probe = hidled_probe, .id_table = hidled_table, }; module_hid_driver(hidled_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Heiner Kallweit <hkallweit1@gmail.com>"); MODULE_DESCRIPTION("Simple USB RGB LED driver");
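The Riso Kagaku path above packs the three LED-class brightness values into a single device color index via RISO_KAGAKU_IX(), which is easy to misread. The standalone sketch below is not driver code (main() and the printouts are illustrative only); it reproduces the table and macro to show the mapping, e.g. red plus blue selects device color index 6 (magenta).

/*
 * Standalone illustration of the Riso Kagaku color lookup used by
 * riso_kagaku_write(): the R/G/B on-off states form a 3-bit value
 * (R + 2G + 4B) which is mapped through riso_kagaku_tbl.
 */
#include <stdio.h>

static const unsigned char riso_kagaku_tbl[] = {
        [0] = 0,        /* black   */
        [1] = 2,        /* red     */
        [2] = 1,        /* green   */
        [3] = 5,        /* yellow  */
        [4] = 3,        /* blue    */
        [5] = 6,        /* magenta */
        [6] = 4,        /* cyan    */
        [7] = 7,        /* white   */
};

#define RISO_KAGAKU_IX(r, g, b) \
        riso_kagaku_tbl[((r) ? 1 : 0) + ((g) ? 2 : 0) + ((b) ? 4 : 0)]

int main(void)
{
        /* red + blue -> bitmask 5 -> device color index 6 (magenta) */
        printf("red+blue -> %u\n", RISO_KAGAKU_IX(255, 0, 255));
        /* all three channels on -> bitmask 7 -> index 7 (white) */
        printf("r+g+b    -> %u\n", RISO_KAGAKU_IX(1, 1, 1));
        return 0;
}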
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_JUMP_LABEL_H
#define _ASM_X86_JUMP_LABEL_H

#define HAVE_JUMP_LABEL_BATCH

#include <asm/asm.h>
#include <asm/nops.h>

#ifndef __ASSEMBLER__

#include <linux/stringify.h>
#include <linux/types.h>

#define JUMP_TABLE_ENTRY(key, label)                    \
        ".pushsection __jump_table, \"aw\" \n\t"        \
        _ASM_ALIGN "\n\t"                               \
        ".long 1b - . \n\t"                             \
        ".long " label " - . \n\t"                      \
        _ASM_PTR " " key " - . \n\t"                    \
        ".popsection \n\t"

/* This macro is also expanded on the Rust side. */
#ifdef CONFIG_HAVE_JUMP_LABEL_HACK

#define ARCH_STATIC_BRANCH_ASM(key, label)              \
        "1: jmp " label " # objtool NOPs this \n\t"     \
        JUMP_TABLE_ENTRY(key " + 2", label)

#else /* !CONFIG_HAVE_JUMP_LABEL_HACK */

#define ARCH_STATIC_BRANCH_ASM(key, label)              \
        "1: .byte " __stringify(BYTES_NOP5) "\n\t"      \
        JUMP_TABLE_ENTRY(key, label)

#endif /* CONFIG_HAVE_JUMP_LABEL_HACK */

static __always_inline bool arch_static_branch(struct static_key * const key,
                                               const bool branch)
{
        asm goto(ARCH_STATIC_BRANCH_ASM("%c0 + %c1", "%l[l_yes]")
                 : : "i" (key), "i" (branch) : : l_yes);

        return false;
l_yes:
        return true;
}

static __always_inline bool arch_static_branch_jump(struct static_key * const key,
                                                    const bool branch)
{
        asm goto("1:"
                 "jmp %l[l_yes]\n\t"
                 JUMP_TABLE_ENTRY("%c0 + %c1", "%l[l_yes]")
                 : : "i" (key), "i" (branch) : : l_yes);

        return false;
l_yes:
        return true;
}

extern int arch_jump_entry_size(struct jump_entry *entry);

#endif /* __ASSEMBLER__ */

#endif
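For context, arch_static_branch() and arch_static_branch_jump() above are not called directly; they are reached through the generic static-key API in <linux/jump_label.h>. The sketch below shows the typical pattern under that assumption; example_feature_key and the two functions are hypothetical names, not part of this header.

/*
 * Hedged usage sketch (not from this header): a static key defaults to
 * false, so the branch compiles to the NOP emitted by
 * ARCH_STATIC_BRANCH_ASM() and is patched to a jmp when enabled.
 */
#include <linux/jump_label.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(example_feature_key);   /* hypothetical key */

void example_hot_path(void)                             /* hypothetical function */
{
        /* NOP by default; becomes a jump once the key is enabled */
        if (static_branch_unlikely(&example_feature_key))
                pr_info("optional feature path taken\n");
}

void example_enable_feature(void)                       /* hypothetical function */
{
        static_branch_enable(&example_feature_key);     /* patches the NOP to a jmp */
}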
/* SPDX-License-Identifier: GPL-2.0 */ /* * bvec iterator * * Copyright (C) 2001 Ming Lei <ming.lei@canonical.com> */ #ifndef __LINUX_BVEC_H #define __LINUX_BVEC_H #include <linux/highmem.h> #include <linux/bug.h> #include <linux/errno.h> #include <linux/limits.h> #include <linux/minmax.h> #include <linux/types.h> struct page; /** * struct bio_vec - a contiguous range of physical memory addresses * @bv_page: First page associated with the address range. * @bv_len: Number of bytes in the address range. * @bv_offset: Start of the address range relative to the start of @bv_page. * * The following holds for a bvec if n * PAGE_SIZE < bv_offset + bv_len: * * nth_page(@bv_page, n) == @bv_page + n * * This holds because page_is_mergeable() checks the above property. 
*/ struct bio_vec { struct page *bv_page; unsigned int bv_len; unsigned int bv_offset; }; /** * bvec_set_page - initialize a bvec based off a struct page * @bv: bvec to initialize * @page: page the bvec should point to * @len: length of the bvec * @offset: offset into the page */ static inline void bvec_set_page(struct bio_vec *bv, struct page *page, unsigned int len, unsigned int offset) { bv->bv_page = page; bv->bv_len = len; bv->bv_offset = offset; } /** * bvec_set_folio - initialize a bvec based off a struct folio * @bv: bvec to initialize * @folio: folio the bvec should point to * @len: length of the bvec * @offset: offset into the folio */ static inline void bvec_set_folio(struct bio_vec *bv, struct folio *folio, size_t len, size_t offset) { unsigned long nr = offset / PAGE_SIZE; WARN_ON_ONCE(len > UINT_MAX); bvec_set_page(bv, folio_page(folio, nr), len, offset % PAGE_SIZE); } /** * bvec_set_virt - initialize a bvec based on a virtual address * @bv: bvec to initialize * @vaddr: virtual address to set the bvec to * @len: length of the bvec */ static inline void bvec_set_virt(struct bio_vec *bv, void *vaddr, unsigned int len) { bvec_set_page(bv, virt_to_page(vaddr), len, offset_in_page(vaddr)); } struct bvec_iter { sector_t bi_sector; /* device address in 512 byte sectors */ unsigned int bi_size; /* residual I/O count */ unsigned int bi_idx; /* current index into bvl_vec */ unsigned int bi_bvec_done; /* number of bytes completed in current bvec */ } __packed __aligned(4); struct bvec_iter_all { struct bio_vec bv; int idx; unsigned done; }; /* * various member access, note that bio_data should of course not be used * on highmem page vectors */ #define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx]) /* multi-page (mp_bvec) helpers */ #define mp_bvec_iter_page(bvec, iter) \ (__bvec_iter_bvec((bvec), (iter))->bv_page) #define mp_bvec_iter_len(bvec, iter) \ min((iter).bi_size, \ __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done) #define mp_bvec_iter_offset(bvec, iter) \ (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done) #define mp_bvec_iter_page_idx(bvec, iter) \ (mp_bvec_iter_offset((bvec), (iter)) / PAGE_SIZE) #define mp_bvec_iter_bvec(bvec, iter) \ ((struct bio_vec) { \ .bv_page = mp_bvec_iter_page((bvec), (iter)), \ .bv_len = mp_bvec_iter_len((bvec), (iter)), \ .bv_offset = mp_bvec_iter_offset((bvec), (iter)), \ }) /* For building single-page bvec in flight */ #define bvec_iter_offset(bvec, iter) \ (mp_bvec_iter_offset((bvec), (iter)) % PAGE_SIZE) #define bvec_iter_len(bvec, iter) \ min_t(unsigned, mp_bvec_iter_len((bvec), (iter)), \ PAGE_SIZE - bvec_iter_offset((bvec), (iter))) #define bvec_iter_page(bvec, iter) \ (mp_bvec_iter_page((bvec), (iter)) + \ mp_bvec_iter_page_idx((bvec), (iter))) #define bvec_iter_bvec(bvec, iter) \ ((struct bio_vec) { \ .bv_page = bvec_iter_page((bvec), (iter)), \ .bv_len = bvec_iter_len((bvec), (iter)), \ .bv_offset = bvec_iter_offset((bvec), (iter)), \ }) static inline bool bvec_iter_advance(const struct bio_vec *bv, struct bvec_iter *iter, unsigned bytes) { unsigned int idx = iter->bi_idx; if (WARN_ONCE(bytes > iter->bi_size, "Attempted to advance past end of bvec iter\n")) { iter->bi_size = 0; return false; } iter->bi_size -= bytes; bytes += iter->bi_bvec_done; while (bytes && bytes >= bv[idx].bv_len) { bytes -= bv[idx].bv_len; idx++; } iter->bi_idx = idx; iter->bi_bvec_done = bytes; return true; } /* * A simpler version of bvec_iter_advance(), @bytes should not span * across multiple bvec entries, i.e. 
bytes <= bv[i->bi_idx].bv_len */ static inline void bvec_iter_advance_single(const struct bio_vec *bv, struct bvec_iter *iter, unsigned int bytes) { unsigned int done = iter->bi_bvec_done + bytes; if (done == bv[iter->bi_idx].bv_len) { done = 0; iter->bi_idx++; } iter->bi_bvec_done = done; iter->bi_size -= bytes; } #define for_each_bvec(bvl, bio_vec, iter, start) \ for (iter = (start); \ (iter).bi_size && \ ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len)) #define for_each_mp_bvec(bvl, bio_vec, iter, start) \ for (iter = (start); \ (iter).bi_size && \ ((bvl = mp_bvec_iter_bvec((bio_vec), (iter))), 1); \ bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len)) /* for iterating one bio from start to end */ #define BVEC_ITER_ALL_INIT (struct bvec_iter) \ { \ .bi_sector = 0, \ .bi_size = UINT_MAX, \ .bi_idx = 0, \ .bi_bvec_done = 0, \ } static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all) { iter_all->done = 0; iter_all->idx = 0; return &iter_all->bv; } static inline void bvec_advance(const struct bio_vec *bvec, struct bvec_iter_all *iter_all) { struct bio_vec *bv = &iter_all->bv; if (iter_all->done) { bv->bv_page++; bv->bv_offset = 0; } else { bv->bv_page = bvec->bv_page + (bvec->bv_offset >> PAGE_SHIFT); bv->bv_offset = bvec->bv_offset & ~PAGE_MASK; } bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset, bvec->bv_len - iter_all->done); iter_all->done += bv->bv_len; if (iter_all->done == bvec->bv_len) { iter_all->idx++; iter_all->done = 0; } } /** * bvec_kmap_local - map a bvec into the kernel virtual address space * @bvec: bvec to map * * Must be called on single-page bvecs only. Call kunmap_local on the returned * address to unmap. */ static inline void *bvec_kmap_local(struct bio_vec *bvec) { return kmap_local_page(bvec->bv_page) + bvec->bv_offset; } /** * memcpy_from_bvec - copy data from a bvec * @bvec: bvec to copy from * * Must be called on single-page bvecs only. */ static inline void memcpy_from_bvec(char *to, struct bio_vec *bvec) { memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, bvec->bv_len); } /** * memcpy_to_bvec - copy data to a bvec * @bvec: bvec to copy to * * Must be called on single-page bvecs only. */ static inline void memcpy_to_bvec(struct bio_vec *bvec, const char *from) { memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, bvec->bv_len); } /** * memzero_bvec - zero all data in a bvec * @bvec: bvec to zero * * Must be called on single-page bvecs only. */ static inline void memzero_bvec(struct bio_vec *bvec) { memzero_page(bvec->bv_page, bvec->bv_offset, bvec->bv_len); } /** * bvec_virt - return the virtual address for a bvec * @bvec: bvec to return the virtual address for * * Note: the caller must ensure that @bvec->bv_page is not a highmem page. */ static inline void *bvec_virt(struct bio_vec *bvec) { WARN_ON_ONCE(PageHighMem(bvec->bv_page)); return page_address(bvec->bv_page) + bvec->bv_offset; } /** * bvec_phys - return the physical address for a bvec * @bvec: bvec to return the physical address for */ static inline phys_addr_t bvec_phys(const struct bio_vec *bvec) { return page_to_phys(bvec->bv_page) + bvec->bv_offset; } #endif /* __LINUX_BVEC_H */
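To make the iterator macros above concrete, here is a hedged usage sketch (not part of bvec.h): it builds a two-entry bio_vec table with bvec_set_page() and walks it with for_each_bvec(), which hands back single-page segments clamped by bvec_iter_len(). The function name and the page parameters are hypothetical.

/*
 * Usage sketch only: sums the segment lengths produced by for_each_bvec()
 * over a small bio_vec table. With these sizes the two entries each fit
 * in one page, so the loop yields 512 then 1024 bytes.
 */
#include <linux/bvec.h>

unsigned int example_sum_segments(struct page *p0, struct page *p1)    /* hypothetical */
{
        struct bio_vec table[2];
        struct bio_vec bv;
        struct bvec_iter iter;
        const struct bvec_iter start = {
                .bi_size = 512 + 1024, /* total residual byte count */
        };
        unsigned int total = 0;

        bvec_set_page(&table[0], p0, 512, 0);    /* 512 bytes at page offset 0 */
        bvec_set_page(&table[1], p1, 1024, 256); /* 1024 bytes at page offset 256 */

        /* each bv covers at most one page (see bvec_iter_len() above) */
        for_each_bvec(bv, table, iter, start)
                total += bv.bv_len;

        return total; /* 1536 */
}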
// SPDX-License-Identifier: GPL-2.0-only /* * bitmap.c two-level bitmap (C) Peter T. 
Breuer (ptb@ot.uc3m.es) 2003 * * bitmap_create - sets up the bitmap structure * bitmap_destroy - destroys the bitmap structure * * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.: * - added disk storage for bitmap * - changes to allow various bitmap chunk sizes */ /* * Still to do: * * flush after percent set rather than just time based. (maybe both). */ #include <linux/blkdev.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/timer.h> #include <linux/sched.h> #include <linux/list.h> #include <linux/file.h> #include <linux/mount.h> #include <linux/buffer_head.h> #include <linux/seq_file.h> #include <trace/events/block.h> #include "md.h" #include "md-bitmap.h" #include "md-cluster.h" #define BITMAP_MAJOR_LO 3 /* version 4 insists the bitmap is in little-endian order * with version 3, it is host-endian which is non-portable * Version 5 is currently set only for clustered devices */ #define BITMAP_MAJOR_HI 4 #define BITMAP_MAJOR_CLUSTERED 5 #define BITMAP_MAJOR_HOSTENDIAN 3 /* * in-memory bitmap: * * Use 16 bit block counters to track pending writes to each "chunk". * The 2 high order bits are special-purpose, the first is a flag indicating * whether a resync is needed. The second is a flag indicating whether a * resync is active. * This means that the counter is actually 14 bits: * * +--------+--------+------------------------------------------------+ * | resync | resync | counter | * | needed | active | | * | (0-1) | (0-1) | (0-16383) | * +--------+--------+------------------------------------------------+ * * The "resync needed" bit is set when: * a '1' bit is read from storage at startup. * a write request fails on some drives * a resync is aborted on a chunk with 'resync active' set * It is cleared (and resync-active set) when a resync starts across all drives * of the chunk. * * * The "resync active" bit is set when: * a resync is started on all drives, and resync_needed is set. * resync_needed will be cleared (as long as resync_active wasn't already set). * It is cleared when a resync completes. * * The counter counts pending write requests, plus the on-disk bit. * When the counter is '1' and the resync bits are clear, the on-disk * bit can be cleared as well, thus setting the counter to 0. * When we set a bit, or in the counter (to start a write), if the fields is * 0, we first set the disk bit and set the counter to 1. * * If the counter is 0, the on-disk bit is clear and the stripe is clean * Anything that dirties the stripe pushes the counter to 2 (at least) * and sets the on-disk bit (lazily). * If a periodic sweep find the counter at 2, it is decremented to 1. * If the sweep find the counter at 1, the on-disk bit is cleared and the * counter goes to zero. 
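 *
 * A worked example of the encoding (using only the NEEDED/RESYNC/COUNTER
 * macros defined further down):
 *   NEEDED_MASK = 1 << 15 = 0x8000   "resync needed" flag
 *   RESYNC_MASK = 1 << 14 = 0x4000   "resync active" flag
 *   COUNTER_MAX = 0x3fff  = 16383    largest pending-write count
 * so a counter word of 0x8005 decodes as: resync needed, resync not yet
 * active, and COUNTER() == 5 pending writes against that chunk.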
* * Also, we'll hijack the "map" pointer itself and use it as two 16 bit block * counters as a fallback when "page" memory cannot be allocated: * * Normal case (page memory allocated): * * page pointer (32-bit) * * [ ] ------+ * | * +-------> [ ][ ]..[ ] (4096 byte page == 2048 counters) * c1 c2 c2048 * * Hijacked case (page memory allocation failed): * * hijacked page pointer (32-bit) * * [ ][ ] (no page memory allocated) * counter #1 (16-bit) counter #2 (16-bit) * */ typedef __u16 bitmap_counter_t; #define PAGE_BITS (PAGE_SIZE << 3) #define PAGE_BIT_SHIFT (PAGE_SHIFT + 3) #define COUNTER_BITS 16 #define COUNTER_BIT_SHIFT 4 #define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3) #define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1))) #define RESYNC_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 2))) #define COUNTER_MAX ((bitmap_counter_t) RESYNC_MASK - 1) #define NEEDED(x) (((bitmap_counter_t) x) & NEEDED_MASK) #define RESYNC(x) (((bitmap_counter_t) x) & RESYNC_MASK) #define COUNTER(x) (((bitmap_counter_t) x) & COUNTER_MAX) /* how many counters per page? */ #define PAGE_COUNTER_RATIO (PAGE_BITS / COUNTER_BITS) /* same, except a shift value for more efficient bitops */ #define PAGE_COUNTER_SHIFT (PAGE_BIT_SHIFT - COUNTER_BIT_SHIFT) /* same, except a mask value for more efficient bitops */ #define PAGE_COUNTER_MASK (PAGE_COUNTER_RATIO - 1) #define BITMAP_BLOCK_SHIFT 9 /* * bitmap structures: */ /* the in-memory bitmap is represented by bitmap_pages */ struct bitmap_page { /* * map points to the actual memory page */ char *map; /* * in emergencies (when map cannot be alloced), hijack the map * pointer and use it as two counters itself */ unsigned int hijacked:1; /* * If any counter in this page is '1' or '2' - and so could be * cleared then that page is marked as 'pending' */ unsigned int pending:1; /* * count of dirty bits on the page */ unsigned int count:30; }; /* the main bitmap structure - one per mddev */ struct bitmap { struct bitmap_counts { spinlock_t lock; struct bitmap_page *bp; /* total number of pages in the bitmap */ unsigned long pages; /* number of pages not yet allocated */ unsigned long missing_pages; /* chunksize = 2^chunkshift (for bitops) */ unsigned long chunkshift; /* total number of data chunks for the array */ unsigned long chunks; } counts; struct mddev *mddev; /* the md device that the bitmap is for */ __u64 events_cleared; int need_sync; struct bitmap_storage { /* backing disk file */ struct file *file; /* cached copy of the bitmap file superblock */ struct page *sb_page; unsigned long sb_index; /* list of cache pages for the file */ struct page **filemap; /* attributes associated filemap pages */ unsigned long *filemap_attr; /* number of pages in the file */ unsigned long file_pages; /* total bytes in the bitmap */ unsigned long bytes; } storage; unsigned long flags; int allclean; atomic_t behind_writes; /* highest actual value at runtime */ unsigned long behind_writes_used; /* * the bitmap daemon - periodically wakes up and sweeps the bitmap * file, cleaning up bits and flushing out pages to disk as necessary */ unsigned long daemon_lastrun; /* jiffies of last run */ /* * when we lasted called end_sync to update bitmap with resync * progress. 
*/ unsigned long last_end_sync; /* pending writes to the bitmap file */ atomic_t pending_writes; wait_queue_head_t write_wait; wait_queue_head_t overflow_wait; wait_queue_head_t behind_wait; struct kernfs_node *sysfs_can_clear; /* slot offset for clustered env */ int cluster_slot; }; static int __bitmap_resize(struct bitmap *bitmap, sector_t blocks, int chunksize, bool init); static inline char *bmname(struct bitmap *bitmap) { return bitmap->mddev ? mdname(bitmap->mddev) : "mdX"; } static bool __bitmap_enabled(struct bitmap *bitmap) { return bitmap->storage.filemap && !test_bit(BITMAP_STALE, &bitmap->flags); } static bool bitmap_enabled(struct mddev *mddev) { struct bitmap *bitmap = mddev->bitmap; if (!bitmap) return false; return __bitmap_enabled(bitmap); } /* * check a page and, if necessary, allocate it (or hijack it if the alloc fails) * * 1) check to see if this page is allocated, if it's not then try to alloc * 2) if the alloc fails, set the page's hijacked flag so we'll use the * page pointer directly as a counter * * if we find our page, we increment the page's refcount so that it stays * allocated while we're using it */ static int md_bitmap_checkpage(struct bitmap_counts *bitmap, unsigned long page, int create, int no_hijack) __releases(bitmap->lock) __acquires(bitmap->lock) { unsigned char *mappage; WARN_ON_ONCE(page >= bitmap->pages); if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */ return 0; if (bitmap->bp[page].map) /* page is already allocated, just return */ return 0; if (!create) return -ENOENT; /* this page has not been allocated yet */ spin_unlock_irq(&bitmap->lock); /* It is possible that this is being called inside a * prepare_to_wait/finish_wait loop from raid5c:make_request(). * In general it is not permitted to sleep in that context as it * can cause the loop to spin freely. * That doesn't apply here as we can only reach this point * once with any loop. * When this function completes, either bp[page].map or * bp[page].hijacked. In either case, this function will * abort before getting to this point again. So there is * no risk of a free-spin, and so it is safe to assert * that sleeping here is allowed. 
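 * (For scale, assuming 4 KiB pages: each struct bitmap_page covers
 * PAGE_COUNTER_RATIO = 32768 / 16 = 2048 chunks, so e.g. a 2 TiB array
 * using 64 MiB chunks has 32768 chunks and needs only 16 of these
 * counter pages in total.)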
*/ sched_annotate_sleep(); mappage = kzalloc(PAGE_SIZE, GFP_NOIO); spin_lock_irq(&bitmap->lock); if (mappage == NULL) { pr_debug("md/bitmap: map page allocation failed, hijacking\n"); /* We don't support hijack for cluster raid */ if (no_hijack) return -ENOMEM; /* failed - set the hijacked flag so that we can use the * pointer as a counter */ if (!bitmap->bp[page].map) bitmap->bp[page].hijacked = 1; } else if (bitmap->bp[page].map || bitmap->bp[page].hijacked) { /* somebody beat us to getting the page */ kfree(mappage); } else { /* no page was in place and we have one, so install it */ bitmap->bp[page].map = mappage; bitmap->missing_pages--; } return 0; } /* if page is completely empty, put it back on the free list, or dealloc it */ /* if page was hijacked, unmark the flag so it might get alloced next time */ /* Note: lock should be held when calling this */ static void md_bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page) { char *ptr; if (bitmap->bp[page].count) /* page is still busy */ return; /* page is no longer in use, it can be released */ if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */ bitmap->bp[page].hijacked = 0; bitmap->bp[page].map = NULL; } else { /* normal case, free the page */ ptr = bitmap->bp[page].map; bitmap->bp[page].map = NULL; bitmap->missing_pages++; kfree(ptr); } } /* * bitmap file handling - read and write the bitmap file and its superblock */ /* * basic page I/O operations */ /* IO operations when bitmap is stored near all superblocks */ /* choose a good rdev and read the page from there */ static int read_sb_page(struct mddev *mddev, loff_t offset, struct page *page, unsigned long index, int size) { sector_t sector = mddev->bitmap_info.offset + offset + index * (PAGE_SIZE / SECTOR_SIZE); struct md_rdev *rdev; rdev_for_each(rdev, mddev) { u32 iosize = roundup(size, bdev_logical_block_size(rdev->bdev)); if (!test_bit(In_sync, &rdev->flags) || test_bit(Faulty, &rdev->flags) || test_bit(Bitmap_sync, &rdev->flags)) continue; if (sync_page_io(rdev, sector, iosize, page, REQ_OP_READ, true)) return 0; } return -EIO; } static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev) { /* Iterate the disks of an mddev, using rcu to protect access to the * linked list, and raising the refcount of devices we return to ensure * they don't disappear while in use. * As devices are only added or removed when raid_disk is < 0 and * nr_pending is 0 and In_sync is clear, the entries we return will * still be in the same position on the list when we re-enter * list_for_each_entry_continue_rcu. * * Note that if entered with 'rdev == NULL' to start at the * beginning, we temporarily assign 'rdev' to an address which * isn't really an rdev, but which can be used by * list_for_each_entry_continue_rcu() to find the first entry. */ rcu_read_lock(); if (rdev == NULL) /* start at the beginning */ rdev = list_entry(&mddev->disks, struct md_rdev, same_set); else { /* release the previous rdev and start from there. 
*/ rdev_dec_pending(rdev, mddev); } list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) { if (rdev->raid_disk >= 0 && !test_bit(Faulty, &rdev->flags)) { /* this is a usable devices */ atomic_inc(&rdev->nr_pending); rcu_read_unlock(); return rdev; } } rcu_read_unlock(); return NULL; } static unsigned int optimal_io_size(struct block_device *bdev, unsigned int last_page_size, unsigned int io_size) { if (bdev_io_opt(bdev) > bdev_logical_block_size(bdev)) return roundup(last_page_size, bdev_io_opt(bdev)); return io_size; } static unsigned int bitmap_io_size(unsigned int io_size, unsigned int opt_size, loff_t start, loff_t boundary) { if (io_size != opt_size && start + opt_size / SECTOR_SIZE <= boundary) return opt_size; if (start + io_size / SECTOR_SIZE <= boundary) return io_size; /* Overflows boundary */ return 0; } static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap, unsigned long pg_index, struct page *page) { struct block_device *bdev; struct mddev *mddev = bitmap->mddev; struct bitmap_storage *store = &bitmap->storage; unsigned long num_pages = bitmap->storage.file_pages; unsigned int bitmap_limit = (num_pages - pg_index % num_pages) << PAGE_SHIFT; loff_t sboff, offset = mddev->bitmap_info.offset; sector_t ps = pg_index * PAGE_SIZE / SECTOR_SIZE; unsigned int size = PAGE_SIZE; unsigned int opt_size = PAGE_SIZE; sector_t doff; bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev; /* we compare length (page numbers), not page offset. */ if ((pg_index - store->sb_index) == num_pages - 1) { unsigned int last_page_size = store->bytes & (PAGE_SIZE - 1); if (last_page_size == 0) last_page_size = PAGE_SIZE; size = roundup(last_page_size, bdev_logical_block_size(bdev)); opt_size = optimal_io_size(bdev, last_page_size, size); } sboff = rdev->sb_start + offset; doff = rdev->data_offset; /* Just make sure we aren't corrupting data or metadata */ if (mddev->external) { /* Bitmap could be anywhere. 
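 * (Illustrative numbers for the size/opt_size computed above, assuming a
 * 512-byte logical block and a 4 KiB optimal I/O size: a final bitmap
 * page holding 1 KiB rounds to size = 1024 and opt_size = 4096;
 * bitmap_io_size() then uses 4096 when start + 8 sectors still fits
 * within the boundary, falls back to 1024 otherwise, and returns 0 if
 * even that would cross it.)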
*/ if (sboff + ps > doff && sboff < (doff + mddev->dev_sectors + PAGE_SIZE / SECTOR_SIZE)) return -EINVAL; } else if (offset < 0) { /* DATA BITMAP METADATA */ size = bitmap_io_size(size, opt_size, offset + ps, 0); if (size == 0) /* bitmap runs in to metadata */ return -EINVAL; if (doff + mddev->dev_sectors > sboff) /* data runs in to bitmap */ return -EINVAL; } else if (rdev->sb_start < rdev->data_offset) { /* METADATA BITMAP DATA */ size = bitmap_io_size(size, opt_size, sboff + ps, doff); if (size == 0) /* bitmap runs in to data */ return -EINVAL; } md_super_write(mddev, rdev, sboff + ps, (int)min(size, bitmap_limit), page); return 0; } static void write_sb_page(struct bitmap *bitmap, unsigned long pg_index, struct page *page, bool wait) { struct mddev *mddev = bitmap->mddev; do { struct md_rdev *rdev = NULL; while ((rdev = next_active_rdev(rdev, mddev)) != NULL) { if (__write_sb_page(rdev, bitmap, pg_index, page) < 0) { set_bit(BITMAP_WRITE_ERROR, &bitmap->flags); return; } } } while (wait && md_super_wait(mddev) < 0); } static void md_bitmap_file_kick(struct bitmap *bitmap); #ifdef CONFIG_MD_BITMAP_FILE static void write_file_page(struct bitmap *bitmap, struct page *page, int wait) { struct buffer_head *bh = page_buffers(page); while (bh && bh->b_blocknr) { atomic_inc(&bitmap->pending_writes); set_buffer_locked(bh); set_buffer_mapped(bh); submit_bh(REQ_OP_WRITE | REQ_SYNC, bh); bh = bh->b_this_page; } if (wait) wait_event(bitmap->write_wait, atomic_read(&bitmap->pending_writes) == 0); } static void end_bitmap_write(struct buffer_head *bh, int uptodate) { struct bitmap *bitmap = bh->b_private; if (!uptodate) set_bit(BITMAP_WRITE_ERROR, &bitmap->flags); if (atomic_dec_and_test(&bitmap->pending_writes)) wake_up(&bitmap->write_wait); } static void free_buffers(struct page *page) { struct buffer_head *bh; if (!PagePrivate(page)) return; bh = page_buffers(page); while (bh) { struct buffer_head *next = bh->b_this_page; free_buffer_head(bh); bh = next; } detach_page_private(page); put_page(page); } /* read a page from a file. * We both read the page, and attach buffers to the page to record the * address of each block (using bmap). These addresses will be used * to write the block later, completely bypassing the filesystem. * This usage is similar to how swap files are handled, and allows us * to write to a file with no concerns of memory allocation failing. 
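 * For instance, with a 4 KiB bitmap page on a filesystem using 1 KiB
 * blocks, alloc_page_buffers() attaches four buffer_heads and the loop
 * below performs four bmap() lookups, recording each physical block
 * number in b_blocknr so that later writes can be submitted directly to
 * the block device.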
*/ static int read_file_page(struct file *file, unsigned long index, struct bitmap *bitmap, unsigned long count, struct page *page) { int ret = 0; struct inode *inode = file_inode(file); struct buffer_head *bh; sector_t block, blk_cur; unsigned long blocksize = i_blocksize(inode); pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE, (unsigned long long)index << PAGE_SHIFT); bh = alloc_page_buffers(page, blocksize); if (!bh) { ret = -ENOMEM; goto out; } attach_page_private(page, bh); blk_cur = index << (PAGE_SHIFT - inode->i_blkbits); while (bh) { block = blk_cur; if (count == 0) bh->b_blocknr = 0; else { ret = bmap(inode, &block); if (ret || !block) { ret = -EINVAL; bh->b_blocknr = 0; goto out; } bh->b_blocknr = block; bh->b_bdev = inode->i_sb->s_bdev; if (count < blocksize) count = 0; else count -= blocksize; bh->b_end_io = end_bitmap_write; bh->b_private = bitmap; atomic_inc(&bitmap->pending_writes); set_buffer_locked(bh); set_buffer_mapped(bh); submit_bh(REQ_OP_READ, bh); } blk_cur++; bh = bh->b_this_page; } wait_event(bitmap->write_wait, atomic_read(&bitmap->pending_writes)==0); if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) ret = -EIO; out: if (ret) pr_err("md: bitmap read error: (%dB @ %llu): %d\n", (int)PAGE_SIZE, (unsigned long long)index << PAGE_SHIFT, ret); return ret; } #else /* CONFIG_MD_BITMAP_FILE */ static void write_file_page(struct bitmap *bitmap, struct page *page, int wait) { } static int read_file_page(struct file *file, unsigned long index, struct bitmap *bitmap, unsigned long count, struct page *page) { return -EIO; } static void free_buffers(struct page *page) { put_page(page); } #endif /* CONFIG_MD_BITMAP_FILE */ /* * bitmap file superblock operations */ /* * write out a page to a file */ static void filemap_write_page(struct bitmap *bitmap, unsigned long pg_index, bool wait) { struct bitmap_storage *store = &bitmap->storage; struct page *page = store->filemap[pg_index]; if (mddev_is_clustered(bitmap->mddev)) { /* go to node bitmap area starting point */ pg_index += store->sb_index; } if (store->file) write_file_page(bitmap, page, wait); else write_sb_page(bitmap, pg_index, page, wait); } /* * md_bitmap_wait_writes() should be called before writing any bitmap * blocks, to ensure previous writes, particularly from * md_bitmap_daemon_work(), have completed. */ static void md_bitmap_wait_writes(struct bitmap *bitmap) { if (bitmap->storage.file) wait_event(bitmap->write_wait, atomic_read(&bitmap->pending_writes)==0); else /* Note that we ignore the return value. The writes * might have failed, but that would just mean that * some bits which should be cleared haven't been, * which is safe. The relevant bitmap blocks will * probably get written again, but there is no great * loss if they aren't. 
*/ md_super_wait(bitmap->mddev); } /* update the event counter and sync the superblock to disk */ static void bitmap_update_sb(void *data) { bitmap_super_t *sb; struct bitmap *bitmap = data; if (!bitmap || !bitmap->mddev) /* no bitmap for this array */ return; if (bitmap->mddev->bitmap_info.external) return; if (!bitmap->storage.sb_page) /* no superblock */ return; sb = kmap_local_page(bitmap->storage.sb_page); sb->events = cpu_to_le64(bitmap->mddev->events); if (bitmap->mddev->events < bitmap->events_cleared) /* rocking back to read-only */ bitmap->events_cleared = bitmap->mddev->events; sb->events_cleared = cpu_to_le64(bitmap->events_cleared); /* * clear BITMAP_WRITE_ERROR bit to protect against the case that * a bitmap write error occurred but the later writes succeeded. */ sb->state = cpu_to_le32(bitmap->flags & ~BIT(BITMAP_WRITE_ERROR)); /* Just in case these have been changed via sysfs: */ sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ); sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind); /* This might have been changed by a reshape */ sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors); sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize); sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes); sb->sectors_reserved = cpu_to_le32(bitmap->mddev-> bitmap_info.space); kunmap_local(sb); if (bitmap->storage.file) write_file_page(bitmap, bitmap->storage.sb_page, 1); else write_sb_page(bitmap, bitmap->storage.sb_index, bitmap->storage.sb_page, 1); } static void bitmap_print_sb(struct bitmap *bitmap) { bitmap_super_t *sb; if (!bitmap || !bitmap->storage.sb_page) return; sb = kmap_local_page(bitmap->storage.sb_page); pr_debug("%s: bitmap file superblock:\n", bmname(bitmap)); pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic)); pr_debug(" version: %u\n", le32_to_cpu(sb->version)); pr_debug(" uuid: %08x.%08x.%08x.%08x\n", le32_to_cpu(*(__le32 *)(sb->uuid+0)), le32_to_cpu(*(__le32 *)(sb->uuid+4)), le32_to_cpu(*(__le32 *)(sb->uuid+8)), le32_to_cpu(*(__le32 *)(sb->uuid+12))); pr_debug(" events: %llu\n", (unsigned long long) le64_to_cpu(sb->events)); pr_debug("events cleared: %llu\n", (unsigned long long) le64_to_cpu(sb->events_cleared)); pr_debug(" state: %08x\n", le32_to_cpu(sb->state)); pr_debug(" chunksize: %u B\n", le32_to_cpu(sb->chunksize)); pr_debug(" daemon sleep: %us\n", le32_to_cpu(sb->daemon_sleep)); pr_debug(" sync size: %llu KB\n", (unsigned long long)le64_to_cpu(sb->sync_size)/2); pr_debug("max write behind: %u\n", le32_to_cpu(sb->write_behind)); kunmap_local(sb); } /* * bitmap_new_disk_sb * @bitmap * * This function is somewhat the reverse of bitmap_read_sb. bitmap_read_sb * reads and verifies the on-disk bitmap superblock and populates bitmap_info. * This function verifies 'bitmap_info' and populates the on-disk bitmap * structure, which is to be written to disk. 
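 * (As an illustration of the values chosen below: an unset or oversized
 * daemon_sleep falls back to 5*HZ, and max_write_behind is clamped to
 * COUNTER_MAX / 2 = 16383 / 2 = 8191 outstanding write-behind requests.)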
* * Returns: 0 on success, -Exxx on error */ static int md_bitmap_new_disk_sb(struct bitmap *bitmap) { bitmap_super_t *sb; unsigned long chunksize, daemon_sleep, write_behind; bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (bitmap->storage.sb_page == NULL) return -ENOMEM; bitmap->storage.sb_index = 0; sb = kmap_local_page(bitmap->storage.sb_page); sb->magic = cpu_to_le32(BITMAP_MAGIC); sb->version = cpu_to_le32(BITMAP_MAJOR_HI); chunksize = bitmap->mddev->bitmap_info.chunksize; BUG_ON(!chunksize); if (!is_power_of_2(chunksize)) { kunmap_local(sb); pr_warn("bitmap chunksize not a power of 2\n"); return -EINVAL; } sb->chunksize = cpu_to_le32(chunksize); daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep; if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) { pr_debug("Choosing daemon_sleep default (5 sec)\n"); daemon_sleep = 5 * HZ; } sb->daemon_sleep = cpu_to_le32(daemon_sleep); bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep; /* * FIXME: write_behind for RAID1. If not specified, what * is a good choice? We choose COUNTER_MAX / 2 arbitrarily. */ write_behind = bitmap->mddev->bitmap_info.max_write_behind; if (write_behind > COUNTER_MAX / 2) write_behind = COUNTER_MAX / 2; sb->write_behind = cpu_to_le32(write_behind); bitmap->mddev->bitmap_info.max_write_behind = write_behind; /* keep the array size field of the bitmap superblock up to date */ sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors); memcpy(sb->uuid, bitmap->mddev->uuid, 16); set_bit(BITMAP_STALE, &bitmap->flags); sb->state = cpu_to_le32(bitmap->flags); bitmap->events_cleared = bitmap->mddev->events; sb->events_cleared = cpu_to_le64(bitmap->mddev->events); bitmap->mddev->bitmap_info.nodes = 0; kunmap_local(sb); return 0; } /* read the superblock from the bitmap file and initialize some bitmap fields */ static int md_bitmap_read_sb(struct bitmap *bitmap) { char *reason = NULL; bitmap_super_t *sb; unsigned long chunksize, daemon_sleep, write_behind; unsigned long long events; int nodes = 0; unsigned long sectors_reserved = 0; int err = -EINVAL; struct page *sb_page; loff_t offset = 0; if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) { chunksize = 128 * 1024 * 1024; daemon_sleep = 5 * HZ; write_behind = 0; set_bit(BITMAP_STALE, &bitmap->flags); err = 0; goto out_no_sb; } /* page 0 is the superblock, read it... */ sb_page = alloc_page(GFP_KERNEL); if (!sb_page) return -ENOMEM; bitmap->storage.sb_page = sb_page; re_read: /* If cluster_slot is set, the cluster is setup */ if (bitmap->cluster_slot >= 0) { sector_t bm_blocks = bitmap->mddev->resync_max_sectors; bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, (bitmap->mddev->bitmap_info.chunksize >> 9)); /* bits to bytes */ bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t); /* to 4k blocks */ bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096); offset = bitmap->cluster_slot * (bm_blocks << 3); pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__, bitmap->cluster_slot, offset); } if (bitmap->storage.file) { loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host); int bytes = isize > PAGE_SIZE ? 
PAGE_SIZE : isize; err = read_file_page(bitmap->storage.file, 0, bitmap, bytes, sb_page); } else { err = read_sb_page(bitmap->mddev, offset, sb_page, 0, sizeof(bitmap_super_t)); } if (err) return err; err = -EINVAL; sb = kmap_local_page(sb_page); chunksize = le32_to_cpu(sb->chunksize); daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ; write_behind = le32_to_cpu(sb->write_behind); sectors_reserved = le32_to_cpu(sb->sectors_reserved); /* verify that the bitmap-specific fields are valid */ if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) reason = "bad magic"; else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO || le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED) reason = "unrecognized superblock version"; else if (chunksize < 512) reason = "bitmap chunksize too small"; else if (!is_power_of_2(chunksize)) reason = "bitmap chunksize not a power of 2"; else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT) reason = "daemon sleep period out of range"; else if (write_behind > COUNTER_MAX) reason = "write-behind limit out of range (0 - 16383)"; if (reason) { pr_warn("%s: invalid bitmap file superblock: %s\n", bmname(bitmap), reason); goto out; } /* * Setup nodes/clustername only if bitmap version is * cluster-compatible */ if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) { nodes = le32_to_cpu(sb->nodes); strscpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64); } /* keep the array size field of the bitmap superblock up to date */ sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors); if (bitmap->mddev->persistent) { /* * We have a persistent array superblock, so compare the * bitmap's UUID and event counter to the mddev's */ if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) { pr_warn("%s: bitmap superblock UUID mismatch\n", bmname(bitmap)); goto out; } events = le64_to_cpu(sb->events); if (!nodes && (events < bitmap->mddev->events)) { pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n", bmname(bitmap), events, (unsigned long long) bitmap->mddev->events); set_bit(BITMAP_STALE, &bitmap->flags); } } /* assign fields using values from superblock */ bitmap->flags |= le32_to_cpu(sb->state); if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN) set_bit(BITMAP_HOSTENDIAN, &bitmap->flags); bitmap->events_cleared = le64_to_cpu(sb->events_cleared); err = 0; out: kunmap_local(sb); if (err == 0 && nodes && (bitmap->cluster_slot < 0)) { /* Assigning chunksize is required for "re_read" */ bitmap->mddev->bitmap_info.chunksize = chunksize; err = md_setup_cluster(bitmap->mddev, nodes); if (err) { pr_warn("%s: Could not setup cluster service (%d)\n", bmname(bitmap), err); goto out_no_sb; } bitmap->cluster_slot = bitmap->mddev->cluster_ops->slot_number(bitmap->mddev); goto re_read; } out_no_sb: if (err == 0) { if (test_bit(BITMAP_STALE, &bitmap->flags)) bitmap->events_cleared = bitmap->mddev->events; bitmap->mddev->bitmap_info.chunksize = chunksize; bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep; bitmap->mddev->bitmap_info.max_write_behind = write_behind; bitmap->mddev->bitmap_info.nodes = nodes; if (bitmap->mddev->bitmap_info.space == 0 || bitmap->mddev->bitmap_info.space > sectors_reserved) bitmap->mddev->bitmap_info.space = sectors_reserved; } else { bitmap_print_sb(bitmap); if (bitmap->cluster_slot < 0) md_cluster_stop(bitmap->mddev); } return err; } /* * general bitmap file operations */ /* * on-disk bitmap: * * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap * file a page at a time. 
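 * For instance, an array split into 32768 chunks needs only 32768 / 8 =
 * 4096 bytes of bit data, so with 4 KiB pages such a bitmap can be read
 * or flushed with a single page-sized I/O.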
There's a superblock at the start of the file. */ /* calculate the index of the page that contains this bit */ static inline unsigned long file_page_index(struct bitmap_storage *store, unsigned long chunk) { if (store->sb_page) chunk += sizeof(bitmap_super_t) << 3; return chunk >> PAGE_BIT_SHIFT; } /* calculate the (bit) offset of this bit within a page */ static inline unsigned long file_page_offset(struct bitmap_storage *store, unsigned long chunk) { if (store->sb_page) chunk += sizeof(bitmap_super_t) << 3; return chunk & (PAGE_BITS - 1); } /* * return a pointer to the page in the filemap that contains the given bit * */ static inline struct page *filemap_get_page(struct bitmap_storage *store, unsigned long chunk) { if (file_page_index(store, chunk) >= store->file_pages) return NULL; return store->filemap[file_page_index(store, chunk)]; } static int md_bitmap_storage_alloc(struct bitmap_storage *store, unsigned long chunks, int with_super, int slot_number) { int pnum, offset = 0; unsigned long num_pages; unsigned long bytes; bytes = DIV_ROUND_UP(chunks, 8); if (with_super) bytes += sizeof(bitmap_super_t); num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE); offset = slot_number * num_pages; store->filemap = kmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL); if (!store->filemap) return -ENOMEM; if (with_super && !store->sb_page) { store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO); if (store->sb_page == NULL) return -ENOMEM; } pnum = 0; if (store->sb_page) { store->filemap[0] = store->sb_page; pnum = 1; store->sb_index = offset; } for ( ; pnum < num_pages; pnum++) { store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO); if (!store->filemap[pnum]) { store->file_pages = pnum; return -ENOMEM; } } store->file_pages = pnum; /* We need 4 bits per page, rounded up to a multiple * of sizeof(unsigned long) */ store->filemap_attr = kzalloc( roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)), GFP_KERNEL); if (!store->filemap_attr) return -ENOMEM; store->bytes = bytes; return 0; } static void md_bitmap_file_unmap(struct bitmap_storage *store) { struct file *file = store->file; struct page *sb_page = store->sb_page; struct page **map = store->filemap; int pages = store->file_pages; while (pages--) if (map[pages] != sb_page) /* 0 is sb_page, release it below */ free_buffers(map[pages]); kfree(map); kfree(store->filemap_attr); if (sb_page) free_buffers(sb_page); if (file) { struct inode *inode = file_inode(file); invalidate_mapping_pages(inode->i_mapping, 0, -1); fput(file); } } /* * bitmap_file_kick - if an error occurs while manipulating the bitmap file * then it is no longer reliable, so we stop using it and we mark the file * as failed in the superblock */ static void md_bitmap_file_kick(struct bitmap *bitmap) { if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) { bitmap_update_sb(bitmap); if (bitmap->storage.file) { pr_warn("%s: kicking failed bitmap file %pD4 from array!\n", bmname(bitmap), bitmap->storage.file); } else pr_warn("%s: disabling internal bitmap due to errors\n", bmname(bitmap)); } } enum bitmap_page_attr { BITMAP_PAGE_DIRTY = 0, /* there are set bits that need to be synced */ BITMAP_PAGE_PENDING = 1, /* there are bits that are being cleaned. * i.e. counter is 1 or 2. 
*/ BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */ }; static inline void set_page_attr(struct bitmap *bitmap, int pnum, enum bitmap_page_attr attr) { set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr); } static inline void clear_page_attr(struct bitmap *bitmap, int pnum, enum bitmap_page_attr attr) { clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr); } static inline int test_page_attr(struct bitmap *bitmap, int pnum, enum bitmap_page_attr attr) { return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr); } static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum, enum bitmap_page_attr attr) { return test_and_clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr); } /* * bitmap_file_set_bit -- called before performing a write to the md device * to set (and eventually sync) a particular bit in the bitmap file * * we set the bit immediately, then we record the page number so that * when an unplug occurs, we can flush the dirty pages out to disk */ static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block) { unsigned long bit; struct page *page; void *kaddr; unsigned long chunk = block >> bitmap->counts.chunkshift; struct bitmap_storage *store = &bitmap->storage; unsigned long index = file_page_index(store, chunk); unsigned long node_offset = 0; index += store->sb_index; if (mddev_is_clustered(bitmap->mddev)) node_offset = bitmap->cluster_slot * store->file_pages; page = filemap_get_page(&bitmap->storage, chunk); if (!page) return; bit = file_page_offset(&bitmap->storage, chunk); /* set the bit */ kaddr = kmap_local_page(page); if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags)) set_bit(bit, kaddr); else set_bit_le(bit, kaddr); kunmap_local(kaddr); pr_debug("set file bit %lu page %lu\n", bit, index); /* record page number so it gets flushed to disk when unplug occurs */ set_page_attr(bitmap, index - node_offset, BITMAP_PAGE_DIRTY); } static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block) { unsigned long bit; struct page *page; void *paddr; unsigned long chunk = block >> bitmap->counts.chunkshift; struct bitmap_storage *store = &bitmap->storage; unsigned long index = file_page_index(store, chunk); unsigned long node_offset = 0; index += store->sb_index; if (mddev_is_clustered(bitmap->mddev)) node_offset = bitmap->cluster_slot * store->file_pages; page = filemap_get_page(&bitmap->storage, chunk); if (!page) return; bit = file_page_offset(&bitmap->storage, chunk); paddr = kmap_local_page(page); if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags)) clear_bit(bit, paddr); else clear_bit_le(bit, paddr); kunmap_local(paddr); if (!test_page_attr(bitmap, index - node_offset, BITMAP_PAGE_NEEDWRITE)) { set_page_attr(bitmap, index - node_offset, BITMAP_PAGE_PENDING); bitmap->allclean = 0; } } static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block) { unsigned long bit; struct page *page; void *paddr; unsigned long chunk = block >> bitmap->counts.chunkshift; int set = 0; page = filemap_get_page(&bitmap->storage, chunk); if (!page) return -EINVAL; bit = file_page_offset(&bitmap->storage, chunk); paddr = kmap_local_page(page); if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags)) set = test_bit(bit, paddr); else set = test_bit_le(bit, paddr); kunmap_local(paddr); return set; } /* this gets called when the md device is ready to unplug its underlying * (slave) device queues -- before we let any writes go down, we need to * sync the dirty pages of the bitmap file to disk */ static void 
__bitmap_unplug(struct bitmap *bitmap) { unsigned long i; int dirty, need_write; int writing = 0; if (!__bitmap_enabled(bitmap)) return; /* look at each page to see if there are any set bits that need to be * flushed out to disk */ for (i = 0; i < bitmap->storage.file_pages; i++) { dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY); need_write = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE); if (dirty || need_write) { if (!writing) { md_bitmap_wait_writes(bitmap); mddev_add_trace_msg(bitmap->mddev, "md bitmap_unplug"); } clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING); filemap_write_page(bitmap, i, false); writing = 1; } } if (writing) md_bitmap_wait_writes(bitmap); if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) md_bitmap_file_kick(bitmap); } struct bitmap_unplug_work { struct work_struct work; struct bitmap *bitmap; struct completion *done; }; static void md_bitmap_unplug_fn(struct work_struct *work) { struct bitmap_unplug_work *unplug_work = container_of(work, struct bitmap_unplug_work, work); __bitmap_unplug(unplug_work->bitmap); complete(unplug_work->done); } static void bitmap_unplug_async(struct bitmap *bitmap) { DECLARE_COMPLETION_ONSTACK(done); struct bitmap_unplug_work unplug_work; INIT_WORK_ONSTACK(&unplug_work.work, md_bitmap_unplug_fn); unplug_work.bitmap = bitmap; unplug_work.done = &done; queue_work(md_bitmap_wq, &unplug_work.work); wait_for_completion(&done); destroy_work_on_stack(&unplug_work.work); } static void bitmap_unplug(struct mddev *mddev, bool sync) { struct bitmap *bitmap = mddev->bitmap; if (!bitmap) return; if (sync) __bitmap_unplug(bitmap); else bitmap_unplug_async(bitmap); } static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed); /* * Initialize the in-memory bitmap from the on-disk bitmap and set up the memory * mapping of the bitmap file. * * Special case: If there's no bitmap file, or if the bitmap file had been * previously kicked from the array, we mark all the bits as 1's in order to * cause a full resync. * * We ignore all bits for sectors that end earlier than 'start'. * This is used when reading an out-of-date bitmap. */ static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) { bool outofdate = test_bit(BITMAP_STALE, &bitmap->flags); struct mddev *mddev = bitmap->mddev; unsigned long chunks = bitmap->counts.chunks; struct bitmap_storage *store = &bitmap->storage; struct file *file = store->file; unsigned long node_offset = 0; unsigned long bit_cnt = 0; unsigned long i; int ret; if (!file && !mddev->bitmap_info.offset) { /* No permanent bitmap - fill with '1s'. 
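 * (Each such chunk ends up with an in-memory counter of 2, plus
 * NEEDED_MASK when the chunk ends at or after 'start' (typically
 * 0x8002), via md_bitmap_set_memory_bits() below.)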
*/ store->filemap = NULL; store->file_pages = 0; for (i = 0; i < chunks ; i++) { /* if the disk bit is set, set the memory bit */ int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift) >= start); md_bitmap_set_memory_bits(bitmap, (sector_t)i << bitmap->counts.chunkshift, needed); } return 0; } if (file && i_size_read(file->f_mapping->host) < store->bytes) { pr_warn("%s: bitmap file too short %lu < %lu\n", bmname(bitmap), (unsigned long) i_size_read(file->f_mapping->host), store->bytes); ret = -ENOSPC; goto err; } if (mddev_is_clustered(mddev)) node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE)); for (i = 0; i < store->file_pages; i++) { struct page *page = store->filemap[i]; int count; /* unmap the old page, we're done with it */ if (i == store->file_pages - 1) count = store->bytes - i * PAGE_SIZE; else count = PAGE_SIZE; if (file) ret = read_file_page(file, i, bitmap, count, page); else ret = read_sb_page(mddev, 0, page, i + node_offset, count); if (ret) goto err; } if (outofdate) { pr_warn("%s: bitmap file is out of date, doing full recovery\n", bmname(bitmap)); for (i = 0; i < store->file_pages; i++) { struct page *page = store->filemap[i]; unsigned long offset = 0; void *paddr; if (i == 0 && !mddev->bitmap_info.external) offset = sizeof(bitmap_super_t); /* * If the bitmap is out of date, dirty the whole page * and write it out */ paddr = kmap_local_page(page); memset(paddr + offset, 0xff, PAGE_SIZE - offset); kunmap_local(paddr); filemap_write_page(bitmap, i, true); if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) { ret = -EIO; goto err; } } } for (i = 0; i < chunks; i++) { struct page *page = filemap_get_page(&bitmap->storage, i); unsigned long bit = file_page_offset(&bitmap->storage, i); void *paddr; bool was_set; paddr = kmap_local_page(page); if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags)) was_set = test_bit(bit, paddr); else was_set = test_bit_le(bit, paddr); kunmap_local(paddr); if (was_set) { /* if the disk bit is set, set the memory bit */ int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift >= start); md_bitmap_set_memory_bits(bitmap, (sector_t)i << bitmap->counts.chunkshift, needed); bit_cnt++; } } pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n", bmname(bitmap), store->file_pages, bit_cnt, chunks); return 0; err: pr_warn("%s: bitmap initialisation failed: %d\n", bmname(bitmap), ret); return ret; } /* just flag bitmap pages as needing to be written. 
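 * (Each filemap page owns four attribute bits in filemap_attr, indexed
 * as (pnum << 2) + attr; for example page 3's BITMAP_PAGE_DIRTY bit is
 * bit 12 and its BITMAP_PAGE_NEEDWRITE bit is bit 14.)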
*/ static void bitmap_write_all(struct mddev *mddev) { int i; struct bitmap *bitmap = mddev->bitmap; if (!bitmap || !bitmap->storage.filemap) return; /* Only one copy, so nothing needed */ if (bitmap->storage.file) return; for (i = 0; i < bitmap->storage.file_pages; i++) set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE); bitmap->allclean = 0; } static void md_bitmap_count_page(struct bitmap_counts *bitmap, sector_t offset, int inc) { sector_t chunk = offset >> bitmap->chunkshift; unsigned long page = chunk >> PAGE_COUNTER_SHIFT; bitmap->bp[page].count += inc; md_bitmap_checkfree(bitmap, page); } static void md_bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset) { sector_t chunk = offset >> bitmap->chunkshift; unsigned long page = chunk >> PAGE_COUNTER_SHIFT; struct bitmap_page *bp = &bitmap->bp[page]; if (!bp->pending) bp->pending = 1; } static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap, sector_t offset, sector_t *blocks, int create); static void mddev_set_timeout(struct mddev *mddev, unsigned long timeout, bool force) { struct md_thread *thread; rcu_read_lock(); thread = rcu_dereference(mddev->thread); if (!thread) goto out; if (force || thread->timeout < MAX_SCHEDULE_TIMEOUT) thread->timeout = timeout; out: rcu_read_unlock(); } /* * bitmap daemon -- periodically wakes up to clean bits and flush pages * out to disk */ static void bitmap_daemon_work(struct mddev *mddev) { struct bitmap *bitmap; unsigned long j; unsigned long nextpage; sector_t blocks; struct bitmap_counts *counts; /* Use a mutex to guard daemon_work against * bitmap_destroy. */ mutex_lock(&mddev->bitmap_info.mutex); bitmap = mddev->bitmap; if (bitmap == NULL) { mutex_unlock(&mddev->bitmap_info.mutex); return; } if (time_before(jiffies, bitmap->daemon_lastrun + mddev->bitmap_info.daemon_sleep)) goto done; bitmap->daemon_lastrun = jiffies; if (bitmap->allclean) { mddev_set_timeout(mddev, MAX_SCHEDULE_TIMEOUT, true); goto done; } bitmap->allclean = 1; mddev_add_trace_msg(bitmap->mddev, "md bitmap_daemon_work"); /* Any file-page which is PENDING now needs to be written. * So set NEEDWRITE now, then after we make any last-minute changes * we will write it. */ for (j = 0; j < bitmap->storage.file_pages; j++) if (test_and_clear_page_attr(bitmap, j, BITMAP_PAGE_PENDING)) set_page_attr(bitmap, j, BITMAP_PAGE_NEEDWRITE); if (bitmap->need_sync && mddev->bitmap_info.external == 0) { /* Arrange for superblock update as well as * other changes */ bitmap_super_t *sb; bitmap->need_sync = 0; if (bitmap->storage.filemap) { sb = kmap_local_page(bitmap->storage.sb_page); sb->events_cleared = cpu_to_le64(bitmap->events_cleared); kunmap_local(sb); set_page_attr(bitmap, 0, BITMAP_PAGE_NEEDWRITE); } } /* Now look at the bitmap counters and if any are '2' or '1', * decrement and handle accordingly. 
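 * For example, a chunk whose last write completed with its counter at 2
 * is dropped to 1 on the first sweep (and its page marked pending); the
 * following sweep finds it at 1 and clears both the in-memory counter
 * and the on-disk bit, so a dirty bit stays set on disk for at least
 * about one daemon_sleep interval after the last write to that chunk.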
*/ counts = &bitmap->counts; spin_lock_irq(&counts->lock); nextpage = 0; for (j = 0; j < counts->chunks; j++) { bitmap_counter_t *bmc; sector_t block = (sector_t)j << counts->chunkshift; if (j == nextpage) { nextpage += PAGE_COUNTER_RATIO; if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) { j |= PAGE_COUNTER_MASK; continue; } counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0; } bmc = md_bitmap_get_counter(counts, block, &blocks, 0); if (!bmc) { j |= PAGE_COUNTER_MASK; continue; } if (*bmc == 1 && !bitmap->need_sync) { /* We can clear the bit */ *bmc = 0; md_bitmap_count_page(counts, block, -1); md_bitmap_file_clear_bit(bitmap, block); } else if (*bmc && *bmc <= 2) { *bmc = 1; md_bitmap_set_pending(counts, block); bitmap->allclean = 0; } } spin_unlock_irq(&counts->lock); md_bitmap_wait_writes(bitmap); /* Now start writeout on any page in NEEDWRITE that isn't DIRTY. * DIRTY pages need to be written by bitmap_unplug so it can wait * for them. * If we find any DIRTY page we stop there and let bitmap_unplug * handle all the rest. This is important in the case where * the first blocking holds the superblock and it has been updated. * We mustn't write any other blocks before the superblock. */ for (j = 0; j < bitmap->storage.file_pages && !test_bit(BITMAP_STALE, &bitmap->flags); j++) { if (test_page_attr(bitmap, j, BITMAP_PAGE_DIRTY)) /* bitmap_unplug will handle the rest */ break; if (bitmap->storage.filemap && test_and_clear_page_attr(bitmap, j, BITMAP_PAGE_NEEDWRITE)) filemap_write_page(bitmap, j, false); } done: if (bitmap->allclean == 0) mddev_set_timeout(mddev, mddev->bitmap_info.daemon_sleep, true); mutex_unlock(&mddev->bitmap_info.mutex); } static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap, sector_t offset, sector_t *blocks, int create) __releases(bitmap->lock) __acquires(bitmap->lock) { /* If 'create', we might release the lock and reclaim it. * The lock must have been taken with interrupts enabled. * If !create, we don't release the lock. */ sector_t chunk = offset >> bitmap->chunkshift; unsigned long page = chunk >> PAGE_COUNTER_SHIFT; unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT; sector_t csize = ((sector_t)1) << bitmap->chunkshift; int err; if (page >= bitmap->pages) { /* * This can happen if bitmap_start_sync goes beyond * End-of-device while looking for a whole page or * user set a huge number to sysfs bitmap_set_bits. */ *blocks = csize - (offset & (csize - 1)); return NULL; } err = md_bitmap_checkpage(bitmap, page, create, 0); if (bitmap->bp[page].hijacked || bitmap->bp[page].map == NULL) csize = ((sector_t)1) << (bitmap->chunkshift + PAGE_COUNTER_SHIFT); *blocks = csize - (offset & (csize - 1)); if (err < 0) return NULL; /* now locked ... */ if (bitmap->bp[page].hijacked) { /* hijacked pointer */ /* should we use the first or second counter field * of the hijacked pointer? 
*/ int hi = (pageoff > PAGE_COUNTER_MASK); return &((bitmap_counter_t *) &bitmap->bp[page].map)[hi]; } else /* page is allocated */ return (bitmap_counter_t *) &(bitmap->bp[page].map[pageoff]); } static void bitmap_start_write(struct mddev *mddev, sector_t offset, unsigned long sectors) { struct bitmap *bitmap = mddev->bitmap; if (!bitmap) return; while (sectors) { sector_t blocks; bitmap_counter_t *bmc; spin_lock_irq(&bitmap->counts.lock); bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 1); if (!bmc) { spin_unlock_irq(&bitmap->counts.lock); return; } if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) { DEFINE_WAIT(__wait); /* note that it is safe to do the prepare_to_wait * after the test as long as we do it before dropping * the spinlock. */ prepare_to_wait(&bitmap->overflow_wait, &__wait, TASK_UNINTERRUPTIBLE); spin_unlock_irq(&bitmap->counts.lock); schedule(); finish_wait(&bitmap->overflow_wait, &__wait); continue; } switch (*bmc) { case 0: md_bitmap_file_set_bit(bitmap, offset); md_bitmap_count_page(&bitmap->counts, offset, 1); fallthrough; case 1: *bmc = 2; } (*bmc)++; spin_unlock_irq(&bitmap->counts.lock); offset += blocks; if (sectors > blocks) sectors -= blocks; else sectors = 0; } } static void bitmap_end_write(struct mddev *mddev, sector_t offset, unsigned long sectors) { struct bitmap *bitmap = mddev->bitmap; if (!bitmap) return; while (sectors) { sector_t blocks; unsigned long flags; bitmap_counter_t *bmc; spin_lock_irqsave(&bitmap->counts.lock, flags); bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 0); if (!bmc) { spin_unlock_irqrestore(&bitmap->counts.lock, flags); return; } if (!bitmap->mddev->degraded) { if (bitmap->events_cleared < bitmap->mddev->events) { bitmap->events_cleared = bitmap->mddev->events; bitmap->need_sync = 1; sysfs_notify_dirent_safe( bitmap->sysfs_can_clear); } } else if (!NEEDED(*bmc)) { *bmc |= NEEDED_MASK; } if (COUNTER(*bmc) == COUNTER_MAX) wake_up(&bitmap->overflow_wait); (*bmc)--; if (*bmc <= 2) { md_bitmap_set_pending(&bitmap->counts, offset); bitmap->allclean = 0; } spin_unlock_irqrestore(&bitmap->counts.lock, flags); offset += blocks; if (sectors > blocks) sectors -= blocks; else sectors = 0; } } static bool __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, bool degraded) { bitmap_counter_t *bmc; bool rv; if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */ *blocks = 1024; return true; /* always resync if no bitmap */ } spin_lock_irq(&bitmap->counts.lock); rv = false; bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0); if (bmc) { /* locked */ if (RESYNC(*bmc)) { rv = true; } else if (NEEDED(*bmc)) { rv = true; if (!degraded) { /* don't set/clear bits if degraded */ *bmc |= RESYNC_MASK; *bmc &= ~NEEDED_MASK; } } } spin_unlock_irq(&bitmap->counts.lock); return rv; } static bool bitmap_start_sync(struct mddev *mddev, sector_t offset, sector_t *blocks, bool degraded) { /* bitmap_start_sync must always report on multiples of whole * pages, otherwise resync (which is very PAGE_SIZE based) will * get confused. * So call __bitmap_start_sync repeatedly (if needed) until * At least PAGE_SIZE>>9 blocks are covered. * Return the 'or' of the result. 
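 * With 4 KiB pages that means at least PAGE_SIZE >> 9 = 8 sectors
 * reported per bitmap_start_sync() call; if a chunk boundary falls
 * inside those 8 sectors, the loop below simply calls
 * __bitmap_start_sync() again for the next chunk and ORs the per-chunk
 * answers together.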
*/ bool rv = false; sector_t blocks1; *blocks = 0; while (*blocks < (PAGE_SIZE>>9)) { rv |= __bitmap_start_sync(mddev->bitmap, offset, &blocks1, degraded); offset += blocks1; *blocks += blocks1; } return rv; } static void __bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, bool aborted) { bitmap_counter_t *bmc; unsigned long flags; if (bitmap == NULL) { *blocks = 1024; return; } spin_lock_irqsave(&bitmap->counts.lock, flags); bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0); if (bmc == NULL) goto unlock; /* locked */ if (RESYNC(*bmc)) { *bmc &= ~RESYNC_MASK; if (!NEEDED(*bmc) && aborted) *bmc |= NEEDED_MASK; else { if (*bmc <= 2) { md_bitmap_set_pending(&bitmap->counts, offset); bitmap->allclean = 0; } } } unlock: spin_unlock_irqrestore(&bitmap->counts.lock, flags); } static void bitmap_end_sync(struct mddev *mddev, sector_t offset, sector_t *blocks) { __bitmap_end_sync(mddev->bitmap, offset, blocks, true); } static void bitmap_close_sync(struct mddev *mddev) { /* Sync has finished, and any bitmap chunks that weren't synced * properly have been aborted. It remains to us to clear the * RESYNC bit wherever it is still on */ sector_t sector = 0; sector_t blocks; struct bitmap *bitmap = mddev->bitmap; if (!bitmap) return; while (sector < bitmap->mddev->resync_max_sectors) { __bitmap_end_sync(bitmap, sector, &blocks, false); sector += blocks; } } static void bitmap_cond_end_sync(struct mddev *mddev, sector_t sector, bool force) { sector_t s = 0; sector_t blocks; struct bitmap *bitmap = mddev->bitmap; if (!bitmap) return; if (sector == 0) { bitmap->last_end_sync = jiffies; return; } if (!force && time_before(jiffies, (bitmap->last_end_sync + bitmap->mddev->bitmap_info.daemon_sleep))) return; wait_event(bitmap->mddev->recovery_wait, atomic_read(&bitmap->mddev->recovery_active) == 0); bitmap->mddev->curr_resync_completed = sector; set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags); sector &= ~((1ULL << bitmap->counts.chunkshift) - 1); s = 0; while (s < sector && s < bitmap->mddev->resync_max_sectors) { __bitmap_end_sync(bitmap, s, &blocks, false); s += blocks; } bitmap->last_end_sync = jiffies; sysfs_notify_dirent_safe(bitmap->mddev->sysfs_completed); } static void bitmap_sync_with_cluster(struct mddev *mddev, sector_t old_lo, sector_t old_hi, sector_t new_lo, sector_t new_hi) { struct bitmap *bitmap = mddev->bitmap; sector_t sector, blocks = 0; for (sector = old_lo; sector < new_lo; ) { __bitmap_end_sync(bitmap, sector, &blocks, false); sector += blocks; } WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n"); for (sector = old_hi; sector < new_hi; ) { bitmap_start_sync(mddev, sector, &blocks, false); sector += blocks; } WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n"); } static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed) { /* For each chunk covered by any of these sectors, set the * counter to 2 and possibly set resync_needed. 
They should all * be 0 at this point */ sector_t secs; bitmap_counter_t *bmc; spin_lock_irq(&bitmap->counts.lock); bmc = md_bitmap_get_counter(&bitmap->counts, offset, &secs, 1); if (!bmc) { spin_unlock_irq(&bitmap->counts.lock); return; } if (!*bmc) { *bmc = 2; md_bitmap_count_page(&bitmap->counts, offset, 1); md_bitmap_set_pending(&bitmap->counts, offset); bitmap->allclean = 0; } if (needed) *bmc |= NEEDED_MASK; spin_unlock_irq(&bitmap->counts.lock); } /* dirty the memory and file bits for bitmap chunks "s" to "e" */ static void bitmap_dirty_bits(struct mddev *mddev, unsigned long s, unsigned long e) { unsigned long chunk; struct bitmap *bitmap = mddev->bitmap; if (!bitmap) return; for (chunk = s; chunk <= e; chunk++) { sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift; md_bitmap_set_memory_bits(bitmap, sec, 1); md_bitmap_file_set_bit(bitmap, sec); if (sec < bitmap->mddev->recovery_cp) /* We are asserting that the array is dirty, * so move the recovery_cp address back so * that it is obvious that it is dirty */ bitmap->mddev->recovery_cp = sec; } } static void bitmap_flush(struct mddev *mddev) { struct bitmap *bitmap = mddev->bitmap; long sleep; if (!bitmap) /* there was no bitmap */ return; /* run the daemon_work three time to ensure everything is flushed * that can be */ sleep = mddev->bitmap_info.daemon_sleep * 2; bitmap->daemon_lastrun -= sleep; bitmap_daemon_work(mddev); bitmap->daemon_lastrun -= sleep; bitmap_daemon_work(mddev); bitmap->daemon_lastrun -= sleep; bitmap_daemon_work(mddev); if (mddev->bitmap_info.external) md_super_wait(mddev); bitmap_update_sb(bitmap); } static void md_bitmap_free(void *data) { unsigned long k, pages; struct bitmap_page *bp; struct bitmap *bitmap = data; if (!bitmap) /* there was no bitmap */ return; if (bitmap->sysfs_can_clear) sysfs_put(bitmap->sysfs_can_clear); if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info && bitmap->cluster_slot == bitmap->mddev->cluster_ops->slot_number(bitmap->mddev)) md_cluster_stop(bitmap->mddev); /* Shouldn't be needed - but just in case.... */ wait_event(bitmap->write_wait, atomic_read(&bitmap->pending_writes) == 0); /* release the bitmap file */ md_bitmap_file_unmap(&bitmap->storage); bp = bitmap->counts.bp; pages = bitmap->counts.pages; /* free all allocated memory */ if (bp) /* deallocate the page memory */ for (k = 0; k < pages; k++) if (bp[k].map && !bp[k].hijacked) kfree(bp[k].map); kfree(bp); kfree(bitmap); } static void bitmap_start_behind_write(struct mddev *mddev) { struct bitmap *bitmap = mddev->bitmap; int bw; if (!bitmap) return; atomic_inc(&bitmap->behind_writes); bw = atomic_read(&bitmap->behind_writes); if (bw > bitmap->behind_writes_used) bitmap->behind_writes_used = bw; pr_debug("inc write-behind count %d/%lu\n", bw, bitmap->mddev->bitmap_info.max_write_behind); } static void bitmap_end_behind_write(struct mddev *mddev) { struct bitmap *bitmap = mddev->bitmap; if (!bitmap) return; if (atomic_dec_and_test(&bitmap->behind_writes)) wake_up(&bitmap->behind_wait); pr_debug("dec write-behind count %d/%lu\n", atomic_read(&bitmap->behind_writes), bitmap->mddev->bitmap_info.max_write_behind); } static void bitmap_wait_behind_writes(struct mddev *mddev) { struct bitmap *bitmap = mddev->bitmap; /* wait for behind writes to complete */ if (bitmap && atomic_read(&bitmap->behind_writes) > 0) { pr_debug("md:%s: behind writes in progress - waiting to stop.\n", mdname(mddev)); /* need to kick something here to make sure I/O goes? 
*/ wait_event(bitmap->behind_wait, atomic_read(&bitmap->behind_writes) == 0); } } static void bitmap_destroy(struct mddev *mddev) { struct bitmap *bitmap = mddev->bitmap; if (!bitmap) /* there was no bitmap */ return; bitmap_wait_behind_writes(mddev); if (!mddev->serialize_policy) mddev_destroy_serial_pool(mddev, NULL); mutex_lock(&mddev->bitmap_info.mutex); spin_lock(&mddev->lock); mddev->bitmap = NULL; /* disconnect from the md device */ spin_unlock(&mddev->lock); mutex_unlock(&mddev->bitmap_info.mutex); mddev_set_timeout(mddev, MAX_SCHEDULE_TIMEOUT, true); md_bitmap_free(bitmap); } /* * initialize the bitmap structure * if this returns an error, bitmap_destroy must be called to do clean up * once mddev->bitmap is set */ static struct bitmap *__bitmap_create(struct mddev *mddev, int slot) { struct bitmap *bitmap; sector_t blocks = mddev->resync_max_sectors; struct file *file = mddev->bitmap_info.file; int err; struct kernfs_node *bm = NULL; BUILD_BUG_ON(sizeof(bitmap_super_t) != 256); BUG_ON(file && mddev->bitmap_info.offset); if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { pr_notice("md/raid:%s: array with journal cannot have bitmap\n", mdname(mddev)); return ERR_PTR(-EBUSY); } bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL); if (!bitmap) return ERR_PTR(-ENOMEM); spin_lock_init(&bitmap->counts.lock); atomic_set(&bitmap->pending_writes, 0); init_waitqueue_head(&bitmap->write_wait); init_waitqueue_head(&bitmap->overflow_wait); init_waitqueue_head(&bitmap->behind_wait); bitmap->mddev = mddev; bitmap->cluster_slot = slot; if (mddev->kobj.sd) bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap"); if (bm) { bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear"); sysfs_put(bm); } else bitmap->sysfs_can_clear = NULL; bitmap->storage.file = file; if (file) { get_file(file); /* As future accesses to this file will use bmap, * and bypass the page cache, we must sync the file * first. */ vfs_fsync(file, 1); } /* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */ if (!mddev->bitmap_info.external) { /* * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is * instructing us to create a new on-disk bitmap instance. */ if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags)) err = md_bitmap_new_disk_sb(bitmap); else err = md_bitmap_read_sb(bitmap); } else { err = 0; if (mddev->bitmap_info.chunksize == 0 || mddev->bitmap_info.daemon_sleep == 0) /* chunksize and time_base need to be * set first. */ err = -EINVAL; } if (err) goto error; bitmap->daemon_lastrun = jiffies; err = __bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, true); if (err) goto error; pr_debug("created bitmap (%lu pages) for device %s\n", bitmap->counts.pages, bmname(bitmap)); err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? 
-EIO : 0; if (err) goto error; return bitmap; error: md_bitmap_free(bitmap); return ERR_PTR(err); } static int bitmap_create(struct mddev *mddev) { struct bitmap *bitmap = __bitmap_create(mddev, -1); if (IS_ERR(bitmap)) return PTR_ERR(bitmap); mddev->bitmap = bitmap; return 0; } static int bitmap_load(struct mddev *mddev) { int err = 0; sector_t start = 0; sector_t sector = 0; struct bitmap *bitmap = mddev->bitmap; struct md_rdev *rdev; if (!bitmap) goto out; rdev_for_each(rdev, mddev) mddev_create_serial_pool(mddev, rdev); if (mddev_is_clustered(mddev)) mddev->cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes); /* Clear out old bitmap info first: Either there is none, or we * are resuming after someone else has possibly changed things, * so we should forget old cached info. * All chunks should be clean, but some might need_sync. */ while (sector < mddev->resync_max_sectors) { sector_t blocks; bitmap_start_sync(mddev, sector, &blocks, false); sector += blocks; } bitmap_close_sync(mddev); if (mddev->degraded == 0 || bitmap->events_cleared == mddev->events) /* no need to keep dirty bits to optimise a * re-add of a missing device */ start = mddev->recovery_cp; mutex_lock(&mddev->bitmap_info.mutex); err = md_bitmap_init_from_disk(bitmap, start); mutex_unlock(&mddev->bitmap_info.mutex); if (err) goto out; clear_bit(BITMAP_STALE, &bitmap->flags); /* Kick recovery in case any bits were set */ set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery); mddev_set_timeout(mddev, mddev->bitmap_info.daemon_sleep, true); md_wakeup_thread(mddev->thread); bitmap_update_sb(bitmap); if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags)) err = -EIO; out: return err; } /* caller need to free returned bitmap with md_bitmap_free() */ static void *bitmap_get_from_slot(struct mddev *mddev, int slot) { int rv = 0; struct bitmap *bitmap; bitmap = __bitmap_create(mddev, slot); if (IS_ERR(bitmap)) { rv = PTR_ERR(bitmap); return ERR_PTR(rv); } rv = md_bitmap_init_from_disk(bitmap, 0); if (rv) { md_bitmap_free(bitmap); return ERR_PTR(rv); } return bitmap; } /* Loads the bitmap associated with slot and copies the resync information * to our bitmap */ static int bitmap_copy_from_slot(struct mddev *mddev, int slot, sector_t *low, sector_t *high, bool clear_bits) { int rv = 0, i, j; sector_t block, lo = 0, hi = 0; struct bitmap_counts *counts; struct bitmap *bitmap; bitmap = bitmap_get_from_slot(mddev, slot); if (IS_ERR(bitmap)) { pr_err("%s can't get bitmap from slot %d\n", __func__, slot); return -1; } counts = &bitmap->counts; for (j = 0; j < counts->chunks; j++) { block = (sector_t)j << counts->chunkshift; if (md_bitmap_file_test_bit(bitmap, block)) { if (!lo) lo = block; hi = block; md_bitmap_file_clear_bit(bitmap, block); md_bitmap_set_memory_bits(mddev->bitmap, block, 1); md_bitmap_file_set_bit(mddev->bitmap, block); } } if (clear_bits) { bitmap_update_sb(bitmap); /* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... 
*/ for (i = 0; i < bitmap->storage.file_pages; i++) if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING)) set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE); __bitmap_unplug(bitmap); } __bitmap_unplug(mddev->bitmap); *low = lo; *high = hi; md_bitmap_free(bitmap); return rv; } static void bitmap_set_pages(void *data, unsigned long pages) { struct bitmap *bitmap = data; bitmap->counts.pages = pages; } static int bitmap_get_stats(void *data, struct md_bitmap_stats *stats) { struct bitmap_storage *storage; struct bitmap_counts *counts; struct bitmap *bitmap = data; bitmap_super_t *sb; if (!bitmap) return -ENOENT; if (!bitmap->mddev->bitmap_info.external && !bitmap->storage.sb_page) return -EINVAL; sb = kmap_local_page(bitmap->storage.sb_page); stats->sync_size = le64_to_cpu(sb->sync_size); kunmap_local(sb); counts = &bitmap->counts; stats->missing_pages = counts->missing_pages; stats->pages = counts->pages; storage = &bitmap->storage; stats->file_pages = storage->file_pages; stats->file = storage->file; stats->behind_writes = atomic_read(&bitmap->behind_writes); stats->behind_wait = wq_has_sleeper(&bitmap->behind_wait); stats->events_cleared = bitmap->events_cleared; return 0; } static int __bitmap_resize(struct bitmap *bitmap, sector_t blocks, int chunksize, bool init) { /* If chunk_size is 0, choose an appropriate chunk size. * Then possibly allocate new storage space. * Then quiesce, copy bits, replace bitmap, and re-start * * This function is called both to set up the initial bitmap * and to resize the bitmap while the array is active. * If this happens as a result of the array being resized, * chunksize will be zero, and we need to choose a suitable * chunksize, otherwise we use what we are given. */ struct bitmap_storage store; struct bitmap_counts old_counts; unsigned long chunks; sector_t block; sector_t old_blocks, new_blocks; int chunkshift; int ret = 0; long pages; struct bitmap_page *new_bp; if (bitmap->storage.file && !init) { pr_info("md: cannot resize file-based bitmap\n"); return -EINVAL; } if (chunksize == 0) { /* If there is enough space, leave the chunk size unchanged, * else increase by factor of two until there is enough space. */ long bytes; long space = bitmap->mddev->bitmap_info.space; if (space == 0) { /* We don't know how much space there is, so limit * to current size - in sectors. */ bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8); if (!bitmap->mddev->bitmap_info.external) bytes += sizeof(bitmap_super_t); space = DIV_ROUND_UP(bytes, 512); bitmap->mddev->bitmap_info.space = space; } chunkshift = bitmap->counts.chunkshift; chunkshift--; do { /* 'chunkshift' is shift from block size to chunk size */ chunkshift++; chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift); bytes = DIV_ROUND_UP(chunks, 8); if (!bitmap->mddev->bitmap_info.external) bytes += sizeof(bitmap_super_t); } while (bytes > (space << 9) && (chunkshift + BITMAP_BLOCK_SHIFT) < (BITS_PER_BYTE * sizeof(((bitmap_super_t *)0)->chunksize) - 1)); } else chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT; chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift); memset(&store, 0, sizeof(store)); if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file) ret = md_bitmap_storage_alloc(&store, chunks, !bitmap->mddev->bitmap_info.external, mddev_is_clustered(bitmap->mddev) ? 
bitmap->cluster_slot : 0); if (ret) { md_bitmap_file_unmap(&store); goto err; } pages = DIV_ROUND_UP(chunks, PAGE_COUNTER_RATIO); new_bp = kcalloc(pages, sizeof(*new_bp), GFP_KERNEL); ret = -ENOMEM; if (!new_bp) { md_bitmap_file_unmap(&store); goto err; } if (!init) bitmap->mddev->pers->quiesce(bitmap->mddev, 1); store.file = bitmap->storage.file; bitmap->storage.file = NULL; if (store.sb_page && bitmap->storage.sb_page) memcpy(page_address(store.sb_page), page_address(bitmap->storage.sb_page), sizeof(bitmap_super_t)); spin_lock_irq(&bitmap->counts.lock); md_bitmap_file_unmap(&bitmap->storage); bitmap->storage = store; old_counts = bitmap->counts; bitmap->counts.bp = new_bp; bitmap->counts.pages = pages; bitmap->counts.missing_pages = pages; bitmap->counts.chunkshift = chunkshift; bitmap->counts.chunks = chunks; bitmap->mddev->bitmap_info.chunksize = 1UL << (chunkshift + BITMAP_BLOCK_SHIFT); blocks = min(old_counts.chunks << old_counts.chunkshift, chunks << chunkshift); /* For cluster raid, need to pre-allocate bitmap */ if (mddev_is_clustered(bitmap->mddev)) { unsigned long page; for (page = 0; page < pages; page++) { ret = md_bitmap_checkpage(&bitmap->counts, page, 1, 1); if (ret) { unsigned long k; /* deallocate the page memory */ for (k = 0; k < page; k++) { kfree(new_bp[k].map); } kfree(new_bp); /* restore some fields from old_counts */ bitmap->counts.bp = old_counts.bp; bitmap->counts.pages = old_counts.pages; bitmap->counts.missing_pages = old_counts.pages; bitmap->counts.chunkshift = old_counts.chunkshift; bitmap->counts.chunks = old_counts.chunks; bitmap->mddev->bitmap_info.chunksize = 1UL << (old_counts.chunkshift + BITMAP_BLOCK_SHIFT); blocks = old_counts.chunks << old_counts.chunkshift; pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n"); break; } else bitmap->counts.bp[page].count += 1; } } for (block = 0; block < blocks; ) { bitmap_counter_t *bmc_old, *bmc_new; int set; bmc_old = md_bitmap_get_counter(&old_counts, block, &old_blocks, 0); set = bmc_old && NEEDED(*bmc_old); if (set) { bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1); if (bmc_new) { if (*bmc_new == 0) { /* need to set on-disk bits too. */ sector_t end = block + new_blocks; sector_t start = block >> chunkshift; start <<= chunkshift; while (start < end) { md_bitmap_file_set_bit(bitmap, block); start += 1 << chunkshift; } *bmc_new = 2; md_bitmap_count_page(&bitmap->counts, block, 1); md_bitmap_set_pending(&bitmap->counts, block); } *bmc_new |= NEEDED_MASK; } if (new_blocks < old_blocks) old_blocks = new_blocks; } block += old_blocks; } if (bitmap->counts.bp != old_counts.bp) { unsigned long k; for (k = 0; k < old_counts.pages; k++) if (!old_counts.bp[k].hijacked) kfree(old_counts.bp[k].map); kfree(old_counts.bp); } if (!init) { int i; while (block < (chunks << chunkshift)) { bitmap_counter_t *bmc; bmc = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1); if (bmc) { /* new space. It needs to be resynced, so * we set NEEDED_MASK. 
*/ if (*bmc == 0) { *bmc = NEEDED_MASK | 2; md_bitmap_count_page(&bitmap->counts, block, 1); md_bitmap_set_pending(&bitmap->counts, block); } } block += new_blocks; } for (i = 0; i < bitmap->storage.file_pages; i++) set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY); } spin_unlock_irq(&bitmap->counts.lock); if (!init) { __bitmap_unplug(bitmap); bitmap->mddev->pers->quiesce(bitmap->mddev, 0); } ret = 0; err: return ret; } static int bitmap_resize(struct mddev *mddev, sector_t blocks, int chunksize, bool init) { struct bitmap *bitmap = mddev->bitmap; if (!bitmap) return 0; return __bitmap_resize(bitmap, blocks, chunksize, init); } static ssize_t location_show(struct mddev *mddev, char *page) { ssize_t len; if (mddev->bitmap_info.file) len = sprintf(page, "file"); else if (mddev->bitmap_info.offset) len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset); else len = sprintf(page, "none"); len += sprintf(page+len, "\n"); return len; } static ssize_t location_store(struct mddev *mddev, const char *buf, size_t len) { int rv; rv = mddev_suspend_and_lock(mddev); if (rv) return rv; if (mddev->pers) { if (mddev->recovery || mddev->sync_thread) { rv = -EBUSY; goto out; } } if (mddev->bitmap || mddev->bitmap_info.file || mddev->bitmap_info.offset) { /* bitmap already configured. Only option is to clear it */ if (strncmp(buf, "none", 4) != 0) { rv = -EBUSY; goto out; } bitmap_destroy(mddev); mddev->bitmap_info.offset = 0; if (mddev->bitmap_info.file) { struct file *f = mddev->bitmap_info.file; mddev->bitmap_info.file = NULL; fput(f); } } else { /* No bitmap, OK to set a location */ long long offset; if (strncmp(buf, "none", 4) == 0) /* nothing to be done */; else if (strncmp(buf, "file:", 5) == 0) { /* Not supported yet */ rv = -EINVAL; goto out; } else { if (buf[0] == '+') rv = kstrtoll(buf+1, 10, &offset); else rv = kstrtoll(buf, 10, &offset); if (rv) goto out; if (offset == 0) { rv = -EINVAL; goto out; } if (mddev->bitmap_info.external == 0 && mddev->major_version == 0 && offset != mddev->bitmap_info.default_offset) { rv = -EINVAL; goto out; } mddev->bitmap_info.offset = offset; rv = bitmap_create(mddev); if (rv) goto out; rv = bitmap_load(mddev); if (rv) { mddev->bitmap_info.offset = 0; bitmap_destroy(mddev); goto out; } } } if (!mddev->external) { /* Ensure new bitmap info is stored in * metadata promptly. */ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); md_wakeup_thread(mddev->thread); } rv = 0; out: mddev_unlock_and_resume(mddev); if (rv) return rv; return len; } static struct md_sysfs_entry bitmap_location = __ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store); /* 'bitmap/space' is the space available at 'location' for the * bitmap. This allows the kernel to know when it is safe to * resize the bitmap to match a resized array. */ static ssize_t space_show(struct mddev *mddev, char *page) { return sprintf(page, "%lu\n", mddev->bitmap_info.space); } static ssize_t space_store(struct mddev *mddev, const char *buf, size_t len) { struct bitmap *bitmap; unsigned long sectors; int rv; rv = kstrtoul(buf, 10, &sectors); if (rv) return rv; if (sectors == 0) return -EINVAL; bitmap = mddev->bitmap; if (bitmap && sectors < (bitmap->storage.bytes + 511) >> 9) return -EFBIG; /* Bitmap is too big for this small space */ /* could make sure it isn't too big, but that isn't really * needed - user-space should be careful. 
*/ mddev->bitmap_info.space = sectors; return len; } static struct md_sysfs_entry bitmap_space = __ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store); static ssize_t timeout_show(struct mddev *mddev, char *page) { ssize_t len; unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ; unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ; len = sprintf(page, "%lu", secs); if (jifs) len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs)); len += sprintf(page+len, "\n"); return len; } static ssize_t timeout_store(struct mddev *mddev, const char *buf, size_t len) { /* timeout can be set at any time */ unsigned long timeout; int rv = strict_strtoul_scaled(buf, &timeout, 4); if (rv) return rv; /* just to make sure we don't overflow... */ if (timeout >= LONG_MAX / HZ) return -EINVAL; timeout = timeout * HZ / 10000; if (timeout >= MAX_SCHEDULE_TIMEOUT) timeout = MAX_SCHEDULE_TIMEOUT-1; if (timeout < 1) timeout = 1; mddev->bitmap_info.daemon_sleep = timeout; mddev_set_timeout(mddev, timeout, false); md_wakeup_thread(mddev->thread); return len; } static struct md_sysfs_entry bitmap_timeout = __ATTR(time_base, S_IRUGO|S_IWUSR, timeout_show, timeout_store); static ssize_t backlog_show(struct mddev *mddev, char *page) { return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind); } static ssize_t backlog_store(struct mddev *mddev, const char *buf, size_t len) { unsigned long backlog; unsigned long old_mwb = mddev->bitmap_info.max_write_behind; struct md_rdev *rdev; bool has_write_mostly = false; int rv = kstrtoul(buf, 10, &backlog); if (rv) return rv; if (backlog > COUNTER_MAX) return -EINVAL; rv = mddev_suspend_and_lock(mddev); if (rv) return rv; /* * Without write mostly device, it doesn't make sense to set * backlog for max_write_behind. */ rdev_for_each(rdev, mddev) { if (test_bit(WriteMostly, &rdev->flags)) { has_write_mostly = true; break; } } if (!has_write_mostly) { pr_warn_ratelimited("%s: can't set backlog, no write mostly device available\n", mdname(mddev)); mddev_unlock(mddev); return -EINVAL; } mddev->bitmap_info.max_write_behind = backlog; if (!backlog && mddev->serial_info_pool) { /* serial_info_pool is not needed if backlog is zero */ if (!mddev->serialize_policy) mddev_destroy_serial_pool(mddev, NULL); } else if (backlog && !mddev->serial_info_pool) { /* serial_info_pool is needed since backlog is not zero */ rdev_for_each(rdev, mddev) mddev_create_serial_pool(mddev, rdev); } if (old_mwb != backlog) bitmap_update_sb(mddev->bitmap); mddev_unlock_and_resume(mddev); return len; } static struct md_sysfs_entry bitmap_backlog = __ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store); static ssize_t chunksize_show(struct mddev *mddev, char *page) { return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize); } static ssize_t chunksize_store(struct mddev *mddev, const char *buf, size_t len) { /* Can only be changed when no bitmap is active */ int rv; unsigned long csize; if (mddev->bitmap) return -EBUSY; rv = kstrtoul(buf, 10, &csize); if (rv) return rv; if (csize < 512 || !is_power_of_2(csize)) return -EINVAL; if (BITS_PER_LONG > 32 && csize >= (1ULL << (BITS_PER_BYTE * sizeof(((bitmap_super_t *)0)->chunksize)))) return -EOVERFLOW; mddev->bitmap_info.chunksize = csize; return len; } static struct md_sysfs_entry bitmap_chunksize = __ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store); static ssize_t metadata_show(struct mddev *mddev, char *page) { if (mddev_is_clustered(mddev)) return sprintf(page, "clustered\n"); return sprintf(page, "%s\n", 
(mddev->bitmap_info.external ? "external" : "internal")); } static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len) { if (mddev->bitmap || mddev->bitmap_info.file || mddev->bitmap_info.offset) return -EBUSY; if (strncmp(buf, "external", 8) == 0) mddev->bitmap_info.external = 1; else if ((strncmp(buf, "internal", 8) == 0) || (strncmp(buf, "clustered", 9) == 0)) mddev->bitmap_info.external = 0; else return -EINVAL; return len; } static struct md_sysfs_entry bitmap_metadata = __ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store); static ssize_t can_clear_show(struct mddev *mddev, char *page) { int len; struct bitmap *bitmap; spin_lock(&mddev->lock); bitmap = mddev->bitmap; if (bitmap) len = sprintf(page, "%s\n", (bitmap->need_sync ? "false" : "true")); else len = sprintf(page, "\n"); spin_unlock(&mddev->lock); return len; } static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len) { struct bitmap *bitmap = mddev->bitmap; if (!bitmap) return -ENOENT; if (strncmp(buf, "false", 5) == 0) { bitmap->need_sync = 1; return len; } if (strncmp(buf, "true", 4) == 0) { if (mddev->degraded) return -EBUSY; bitmap->need_sync = 0; return len; } return -EINVAL; } static struct md_sysfs_entry bitmap_can_clear = __ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store); static ssize_t behind_writes_used_show(struct mddev *mddev, char *page) { ssize_t ret; struct bitmap *bitmap; spin_lock(&mddev->lock); bitmap = mddev->bitmap; if (!bitmap) ret = sprintf(page, "0\n"); else ret = sprintf(page, "%lu\n", bitmap->behind_writes_used); spin_unlock(&mddev->lock); return ret; } static ssize_t behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len) { struct bitmap *bitmap = mddev->bitmap; if (bitmap) bitmap->behind_writes_used = 0; return len; } static struct md_sysfs_entry max_backlog_used = __ATTR(max_backlog_used, S_IRUGO | S_IWUSR, behind_writes_used_show, behind_writes_used_reset); static struct attribute *md_bitmap_attrs[] = { &bitmap_location.attr, &bitmap_space.attr, &bitmap_timeout.attr, &bitmap_backlog.attr, &bitmap_chunksize.attr, &bitmap_metadata.attr, &bitmap_can_clear.attr, &max_backlog_used.attr, NULL }; const struct attribute_group md_bitmap_group = { .name = "bitmap", .attrs = md_bitmap_attrs, }; static struct bitmap_operations bitmap_ops = { .enabled = bitmap_enabled, .create = bitmap_create, .resize = bitmap_resize, .load = bitmap_load, .destroy = bitmap_destroy, .flush = bitmap_flush, .write_all = bitmap_write_all, .dirty_bits = bitmap_dirty_bits, .unplug = bitmap_unplug, .daemon_work = bitmap_daemon_work, .start_behind_write = bitmap_start_behind_write, .end_behind_write = bitmap_end_behind_write, .wait_behind_writes = bitmap_wait_behind_writes, .start_write = bitmap_start_write, .end_write = bitmap_end_write, .start_sync = bitmap_start_sync, .end_sync = bitmap_end_sync, .cond_end_sync = bitmap_cond_end_sync, .close_sync = bitmap_close_sync, .update_sb = bitmap_update_sb, .get_stats = bitmap_get_stats, .sync_with_cluster = bitmap_sync_with_cluster, .get_from_slot = bitmap_get_from_slot, .copy_from_slot = bitmap_copy_from_slot, .set_pages = bitmap_set_pages, .free = md_bitmap_free, }; void mddev_set_bitmap_ops(struct mddev *mddev) { mddev->bitmap_ops = &bitmap_ops; }
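/*
 * Illustrative sketch only (not part of md-bitmap.c): the rest of MD reaches the
 * static helpers above through the ops table installed by mddev_set_bitmap_ops().
 * The helper below is hypothetical; only the mddev->bitmap_ops field, the
 * start_write/end_write callback names and their signatures are taken from the
 * code above.
 */
static void example_bracket_write(struct mddev *mddev, sector_t offset,
                                  unsigned long sectors)
{
        /* dirty the affected bitmap chunks before the write is issued */
        if (mddev->bitmap_ops)
                mddev->bitmap_ops->start_write(mddev, offset, sectors);

        /* ... submit the member-device bios for [offset, offset + sectors) ... */

        /* drop the per-chunk counters once the write has completed */
        if (mddev->bitmap_ops)
                mddev->bitmap_ops->end_write(mddev, offset, sectors);
}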
// SPDX-License-Identifier: GPL-2.0-only /* DVB USB framework compliant Linux driver for the HanfTek UMT-010 USB2.0 * DVB-T receiver. * * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) * * see Documentation/driver-api/media/drivers/dvb-usb.rst for more information */ #include "dibusb.h" #include "mt352.h" DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static int umt_mt352_demod_init(struct dvb_frontend *fe) { static u8 mt352_clock_config[] = { 0x89, 0xb8, 0x2d }; static u8 mt352_reset[] = { 0x50, 0x80 }; static u8 mt352_mclk_ratio[] = { 0x8b, 0x00 }; static u8 mt352_adc_ctl_1_cfg[] = { 0x8E, 0x40 }; static u8 mt352_agc_cfg[] = { 0x67, 0x10, 0xa0 }; static u8 mt352_sec_agc_cfg1[] = { 0x6a, 0xff }; static u8 mt352_sec_agc_cfg2[] = { 0x6d, 0xff }; static u8 mt352_sec_agc_cfg3[] = { 0x70, 0x40 }; static u8 mt352_sec_agc_cfg4[] = { 0x7b, 0x03 }; static u8 mt352_sec_agc_cfg5[] = { 0x7d, 0x0f }; static u8 mt352_acq_ctl[] = { 0x53, 0x50 }; static u8 mt352_input_freq_1[] = { 0x56, 0x31, 0x06 }; mt352_write(fe, mt352_clock_config, sizeof(mt352_clock_config)); udelay(2000); mt352_write(fe, mt352_reset, sizeof(mt352_reset)); mt352_write(fe, mt352_mclk_ratio, sizeof(mt352_mclk_ratio)); mt352_write(fe, mt352_adc_ctl_1_cfg, sizeof(mt352_adc_ctl_1_cfg)); mt352_write(fe, mt352_agc_cfg, sizeof(mt352_agc_cfg)); mt352_write(fe, mt352_sec_agc_cfg1, sizeof(mt352_sec_agc_cfg1)); mt352_write(fe, mt352_sec_agc_cfg2, sizeof(mt352_sec_agc_cfg2)); mt352_write(fe, mt352_sec_agc_cfg3, sizeof(mt352_sec_agc_cfg3)); mt352_write(fe, mt352_sec_agc_cfg4, sizeof(mt352_sec_agc_cfg4)); mt352_write(fe, mt352_sec_agc_cfg5, sizeof(mt352_sec_agc_cfg5)); mt352_write(fe, mt352_acq_ctl, sizeof(mt352_acq_ctl)); mt352_write(fe, mt352_input_freq_1, sizeof(mt352_input_freq_1)); return 0; } static int umt_mt352_frontend_attach(struct dvb_usb_adapter *adap) { struct mt352_config umt_config; memset(&umt_config,0,sizeof(struct mt352_config)); umt_config.demod_init = umt_mt352_demod_init; umt_config.demod_address = 0xf; adap->fe_adap[0].fe = dvb_attach(mt352_attach, &umt_config, &adap->dev->i2c_adap); return 0; } static int umt_tuner_attach (struct dvb_usb_adapter *adap) { dvb_attach(dvb_pll_attach, adap->fe_adap[0].fe, 0x61, NULL, DVB_PLL_TUA6034); return 0; } /* USB Driver stuff */ static struct dvb_usb_device_properties umt_properties; static int umt_probe(struct usb_interface *intf, const struct usb_device_id *id) { if (0 == dvb_usb_device_init(intf, &umt_properties, THIS_MODULE, NULL, adapter_nr)) return 0; return -EINVAL; } /* do not change the order of the ID table */ enum { HANFTEK_UMT_010_COLD, HANFTEK_UMT_010_WARM, }; static const struct usb_device_id umt_table[] = { DVB_USB_DEV(HANFTEK, HANFTEK_UMT_010_COLD), DVB_USB_DEV(HANFTEK, HANFTEK_UMT_010_WARM), { } }; MODULE_DEVICE_TABLE (usb, umt_table); static struct dvb_usb_device_properties umt_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .usb_ctrl = CYPRESS_FX2, .firmware = "dvb-usb-umt-010-02.fw", .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{
.streaming_ctrl = dibusb2_0_streaming_ctrl, .frontend_attach = umt_mt352_frontend_attach, .tuner_attach = umt_tuner_attach, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = MAX_NO_URBS_FOR_DATA_STREAM, .endpoint = 0x06, .u = { .bulk = { .buffersize = 512, } } }, }}, .size_of_priv = sizeof(struct dibusb_state), } }, .power_ctrl = dibusb_power_ctrl, .i2c_algo = &dibusb_i2c_algo, .generic_bulk_ctrl_endpoint = 0x01, .num_device_descs = 1, .devices = { { "Hanftek UMT-010 DVB-T USB2.0", { &umt_table[HANFTEK_UMT_010_COLD], NULL }, { &umt_table[HANFTEK_UMT_010_WARM], NULL }, }, } }; static struct usb_driver umt_driver = { .name = "dvb_usb_umt_010", .probe = umt_probe, .disconnect = dvb_usb_device_exit, .id_table = umt_table, }; module_usb_driver(umt_driver); MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>"); MODULE_DESCRIPTION("Driver for HanfTek UMT 010 USB2.0 DVB-T device"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL");
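/*
 * Hypothetical example (disabled, illustration only): adding a clone of this
 * stick would extend the enum, the USB ID table and the .devices[] list in
 * lock-step, following the exact pattern used above for the UMT-010.
 * EXAMPLE_VENDOR and the EXAMPLE_CLONE_* names are invented and do not exist
 * in the kernel.
 */
#if 0
enum {
        HANFTEK_UMT_010_COLD,
        HANFTEK_UMT_010_WARM,
        EXAMPLE_CLONE_COLD,
        EXAMPLE_CLONE_WARM,
};

static const struct usb_device_id umt_table[] = {
        DVB_USB_DEV(HANFTEK, HANFTEK_UMT_010_COLD),
        DVB_USB_DEV(HANFTEK, HANFTEK_UMT_010_WARM),
        DVB_USB_DEV(EXAMPLE_VENDOR, EXAMPLE_CLONE_COLD),
        DVB_USB_DEV(EXAMPLE_VENDOR, EXAMPLE_CLONE_WARM),
        { }
};
#endif
/* ...and in umt_properties: bump .num_device_descs to 2 and add a second
 * .devices[] entry pairing &umt_table[EXAMPLE_CLONE_COLD] /
 * &umt_table[EXAMPLE_CLONE_WARM], mirroring the UMT-010 entry above. */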
// SPDX-License-Identifier: GPL-2.0-only /* * fs/anon_inodes.c * * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org> * * Thanks to Arnd Bergmann for code review and suggestions. * More changes for Thomas Gleixner suggestions. * */ #include <linux/cred.h> #include <linux/file.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/magic.h> #include <linux/anon_inodes.h> #include <linux/pseudo_fs.h> #include <linux/uaccess.h> #include "internal.h" static struct vfsmount *anon_inode_mnt __ro_after_init; static struct inode *anon_inode_inode __ro_after_init; /* * User space expects anonymous inodes to have no file type in st_mode. * * In particular, 'lsof' has this legacy logic: * * type = s->st_mode & S_IFMT; * switch (type) { * ... * case 0: * if (!strcmp(p, "anon_inode")) * Lf->ntype = Ntype = N_ANON_INODE; * * to detect our old anon_inode logic. * * Rather than mess with our internal sane inode data, just fix it * up here in getattr() by masking off the format bits. */ int anon_inode_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { struct inode *inode = d_inode(path->dentry); generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat); stat->mode &= ~S_IFMT; return 0; } int anon_inode_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { return -EOPNOTSUPP; } static const struct inode_operations anon_inode_operations = { .getattr = anon_inode_getattr, .setattr = anon_inode_setattr, }; /* * anon_inodefs_dname() is called from d_path(). 
*/ static char *anon_inodefs_dname(struct dentry *dentry, char *buffer, int buflen) { return dynamic_dname(buffer, buflen, "anon_inode:%s", dentry->d_name.name); } static const struct dentry_operations anon_inodefs_dentry_operations = { .d_dname = anon_inodefs_dname, }; static int anon_inodefs_init_fs_context(struct fs_context *fc) { struct pseudo_fs_context *ctx = init_pseudo(fc, ANON_INODE_FS_MAGIC); if (!ctx) return -ENOMEM; fc->s_iflags |= SB_I_NOEXEC; fc->s_iflags |= SB_I_NODEV; ctx->dops = &anon_inodefs_dentry_operations; return 0; } static struct file_system_type anon_inode_fs_type = { .name = "anon_inodefs", .init_fs_context = anon_inodefs_init_fs_context, .kill_sb = kill_anon_super, }; static struct inode *anon_inode_make_secure_inode( const char *name, const struct inode *context_inode) { struct inode *inode; int error; inode = alloc_anon_inode(anon_inode_mnt->mnt_sb); if (IS_ERR(inode)) return inode; inode->i_flags &= ~S_PRIVATE; inode->i_op = &anon_inode_operations; error = security_inode_init_security_anon(inode, &QSTR(name), context_inode); if (error) { iput(inode); return ERR_PTR(error); } return inode; } static struct file *__anon_inode_getfile(const char *name, const struct file_operations *fops, void *priv, int flags, const struct inode *context_inode, bool make_inode) { struct inode *inode; struct file *file; if (fops->owner && !try_module_get(fops->owner)) return ERR_PTR(-ENOENT); if (make_inode) { inode = anon_inode_make_secure_inode(name, context_inode); if (IS_ERR(inode)) { file = ERR_CAST(inode); goto err; } } else { inode = anon_inode_inode; if (IS_ERR(inode)) { file = ERR_PTR(-ENODEV); goto err; } /* * We know the anon_inode inode count is always * greater than zero, so ihold() is safe. */ ihold(inode); } file = alloc_file_pseudo(inode, anon_inode_mnt, name, flags & (O_ACCMODE | O_NONBLOCK), fops); if (IS_ERR(file)) goto err_iput; file->f_mapping = inode->i_mapping; file->private_data = priv; return file; err_iput: iput(inode); err: module_put(fops->owner); return file; } /** * anon_inode_getfile - creates a new file instance by hooking it up to an * anonymous inode, and a dentry that describe the "class" * of the file * * @name: [in] name of the "class" of the new file * @fops: [in] file operations for the new file * @priv: [in] private data for the new file (will be file's private_data) * @flags: [in] flags * * Creates a new file by hooking it on a single inode. This is useful for files * that do not need to have a full-fledged inode in order to operate correctly. * All the files created with anon_inode_getfile() will share a single inode, * hence saving memory and avoiding code duplication for the file/inode/dentry * setup. Returns the newly created file* or an error pointer. */ struct file *anon_inode_getfile(const char *name, const struct file_operations *fops, void *priv, int flags) { return __anon_inode_getfile(name, fops, priv, flags, NULL, false); } EXPORT_SYMBOL_GPL(anon_inode_getfile); /** * anon_inode_getfile_fmode - creates a new file instance by hooking it up to an * anonymous inode, and a dentry that describe the "class" * of the file * * @name: [in] name of the "class" of the new file * @fops: [in] file operations for the new file * @priv: [in] private data for the new file (will be file's private_data) * @flags: [in] flags * @f_mode: [in] fmode * * Creates a new file by hooking it on a single inode. This is useful for files * that do not need to have a full-fledged inode in order to operate correctly. 
* All the files created with anon_inode_getfile() will share a single inode, * hence saving memory and avoiding code duplication for the file/inode/dentry * setup. Allows setting the fmode. Returns the newly created file* or an error * pointer. */ struct file *anon_inode_getfile_fmode(const char *name, const struct file_operations *fops, void *priv, int flags, fmode_t f_mode) { struct file *file; file = __anon_inode_getfile(name, fops, priv, flags, NULL, false); if (!IS_ERR(file)) file->f_mode |= f_mode; return file; } EXPORT_SYMBOL_GPL(anon_inode_getfile_fmode); /** * anon_inode_create_getfile - Like anon_inode_getfile(), but creates a new * !S_PRIVATE anon inode rather than reuse the * singleton anon inode and calls the * inode_init_security_anon() LSM hook. * * @name: [in] name of the "class" of the new file * @fops: [in] file operations for the new file * @priv: [in] private data for the new file (will be file's private_data) * @flags: [in] flags * @context_inode: * [in] the logical relationship with the new inode (optional) * * Create a new anonymous inode and file pair. This can be done for two * reasons: * * - for the inode to have its own security context, so that LSMs can enforce * policy on the inode's creation; * * - if the caller needs a unique inode, for example in order to customize * the size returned by fstat() * * The LSM may use @context_inode in inode_init_security_anon(), but a * reference to it is not held. * * Returns the newly created file* or an error pointer. */ struct file *anon_inode_create_getfile(const char *name, const struct file_operations *fops, void *priv, int flags, const struct inode *context_inode) { return __anon_inode_getfile(name, fops, priv, flags, context_inode, true); } EXPORT_SYMBOL_GPL(anon_inode_create_getfile); static int __anon_inode_getfd(const char *name, const struct file_operations *fops, void *priv, int flags, const struct inode *context_inode, bool make_inode) { int error, fd; struct file *file; error = get_unused_fd_flags(flags); if (error < 0) return error; fd = error; file = __anon_inode_getfile(name, fops, priv, flags, context_inode, make_inode); if (IS_ERR(file)) { error = PTR_ERR(file); goto err_put_unused_fd; } fd_install(fd, file); return fd; err_put_unused_fd: put_unused_fd(fd); return error; } /** * anon_inode_getfd - creates a new file instance by hooking it up to * an anonymous inode and a dentry that describe * the "class" of the file * * @name: [in] name of the "class" of the new file * @fops: [in] file operations for the new file * @priv: [in] private data for the new file (will be file's private_data) * @flags: [in] flags * * Creates a new file by hooking it on a single inode. This is * useful for files that do not need to have a full-fledged inode in * order to operate correctly. All the files created with * anon_inode_getfd() will use the same singleton inode, reducing * memory use and avoiding code duplication for the file/inode/dentry * setup. Returns a newly created file descriptor or an error code. */ int anon_inode_getfd(const char *name, const struct file_operations *fops, void *priv, int flags) { return __anon_inode_getfd(name, fops, priv, flags, NULL, false); } EXPORT_SYMBOL_GPL(anon_inode_getfd); /** * anon_inode_create_getfd - Like anon_inode_getfd(), but creates a new * !S_PRIVATE anon inode rather than reuse the singleton anon inode, and calls * the inode_init_security_anon() LSM hook. 
* * @name: [in] name of the "class" of the new file * @fops: [in] file operations for the new file * @priv: [in] private data for the new file (will be file's private_data) * @flags: [in] flags * @context_inode: * [in] the logical relationship with the new inode (optional) * * Create a new anonymous inode and file pair. This can be done for two * reasons: * * - for the inode to have its own security context, so that LSMs can enforce * policy on the inode's creation; * * - if the caller needs a unique inode, for example in order to customize * the size returned by fstat() * * The LSM may use @context_inode in inode_init_security_anon(), but a * reference to it is not held. * * Returns a newly created file descriptor or an error code. */ int anon_inode_create_getfd(const char *name, const struct file_operations *fops, void *priv, int flags, const struct inode *context_inode) { return __anon_inode_getfd(name, fops, priv, flags, context_inode, true); } static int __init anon_inode_init(void) { anon_inode_mnt = kern_mount(&anon_inode_fs_type); if (IS_ERR(anon_inode_mnt)) panic("anon_inode_init() kernel mount failed (%ld)\n", PTR_ERR(anon_inode_mnt)); anon_inode_inode = alloc_anon_inode(anon_inode_mnt->mnt_sb); if (IS_ERR(anon_inode_inode)) panic("anon_inode_init() inode allocation failed (%ld)\n", PTR_ERR(anon_inode_inode)); anon_inode_inode->i_op = &anon_inode_operations; return 0; } fs_initcall(anon_inode_init);
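/*
 * Hedged usage sketch (not part of fs/anon_inodes.c): a typical caller hands
 * user space a descriptor backed by the shared anonymous inode.  The "demo"
 * names and the mostly-empty file_operations are invented; only
 * anon_inode_getfd() and its argument order come from the code above.
 */
static const struct file_operations demo_anon_fops = {
        .owner = THIS_MODULE,
        /* .read, .poll, .release, ... as required by the caller */
};

static int demo_create_fd(void *state)
{
        /* returns an installed fd on success, a negative errno on failure */
        return anon_inode_getfd("[demo]", &demo_anon_fops, state,
                                O_RDWR | O_CLOEXEC);
}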
#include <asm/ioctls.h>
#include <linux/io_uring/net.h>
#include <net/sock.h>

#include "uring_cmd.h"

static inline int io_uring_cmd_getsockopt(struct socket *sock,
                                          struct io_uring_cmd *cmd,
                                          unsigned int issue_flags)
{
        const struct io_uring_sqe *sqe = cmd->sqe;
        bool compat = !!(issue_flags & IO_URING_F_COMPAT);
        int optlen, optname, level, err;
        void __user *optval;

        level = READ_ONCE(sqe->level);
        if (level != SOL_SOCKET)
                return -EOPNOTSUPP;

        optval = u64_to_user_ptr(READ_ONCE(sqe->optval));
        optname = READ_ONCE(sqe->optname);
        optlen = READ_ONCE(sqe->optlen);

        err = do_sock_getsockopt(sock, compat, level, optname,
                                 USER_SOCKPTR(optval),
                                 KERNEL_SOCKPTR(&optlen));
        if (err)
                return err;

        /* On success, return optlen */
        return optlen;
}

static inline int io_uring_cmd_setsockopt(struct socket *sock,
                                          struct io_uring_cmd *cmd,
                                          unsigned int issue_flags)
{
        const struct io_uring_sqe *sqe = cmd->sqe;
        bool compat = !!(issue_flags & IO_URING_F_COMPAT);
        int optname, optlen, level;
        void __user *optval;
        sockptr_t optval_s;

        optval = u64_to_user_ptr(READ_ONCE(sqe->optval));
        optname = READ_ONCE(sqe->optname);
        optlen = READ_ONCE(sqe->optlen);
        level = READ_ONCE(sqe->level);
        optval_s = USER_SOCKPTR(optval);

        return do_sock_setsockopt(sock, compat, level, optname, optval_s,
                                  optlen);
}

int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
        struct socket *sock = cmd->file->private_data;
        struct sock *sk = sock->sk;
        struct proto *prot = READ_ONCE(sk->sk_prot);
        int ret, arg = 0;

        if (!prot || !prot->ioctl)
                return -EOPNOTSUPP;

        switch (cmd->cmd_op) {
        case SOCKET_URING_OP_SIOCINQ:
                ret = prot->ioctl(sk, SIOCINQ, &arg);
                if (ret)
                        return ret;
                return arg;
        case SOCKET_URING_OP_SIOCOUTQ:
                ret = prot->ioctl(sk, SIOCOUTQ, &arg);
                if (ret)
                        return ret;
                return arg;
        case SOCKET_URING_OP_GETSOCKOPT:
                return io_uring_cmd_getsockopt(sock, cmd, issue_flags);
        case SOCKET_URING_OP_SETSOCKOPT:
                return io_uring_cmd_setsockopt(sock, cmd, issue_flags);
        default:
                return -EOPNOTSUPP;
        }
}
EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
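/*
 * Hedged user-space sketch (not part of the kernel file above): querying the
 * unread byte count of a socket through the SOCKET_URING_OP_SIOCINQ path
 * handled by io_uring_cmd_sock().  It assumes liburing for ring setup and
 * submission and that the io_uring uapi header bundled with liburing provides
 * IORING_OP_URING_CMD and the SOCKET_URING_OP_* constants; the result arrives
 * in cqe->res, mirroring the "return arg" in the kernel code above.
 */
#include <string.h>
#include <liburing.h>

static int demo_sock_inq(struct io_uring *ring, int sockfd)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        int ret;

        if (!sqe)
                return -EBUSY;
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_URING_CMD;
        sqe->fd = sockfd;                       /* plain (unregistered) socket fd */
        sqe->cmd_op = SOCKET_URING_OP_SIOCINQ;  /* dispatched by io_uring_cmd_sock() */

        io_uring_submit(ring);
        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret < 0)
                return ret;
        ret = cqe->res;                         /* >= 0: bytes waiting to be read */
        io_uring_cqe_seen(ring, cqe);
        return ret;
}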
// SPDX-License-Identifier: GPL-2.0-only /* * Line 6 Linux USB driver * * Copyright (C) 2004-2010 Markus Grabner (line6@grabner-graz.at) */ #include <linux/slab.h> #include <linux/export.h> #include <sound/core.h> #include <sound/control.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include "capture.h" #include "driver.h" #include "playback.h" /* impulse response volume controls */ static int snd_line6_impulse_volume_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 255; return 0; } static int snd_line6_impulse_volume_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = line6pcm->impulse_volume; return 0; } static int snd_line6_impulse_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol); int value = ucontrol->value.integer.value[0]; int err; if (line6pcm->impulse_volume == value) return 0; line6pcm->impulse_volume = 
value; if (value > 0) { err = line6_pcm_acquire(line6pcm, LINE6_STREAM_IMPULSE, true); if (err < 0) { line6pcm->impulse_volume = 0; return err; } } else { line6_pcm_release(line6pcm, LINE6_STREAM_IMPULSE); } return 1; } /* impulse response period controls */ static int snd_line6_impulse_period_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 2000; return 0; } static int snd_line6_impulse_period_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = line6pcm->impulse_period; return 0; } static int snd_line6_impulse_period_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol); int value = ucontrol->value.integer.value[0]; if (line6pcm->impulse_period == value) return 0; line6pcm->impulse_period = value; return 1; } /* Unlink all currently active URBs. */ static void line6_unlink_audio_urbs(struct snd_line6_pcm *line6pcm, struct line6_pcm_stream *pcms) { int i; for (i = 0; i < line6pcm->line6->iso_buffers; i++) { if (test_bit(i, &pcms->active_urbs)) { if (!test_and_set_bit(i, &pcms->unlink_urbs)) usb_unlink_urb(pcms->urbs[i]); } } } /* Wait until unlinking of all currently active URBs has been finished. */ static void line6_wait_clear_audio_urbs(struct snd_line6_pcm *line6pcm, struct line6_pcm_stream *pcms) { int timeout = HZ; int i; int alive; do { alive = 0; for (i = 0; i < line6pcm->line6->iso_buffers; i++) { if (test_bit(i, &pcms->active_urbs)) alive++; } if (!alive) break; set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(1); } while (--timeout > 0); if (alive) dev_err(line6pcm->line6->ifcdev, "timeout: still %d active urbs..\n", alive); } static inline struct line6_pcm_stream * get_stream(struct snd_line6_pcm *line6pcm, int direction) { return (direction == SNDRV_PCM_STREAM_PLAYBACK) ? &line6pcm->out : &line6pcm->in; } /* allocate a buffer if not opened yet; * call this in line6pcm.state_mutex */ static int line6_buffer_acquire(struct snd_line6_pcm *line6pcm, struct line6_pcm_stream *pstr, int direction, int type) { const int pkt_size = (direction == SNDRV_PCM_STREAM_PLAYBACK) ? 
line6pcm->max_packet_size_out : line6pcm->max_packet_size_in; /* Invoked multiple times in a row so allocate once only */ if (!test_and_set_bit(type, &pstr->opened) && !pstr->buffer) { pstr->buffer = kmalloc(array3_size(line6pcm->line6->iso_buffers, LINE6_ISO_PACKETS, pkt_size), GFP_KERNEL); if (!pstr->buffer) return -ENOMEM; } return 0; } /* free a buffer if all streams are closed; * call this in line6pcm.state_mutex */ static void line6_buffer_release(struct snd_line6_pcm *line6pcm, struct line6_pcm_stream *pstr, int type) { clear_bit(type, &pstr->opened); if (!pstr->opened) { line6_wait_clear_audio_urbs(line6pcm, pstr); kfree(pstr->buffer); pstr->buffer = NULL; } } /* start a PCM stream */ static int line6_stream_start(struct snd_line6_pcm *line6pcm, int direction, int type) { unsigned long flags; struct line6_pcm_stream *pstr = get_stream(line6pcm, direction); int ret = 0; spin_lock_irqsave(&pstr->lock, flags); if (!test_and_set_bit(type, &pstr->running) && !(pstr->active_urbs || pstr->unlink_urbs)) { pstr->count = 0; /* Submit all currently available URBs */ if (direction == SNDRV_PCM_STREAM_PLAYBACK) ret = line6_submit_audio_out_all_urbs(line6pcm); else ret = line6_submit_audio_in_all_urbs(line6pcm); } if (ret < 0) clear_bit(type, &pstr->running); spin_unlock_irqrestore(&pstr->lock, flags); return ret; } /* stop a PCM stream; this doesn't sync with the unlinked URBs */ static void line6_stream_stop(struct snd_line6_pcm *line6pcm, int direction, int type) { unsigned long flags; struct line6_pcm_stream *pstr = get_stream(line6pcm, direction); spin_lock_irqsave(&pstr->lock, flags); clear_bit(type, &pstr->running); if (!pstr->running) { spin_unlock_irqrestore(&pstr->lock, flags); line6_unlink_audio_urbs(line6pcm, pstr); spin_lock_irqsave(&pstr->lock, flags); if (direction == SNDRV_PCM_STREAM_CAPTURE) { line6pcm->prev_fbuf = NULL; line6pcm->prev_fsize = 0; } } spin_unlock_irqrestore(&pstr->lock, flags); } /* common PCM trigger callback */ int snd_line6_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream); struct snd_pcm_substream *s; int err; clear_bit(LINE6_FLAG_PREPARED, &line6pcm->flags); snd_pcm_group_for_each_entry(s, substream) { if (s->pcm->card != substream->pcm->card) continue; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: if (s->stream == SNDRV_PCM_STREAM_CAPTURE && (line6pcm->line6->properties->capabilities & LINE6_CAP_IN_NEEDS_OUT)) { err = line6_stream_start(line6pcm, SNDRV_PCM_STREAM_PLAYBACK, LINE6_STREAM_CAPTURE_HELPER); if (err < 0) return err; } err = line6_stream_start(line6pcm, s->stream, LINE6_STREAM_PCM); if (err < 0) return err; break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: if (s->stream == SNDRV_PCM_STREAM_CAPTURE && (line6pcm->line6->properties->capabilities & LINE6_CAP_IN_NEEDS_OUT)) { line6_stream_stop(line6pcm, SNDRV_PCM_STREAM_PLAYBACK, LINE6_STREAM_CAPTURE_HELPER); } line6_stream_stop(line6pcm, s->stream, LINE6_STREAM_PCM); break; case SNDRV_PCM_TRIGGER_PAUSE_PUSH: if (s->stream != SNDRV_PCM_STREAM_PLAYBACK) return -EINVAL; set_bit(LINE6_FLAG_PAUSE_PLAYBACK, &line6pcm->flags); break; case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: if (s->stream != SNDRV_PCM_STREAM_PLAYBACK) return -EINVAL; clear_bit(LINE6_FLAG_PAUSE_PLAYBACK, &line6pcm->flags); break; default: return -EINVAL; } } return 0; } /* common PCM pointer callback */ snd_pcm_uframes_t snd_line6_pointer(struct snd_pcm_substream *substream) { struct snd_line6_pcm *line6pcm = 
snd_pcm_substream_chip(substream); struct line6_pcm_stream *pstr = get_stream(line6pcm, substream->stream); return pstr->pos_done; } /* Acquire and optionally start duplex streams: * type is either LINE6_STREAM_IMPULSE or LINE6_STREAM_MONITOR */ int line6_pcm_acquire(struct snd_line6_pcm *line6pcm, int type, bool start) { struct line6_pcm_stream *pstr; int ret = 0, dir; /* TODO: We should assert SNDRV_PCM_STREAM_PLAYBACK/CAPTURE == 0/1 */ mutex_lock(&line6pcm->state_mutex); for (dir = 0; dir < 2; dir++) { pstr = get_stream(line6pcm, dir); ret = line6_buffer_acquire(line6pcm, pstr, dir, type); if (ret < 0) goto error; if (!pstr->running) line6_wait_clear_audio_urbs(line6pcm, pstr); } if (start) { for (dir = 0; dir < 2; dir++) { ret = line6_stream_start(line6pcm, dir, type); if (ret < 0) goto error; } } error: mutex_unlock(&line6pcm->state_mutex); if (ret < 0) line6_pcm_release(line6pcm, type); return ret; } EXPORT_SYMBOL_GPL(line6_pcm_acquire); /* Stop and release duplex streams */ void line6_pcm_release(struct snd_line6_pcm *line6pcm, int type) { struct line6_pcm_stream *pstr; int dir; mutex_lock(&line6pcm->state_mutex); for (dir = 0; dir < 2; dir++) line6_stream_stop(line6pcm, dir, type); for (dir = 0; dir < 2; dir++) { pstr = get_stream(line6pcm, dir); line6_buffer_release(line6pcm, pstr, type); } mutex_unlock(&line6pcm->state_mutex); } EXPORT_SYMBOL_GPL(line6_pcm_release); /* common PCM hw_params callback */ int snd_line6_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { int ret; struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream); struct line6_pcm_stream *pstr = get_stream(line6pcm, substream->stream); mutex_lock(&line6pcm->state_mutex); ret = line6_buffer_acquire(line6pcm, pstr, substream->stream, LINE6_STREAM_PCM); if (ret < 0) goto error; pstr->period = params_period_bytes(hw_params); error: mutex_unlock(&line6pcm->state_mutex); return ret; } /* common PCM hw_free callback */ int snd_line6_hw_free(struct snd_pcm_substream *substream) { struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream); struct line6_pcm_stream *pstr = get_stream(line6pcm, substream->stream); mutex_lock(&line6pcm->state_mutex); line6_buffer_release(line6pcm, pstr, LINE6_STREAM_PCM); mutex_unlock(&line6pcm->state_mutex); return 0; } /* control info callback */ static int snd_line6_control_playback_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = 256; return 0; } /* control get callback */ static int snd_line6_control_playback_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int i; struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol); for (i = 0; i < 2; i++) ucontrol->value.integer.value[i] = line6pcm->volume_playback[i]; return 0; } /* control put callback */ static int snd_line6_control_playback_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { int i, changed = 0; struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol); for (i = 0; i < 2; i++) if (line6pcm->volume_playback[i] != ucontrol->value.integer.value[i]) { line6pcm->volume_playback[i] = ucontrol->value.integer.value[i]; changed = 1; } return changed; } /* control definition */ static const struct snd_kcontrol_new line6_controls[] = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "PCM Playback Volume", .info = snd_line6_control_playback_info, .get = snd_line6_control_playback_get, .put = 
snd_line6_control_playback_put }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Impulse Response Volume", .info = snd_line6_impulse_volume_info, .get = snd_line6_impulse_volume_get, .put = snd_line6_impulse_volume_put }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Impulse Response Period", .info = snd_line6_impulse_period_info, .get = snd_line6_impulse_period_get, .put = snd_line6_impulse_period_put }, }; /* Cleanup the PCM device. */ static void cleanup_urbs(struct line6_pcm_stream *pcms, int iso_buffers) { int i; /* Most likely impossible in current code... */ if (pcms->urbs == NULL) return; for (i = 0; i < iso_buffers; i++) { if (pcms->urbs[i]) { usb_kill_urb(pcms->urbs[i]); usb_free_urb(pcms->urbs[i]); } } kfree(pcms->urbs); pcms->urbs = NULL; } static void line6_cleanup_pcm(struct snd_pcm *pcm) { struct snd_line6_pcm *line6pcm = snd_pcm_chip(pcm); cleanup_urbs(&line6pcm->out, line6pcm->line6->iso_buffers); cleanup_urbs(&line6pcm->in, line6pcm->line6->iso_buffers); kfree(line6pcm); } /* create a PCM device */ static int snd_line6_new_pcm(struct usb_line6 *line6, struct snd_pcm **pcm_ret) { struct snd_pcm *pcm; int err; err = snd_pcm_new(line6->card, (char *)line6->properties->name, 0, 1, 1, pcm_ret); if (err < 0) return err; pcm = *pcm_ret; strcpy(pcm->name, line6->properties->name); /* set operators */ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_line6_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_line6_capture_ops); /* pre-allocation of buffers */ snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS, NULL, 64 * 1024, 128 * 1024); return 0; } /* Sync with PCM stream stops. */ void line6_pcm_disconnect(struct snd_line6_pcm *line6pcm) { line6_unlink_audio_urbs(line6pcm, &line6pcm->out); line6_unlink_audio_urbs(line6pcm, &line6pcm->in); line6_wait_clear_audio_urbs(line6pcm, &line6pcm->out); line6_wait_clear_audio_urbs(line6pcm, &line6pcm->in); } /* Create and register the PCM device and mixer entries. Create URBs for playback and capture. 
*/ int line6_init_pcm(struct usb_line6 *line6, struct line6_pcm_properties *properties) { int i, err; unsigned ep_read = line6->properties->ep_audio_r; unsigned ep_write = line6->properties->ep_audio_w; struct snd_pcm *pcm; struct snd_line6_pcm *line6pcm; if (!(line6->properties->capabilities & LINE6_CAP_PCM)) return 0; /* skip PCM initialization and report success */ err = snd_line6_new_pcm(line6, &pcm); if (err < 0) return err; line6pcm = kzalloc(sizeof(*line6pcm), GFP_KERNEL); if (!line6pcm) return -ENOMEM; mutex_init(&line6pcm->state_mutex); line6pcm->pcm = pcm; line6pcm->properties = properties; line6pcm->volume_playback[0] = line6pcm->volume_playback[1] = 255; line6pcm->volume_monitor = 255; line6pcm->line6 = line6; spin_lock_init(&line6pcm->out.lock); spin_lock_init(&line6pcm->in.lock); line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD; line6->line6pcm = line6pcm; pcm->private_data = line6pcm; pcm->private_free = line6_cleanup_pcm; line6pcm->max_packet_size_in = usb_maxpacket(line6->usbdev, usb_rcvisocpipe(line6->usbdev, ep_read)); line6pcm->max_packet_size_out = usb_maxpacket(line6->usbdev, usb_sndisocpipe(line6->usbdev, ep_write)); if (!line6pcm->max_packet_size_in || !line6pcm->max_packet_size_out) { dev_err(line6pcm->line6->ifcdev, "cannot get proper max packet size\n"); return -EINVAL; } err = line6_create_audio_out_urbs(line6pcm); if (err < 0) return err; err = line6_create_audio_in_urbs(line6pcm); if (err < 0) return err; /* mixer: */ for (i = 0; i < ARRAY_SIZE(line6_controls); i++) { err = snd_ctl_add(line6->card, snd_ctl_new1(&line6_controls[i], line6pcm)); if (err < 0) return err; } return 0; } EXPORT_SYMBOL_GPL(line6_init_pcm); /* prepare pcm callback */ int snd_line6_prepare(struct snd_pcm_substream *substream) { struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream); struct line6_pcm_stream *pstr = get_stream(line6pcm, substream->stream); mutex_lock(&line6pcm->state_mutex); if (!pstr->running) line6_wait_clear_audio_urbs(line6pcm, pstr); if (!test_and_set_bit(LINE6_FLAG_PREPARED, &line6pcm->flags)) { line6pcm->out.count = 0; line6pcm->out.pos = 0; line6pcm->out.pos_done = 0; line6pcm->out.bytes = 0; line6pcm->in.count = 0; line6pcm->in.pos_done = 0; line6pcm->in.bytes = 0; } mutex_unlock(&line6pcm->state_mutex); return 0; }
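The acquire/release pair exported above is how Line6 sub-drivers run duplex streams (monitoring, impulse response) independently of the ALSA PCM triggers. A minimal caller-side sketch, assuming only what this file shows (line6->line6pcm is set up by line6_init_pcm() and LINE6_STREAM_MONITOR is one of the accepted stream types); the function name example_enable_monitor and the abbreviated error handling are illustrative, not part of the driver:

/* Sketch: allocate URB buffers for both directions and start them. */
static int example_enable_monitor(struct usb_line6 *line6)
{
	int err = line6_pcm_acquire(line6->line6pcm, LINE6_STREAM_MONITOR, true);

	if (err < 0)
		return err;

	/* ... monitoring runs here; the matching release stops and frees ... */
	line6_pcm_release(line6->line6pcm, LINE6_STREAM_MONITOR);
	return 0;
}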
// SPDX-License-Identifier: GPL-2.0-or-later /* * Jeilinj subdriver * * Supports some Jeilin dual-mode cameras which use bulk transport and * download raw JPEG data. * * Copyright (C) 2009 Theodore Kilgore * * Sportscam DV15 support and control settings are * Copyright (C) 2011 Patrice Chotard */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "jeilinj" #include <linux/slab.h> #include "gspca.h" #include "jpeg.h" MODULE_AUTHOR("Theodore Kilgore <kilgota@auburn.edu>"); MODULE_DESCRIPTION("GSPCA/JEILINJ USB Camera Driver"); MODULE_LICENSE("GPL"); /* Default timeouts, in ms */ #define JEILINJ_CMD_TIMEOUT 500 #define JEILINJ_CMD_DELAY 160 #define JEILINJ_DATA_TIMEOUT 1000 /* Maximum transfer size to use. */ #define JEILINJ_MAX_TRANSFER 0x200 #define FRAME_HEADER_LEN 0x10 #define FRAME_START 0xFFFFFFFF enum { SAKAR_57379, SPORTSCAM_DV15, }; #define CAMQUALITY_MIN 0 /* highest cam quality */ #define CAMQUALITY_MAX 97 /* lowest cam quality */ /* Structure to hold all of our device specific stuff */ struct sd { struct gspca_dev gspca_dev; /* !!
must be the first item */ int blocks_left; const struct v4l2_pix_format *cap_mode; struct v4l2_ctrl *freq; struct v4l2_ctrl *jpegqual; /* Driver stuff */ u8 type; u8 quality; /* image quality */ #define QUALITY_MIN 35 #define QUALITY_MAX 85 #define QUALITY_DEF 85 u8 jpeg_hdr[JPEG_HDR_SZ]; }; struct jlj_command { unsigned char instruction[2]; unsigned char ack_wanted; unsigned char delay; }; /* AFAICT these cameras will only do 320x240. */ static struct v4l2_pix_format jlj_mode[] = { { 320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0}, { 640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0} }; /* * cam uses endpoint 0x03 to send commands, 0x84 for read commands, * and 0x82 for bulk transfer. */ /* All commands are two bytes only */ static void jlj_write2(struct gspca_dev *gspca_dev, unsigned char *command) { int retval; if (gspca_dev->usb_err < 0) return; memcpy(gspca_dev->usb_buf, command, 2); retval = usb_bulk_msg(gspca_dev->dev, usb_sndbulkpipe(gspca_dev->dev, 3), gspca_dev->usb_buf, 2, NULL, 500); if (retval < 0) { pr_err("command write [%02x] error %d\n", gspca_dev->usb_buf[0], retval); gspca_dev->usb_err = retval; } } /* Responses are one byte only */ static void jlj_read1(struct gspca_dev *gspca_dev, unsigned char *response) { int retval; if (gspca_dev->usb_err < 0) return; retval = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x84), gspca_dev->usb_buf, 1, NULL, 500); *response = gspca_dev->usb_buf[0]; if (retval < 0) { pr_err("read command [%02x] error %d\n", gspca_dev->usb_buf[0], retval); gspca_dev->usb_err = retval; } } static void setfreq(struct gspca_dev *gspca_dev, s32 val) { u8 freq_commands[][2] = { {0x71, 0x80}, {0x70, 0x07} }; freq_commands[0][1] |= val >> 1; jlj_write2(gspca_dev, freq_commands[0]); jlj_write2(gspca_dev, freq_commands[1]); } static void setcamquality(struct gspca_dev *gspca_dev, s32 val) { u8 quality_commands[][2] = { {0x71, 0x1E}, {0x70, 0x06} }; u8 camquality; /* adapt camera quality from jpeg quality */ camquality = ((QUALITY_MAX - val) * CAMQUALITY_MAX) / (QUALITY_MAX - QUALITY_MIN); quality_commands[0][1] += camquality; jlj_write2(gspca_dev, quality_commands[0]); jlj_write2(gspca_dev, quality_commands[1]); } static void setautogain(struct gspca_dev *gspca_dev, s32 val) { u8 autogain_commands[][2] = { {0x94, 0x02}, {0xcf, 0x00} }; autogain_commands[1][1] = val << 4; jlj_write2(gspca_dev, autogain_commands[0]); jlj_write2(gspca_dev, autogain_commands[1]); } static void setred(struct gspca_dev *gspca_dev, s32 val) { u8 setred_commands[][2] = { {0x94, 0x02}, {0xe6, 0x00} }; setred_commands[1][1] = val; jlj_write2(gspca_dev, setred_commands[0]); jlj_write2(gspca_dev, setred_commands[1]); } static void setgreen(struct gspca_dev *gspca_dev, s32 val) { u8 setgreen_commands[][2] = { {0x94, 0x02}, {0xe7, 0x00} }; setgreen_commands[1][1] = val; jlj_write2(gspca_dev, setgreen_commands[0]); jlj_write2(gspca_dev, setgreen_commands[1]); } static void setblue(struct gspca_dev *gspca_dev, s32 val) { u8 setblue_commands[][2] = { {0x94, 0x02}, {0xe9, 0x00} }; setblue_commands[1][1] = val; jlj_write2(gspca_dev, setblue_commands[0]); jlj_write2(gspca_dev, setblue_commands[1]); } static int jlj_start(struct gspca_dev *gspca_dev) { int i; int start_commands_size; u8 response = 0xff; struct sd *sd = (struct sd *) gspca_dev; struct jlj_command start_commands[] = { {{0x71, 0x81}, 0, 0}, 
{{0x70, 0x05}, 0, JEILINJ_CMD_DELAY}, {{0x95, 0x70}, 1, 0}, {{0x71, 0x81 - gspca_dev->curr_mode}, 0, 0}, {{0x70, 0x04}, 0, JEILINJ_CMD_DELAY}, {{0x95, 0x70}, 1, 0}, {{0x71, 0x00}, 0, 0}, /* start streaming ??*/ {{0x70, 0x08}, 0, JEILINJ_CMD_DELAY}, {{0x95, 0x70}, 1, 0}, #define SPORTSCAM_DV15_CMD_SIZE 9 {{0x94, 0x02}, 0, 0}, {{0xde, 0x24}, 0, 0}, {{0x94, 0x02}, 0, 0}, {{0xdd, 0xf0}, 0, 0}, {{0x94, 0x02}, 0, 0}, {{0xe3, 0x2c}, 0, 0}, {{0x94, 0x02}, 0, 0}, {{0xe4, 0x00}, 0, 0}, {{0x94, 0x02}, 0, 0}, {{0xe5, 0x00}, 0, 0}, {{0x94, 0x02}, 0, 0}, {{0xe6, 0x2c}, 0, 0}, {{0x94, 0x03}, 0, 0}, {{0xaa, 0x00}, 0, 0} }; sd->blocks_left = 0; /* Under Windows, USB spy shows that only the 9 first start * commands are used for SPORTSCAM_DV15 webcam */ if (sd->type == SPORTSCAM_DV15) start_commands_size = SPORTSCAM_DV15_CMD_SIZE; else start_commands_size = ARRAY_SIZE(start_commands); for (i = 0; i < start_commands_size; i++) { jlj_write2(gspca_dev, start_commands[i].instruction); if (start_commands[i].delay) msleep(start_commands[i].delay); if (start_commands[i].ack_wanted) jlj_read1(gspca_dev, &response); } setcamquality(gspca_dev, v4l2_ctrl_g_ctrl(sd->jpegqual)); msleep(2); setfreq(gspca_dev, v4l2_ctrl_g_ctrl(sd->freq)); if (gspca_dev->usb_err < 0) gspca_err(gspca_dev, "Start streaming command failed\n"); return gspca_dev->usb_err; } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, int len) { struct sd *sd = (struct sd *) gspca_dev; int packet_type; u32 header_marker; gspca_dbg(gspca_dev, D_STREAM, "Got %d bytes out of %d for Block 0\n", len, JEILINJ_MAX_TRANSFER); if (len != JEILINJ_MAX_TRANSFER) { gspca_dbg(gspca_dev, D_PACK, "bad length\n"); goto discard; } /* check if it's start of frame */ header_marker = ((u32 *)data)[0]; if (header_marker == FRAME_START) { sd->blocks_left = data[0x0a] - 1; gspca_dbg(gspca_dev, D_STREAM, "blocks_left = 0x%x\n", sd->blocks_left); /* Start a new frame, and add the JPEG header, first thing */ gspca_frame_add(gspca_dev, FIRST_PACKET, sd->jpeg_hdr, JPEG_HDR_SZ); /* Toss line 0 of data block 0, keep the rest. */ gspca_frame_add(gspca_dev, INTER_PACKET, data + FRAME_HEADER_LEN, JEILINJ_MAX_TRANSFER - FRAME_HEADER_LEN); } else if (sd->blocks_left > 0) { gspca_dbg(gspca_dev, D_STREAM, "%d blocks remaining for frame\n", sd->blocks_left); sd->blocks_left -= 1; if (sd->blocks_left == 0) packet_type = LAST_PACKET; else packet_type = INTER_PACKET; gspca_frame_add(gspca_dev, packet_type, data, JEILINJ_MAX_TRANSFER); } else goto discard; return; discard: /* Discard data until a new frame starts. 
*/ gspca_dev->last_packet_type = DISCARD_PACKET; } /* This function is called at probe time just before sd_init */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct cam *cam = &gspca_dev->cam; struct sd *dev = (struct sd *) gspca_dev; dev->type = id->driver_info; dev->quality = QUALITY_DEF; cam->cam_mode = jlj_mode; cam->nmodes = ARRAY_SIZE(jlj_mode); cam->bulk = 1; cam->bulk_nurbs = 1; cam->bulk_size = JEILINJ_MAX_TRANSFER; return 0; } static void sd_stopN(struct gspca_dev *gspca_dev) { int i; u8 *buf; static u8 stop_commands[][2] = { {0x71, 0x00}, {0x70, 0x09}, {0x71, 0x80}, {0x70, 0x05} }; for (;;) { /* get the image remaining blocks */ usb_bulk_msg(gspca_dev->dev, gspca_dev->urb[0]->pipe, gspca_dev->urb[0]->transfer_buffer, JEILINJ_MAX_TRANSFER, NULL, JEILINJ_DATA_TIMEOUT); /* search for 0xff 0xd9 (EOF for JPEG) */ i = 0; buf = gspca_dev->urb[0]->transfer_buffer; while ((i < (JEILINJ_MAX_TRANSFER - 1)) && ((buf[i] != 0xff) || (buf[i+1] != 0xd9))) i++; if (i != (JEILINJ_MAX_TRANSFER - 1)) /* last remaining block found */ break; } for (i = 0; i < ARRAY_SIZE(stop_commands); i++) jlj_write2(gspca_dev, stop_commands[i]); } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { return gspca_dev->usb_err; } /* Set up for getting frames. */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; /* create the JPEG header */ jpeg_define(dev->jpeg_hdr, gspca_dev->pixfmt.height, gspca_dev->pixfmt.width, 0x21); /* JPEG 422 */ jpeg_set_qual(dev->jpeg_hdr, dev->quality); gspca_dbg(gspca_dev, D_STREAM, "Start streaming at %dx%d\n", gspca_dev->pixfmt.height, gspca_dev->pixfmt.width); jlj_start(gspca_dev); return gspca_dev->usb_err; } /* Table of supported USB devices */ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x0979, 0x0280), .driver_info = SAKAR_57379}, {USB_DEVICE(0x0979, 0x0270), .driver_info = SPORTSCAM_DV15}, {} }; MODULE_DEVICE_TABLE(usb, device_table); static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *)gspca_dev; gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_POWER_LINE_FREQUENCY: setfreq(gspca_dev, ctrl->val); break; case V4L2_CID_RED_BALANCE: setred(gspca_dev, ctrl->val); break; case V4L2_CID_GAIN: setgreen(gspca_dev, ctrl->val); break; case V4L2_CID_BLUE_BALANCE: setblue(gspca_dev, ctrl->val); break; case V4L2_CID_AUTOGAIN: setautogain(gspca_dev, ctrl->val); break; case V4L2_CID_JPEG_COMPRESSION_QUALITY: jpeg_set_qual(sd->jpeg_hdr, ctrl->val); setcamquality(gspca_dev, ctrl->val); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *)gspca_dev; struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; static const struct v4l2_ctrl_config custom_autogain = { .ops = &sd_ctrl_ops, .id = V4L2_CID_AUTOGAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Automatic Gain (and Exposure)", .max = 3, .step = 1, .def = 0, }; gspca_dev->vdev.ctrl_handler = hdl; v4l2_ctrl_handler_init(hdl, 6); sd->freq = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops, V4L2_CID_POWER_LINE_FREQUENCY, V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 1, V4L2_CID_POWER_LINE_FREQUENCY_60HZ); v4l2_ctrl_new_custom(hdl, &custom_autogain, NULL); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_RED_BALANCE, 
0, 3, 1, 2); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAIN, 0, 3, 1, 2); v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_BLUE_BALANCE, 0, 3, 1, 2); sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_JPEG_COMPRESSION_QUALITY, QUALITY_MIN, QUALITY_MAX, 1, QUALITY_DEF); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } return 0; } static int sd_set_jcomp(struct gspca_dev *gspca_dev, const struct v4l2_jpegcompression *jcomp) { struct sd *sd = (struct sd *) gspca_dev; v4l2_ctrl_s_ctrl(sd->jpegqual, jcomp->quality); return 0; } static int sd_get_jcomp(struct gspca_dev *gspca_dev, struct v4l2_jpegcompression *jcomp) { struct sd *sd = (struct sd *) gspca_dev; memset(jcomp, 0, sizeof *jcomp); jcomp->quality = v4l2_ctrl_g_ctrl(sd->jpegqual); jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT | V4L2_JPEG_MARKER_DQT; return 0; } /* sub-driver description */ static const struct sd_desc sd_desc_sakar_57379 = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, }; /* sub-driver description */ static const struct sd_desc sd_desc_sportscam_dv15 = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, .get_jcomp = sd_get_jcomp, .set_jcomp = sd_set_jcomp, }; static const struct sd_desc *sd_desc[2] = { &sd_desc_sakar_57379, &sd_desc_sportscam_dv15 }; /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, sd_desc[id->driver_info], sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
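The quality mapping in setcamquality() above inverts the user-visible JPEG quality (QUALITY_MIN..QUALITY_MAX, 35..85) onto the camera's own scale (CAMQUALITY_MIN..CAMQUALITY_MAX, 0..97, where 0 is the best quality); the result is then added to the 0x1E payload of the first {0x71, ...} command pair. A standalone worked example of that arithmetic, not part of the driver:

#include <stdio.h>

int main(void)
{
	const int qmin = 35, qmax = 85, cam_max = 97;	/* QUALITY_MIN/MAX, CAMQUALITY_MAX */
	int val;

	/* jpeg 85 -> 0 (best), jpeg 60 -> 48, jpeg 35 -> 97 (lowest) */
	for (val = qmin; val <= qmax; val += 25)
		printf("jpeg quality %d -> camquality %d\n",
		       val, ((qmax - val) * cam_max) / (qmax - qmin));
	return 0;
}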
/* SPDX-License-Identifier: GPL-2.0 */ /* * Code for manipulating bucket marks for garbage collection. * * Copyright 2014 Datera, Inc. */ #ifndef _BUCKETS_H #define _BUCKETS_H #include "buckets_types.h" #include "extents.h" #include "sb-members.h" static inline u64 sector_to_bucket(const struct bch_dev *ca, sector_t s) { return div_u64(s, ca->mi.bucket_size); } static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b) { return ((sector_t) b) * ca->mi.bucket_size; } static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s) { u32 remainder; div_u64_rem(s, ca->mi.bucket_size, &remainder); return remainder; } static inline u64 sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s, u32 *offset) { return div_u64_rem(s, ca->mi.bucket_size, offset); } #define for_each_bucket(_b, _buckets) \ for (_b = (_buckets)->b + (_buckets)->first_bucket; \ _b < (_buckets)->b + (_buckets)->nbuckets; _b++) static inline void bucket_unlock(struct bucket *b) { BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte); clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock); smp_mb__after_atomic(); wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR); } static inline void bucket_lock(struct bucket *b) { wait_on_bit_lock((void *) &b->lock, BUCKET_LOCK_BITNR, TASK_UNINTERRUPTIBLE); } static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b) { return bucket_valid(ca, b) ? genradix_ptr(&ca->buckets_gc, b) : NULL; } static inline struct bucket_gens *bucket_gens(struct bch_dev *ca) { return rcu_dereference_check(ca->bucket_gens, lockdep_is_held(&ca->fs->state_lock)); } static inline u8 *bucket_gen(struct bch_dev *ca, size_t b) { struct bucket_gens *gens = bucket_gens(ca); if (b - gens->first_bucket >= gens->nbuckets_minus_first) return NULL; return gens->b + b; } static inline int bucket_gen_get_rcu(struct bch_dev *ca, size_t b) { u8 *gen = bucket_gen(ca, b); return gen ?
*gen : -1; } static inline int bucket_gen_get(struct bch_dev *ca, size_t b) { guard(rcu)(); return bucket_gen_get_rcu(ca, b); } static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca, const struct bch_extent_ptr *ptr) { return sector_to_bucket(ca, ptr->offset); } static inline struct bpos PTR_BUCKET_POS(const struct bch_dev *ca, const struct bch_extent_ptr *ptr) { return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr)); } static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_dev *ca, const struct bch_extent_ptr *ptr, u32 *bucket_offset) { return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset)); } static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca, const struct bch_extent_ptr *ptr) { return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr)); } static inline enum bch_data_type ptr_data_type(const struct bkey *k, const struct bch_extent_ptr *ptr) { if (bkey_is_btree_ptr(k)) return BCH_DATA_btree; return ptr->cached ? BCH_DATA_cached : BCH_DATA_user; } static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p) { EBUG_ON(sectors < 0); return crc_is_compressed(p.crc) ? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size, p.crc.uncompressed_size) : sectors; } static inline int gen_cmp(u8 a, u8 b) { return (s8) (a - b); } static inline int gen_after(u8 a, u8 b) { return max(0, gen_cmp(a, b)); } static inline int dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr) { int gen = bucket_gen_get_rcu(ca, PTR_BUCKET_NR(ca, ptr)); return gen < 0 ? gen : gen_after(gen, ptr->gen); } /** * dev_ptr_stale() - check if a pointer points into a bucket that has been * invalidated. */ static inline int dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr) { guard(rcu)(); return dev_ptr_stale_rcu(ca, ptr); } /* Device usage: */ void bch2_dev_usage_read_fast(struct bch_dev *, struct bch_dev_usage *); static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca) { struct bch_dev_usage ret; bch2_dev_usage_read_fast(ca, &ret); return ret; } void bch2_dev_usage_full_read_fast(struct bch_dev *, struct bch_dev_usage_full *); static inline struct bch_dev_usage_full bch2_dev_usage_full_read(struct bch_dev *ca) { struct bch_dev_usage_full ret; bch2_dev_usage_full_read_fast(ca, &ret); return ret; } void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage_full *); static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark) { s64 reserved = 0; switch (watermark) { case BCH_WATERMARK_NR: BUG(); case BCH_WATERMARK_stripe: reserved += ca->mi.nbuckets >> 6; fallthrough; case BCH_WATERMARK_normal: reserved += ca->mi.nbuckets >> 6; fallthrough; case BCH_WATERMARK_copygc: reserved += ca->nr_btree_reserve; fallthrough; case BCH_WATERMARK_btree: reserved += ca->nr_btree_reserve; fallthrough; case BCH_WATERMARK_btree_copygc: case BCH_WATERMARK_reclaim: case BCH_WATERMARK_interior_updates: break; } return reserved; } static inline u64 dev_buckets_free(struct bch_dev *ca, struct bch_dev_usage usage, enum bch_watermark watermark) { return max_t(s64, 0, usage.buckets[BCH_DATA_free]- ca->nr_open_buckets - bch2_dev_buckets_reserved(ca, watermark)); } static inline u64 __dev_buckets_available(struct bch_dev *ca, struct bch_dev_usage usage, enum bch_watermark watermark) { return max_t(s64, 0, usage.buckets[BCH_DATA_free] + usage.buckets[BCH_DATA_cached] + usage.buckets[BCH_DATA_need_gc_gens] + usage.buckets[BCH_DATA_need_discard] - ca->nr_open_buckets - bch2_dev_buckets_reserved(ca, watermark)); 
} static inline u64 dev_buckets_available(struct bch_dev *ca, enum bch_watermark watermark) { return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark); } /* Filesystem usage: */ struct bch_fs_usage_short bch2_fs_usage_read_short(struct bch_fs *); int bch2_bucket_ref_update(struct btree_trans *, struct bch_dev *, struct bkey_s_c, const struct bch_extent_ptr *, s64, enum bch_data_type, u8, u8, u32 *); int bch2_check_fix_ptrs(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, enum btree_iter_update_trigger_flags); int bch2_trigger_extent(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_s, enum btree_iter_update_trigger_flags); int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_s, enum btree_iter_update_trigger_flags); #define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)\ ({ \ int ret = 0; \ \ if (_old.k->type) \ ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_insert); \ if (!ret && _new.k->type) \ ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_overwrite);\ ret; \ }) void bch2_trans_account_disk_usage_change(struct btree_trans *); int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *, u64, enum bch_data_type, unsigned, enum btree_iter_update_trigger_flags); int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *, enum btree_iter_update_trigger_flags); int bch2_trans_mark_dev_sbs_flags(struct bch_fs *, enum btree_iter_update_trigger_flags); int bch2_trans_mark_dev_sbs(struct bch_fs *); bool bch2_is_superblock_bucket(struct bch_dev *, u64); static inline const char *bch2_data_type_str(enum bch_data_type type) { return type < BCH_DATA_NR ? __bch2_data_types[type] : "(invalid data type)"; } /* disk reservations: */ static inline void bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res) { if (res->sectors) { this_cpu_sub(*c->online_reserved, res->sectors); res->sectors = 0; } } enum bch_reservation_flags { BCH_DISK_RESERVATION_NOFAIL = 1 << 0, BCH_DISK_RESERVATION_PARTIAL = 1 << 1, }; int __bch2_disk_reservation_add(struct bch_fs *, struct disk_reservation *, u64, enum bch_reservation_flags); static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res, u64 sectors, enum bch_reservation_flags flags) { #ifdef __KERNEL__ u64 old, new; old = this_cpu_read(c->pcpu->sectors_available); do { if (sectors > old) return __bch2_disk_reservation_add(c, res, sectors, flags); new = old - sectors; } while (!this_cpu_try_cmpxchg(c->pcpu->sectors_available, &old, new)); this_cpu_add(*c->online_reserved, sectors); res->sectors += sectors; return 0; #else return __bch2_disk_reservation_add(c, res, sectors, flags); #endif } static inline struct disk_reservation bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas) { return (struct disk_reservation) { .sectors = 0, #if 0 /* not used yet: */ .gen = c->capacity_gen, #endif .nr_replicas = nr_replicas, }; } static inline int bch2_disk_reservation_get(struct bch_fs *c, struct disk_reservation *res, u64 sectors, unsigned nr_replicas, int flags) { *res = bch2_disk_reservation_init(c, nr_replicas); return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags); } #define RESERVE_FACTOR 6 static inline u64 avail_factor(u64 r) { return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1); } void bch2_buckets_nouse_free(struct bch_fs *); int bch2_buckets_nouse_alloc(struct bch_fs *); int 
bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64); void bch2_dev_buckets_free(struct bch_dev *); int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *); #endif /* _BUCKETS_H */
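For context, a caller-side sketch of the reservation pattern these declarations support: bch2_disk_reservation_get() charges sectors * nr_replicas through the per-CPU fast path in bch2_disk_reservation_add(), and bch2_disk_reservation_put() returns whatever is still held. The sector and replica counts below are illustrative, and example_reserve is a hypothetical caller, assuming only a valid struct bch_fs:

static int example_reserve(struct bch_fs *c)
{
	struct disk_reservation res;
	int ret = bch2_disk_reservation_get(c, &res, 8, 2, 0);	/* 8 sectors x 2 replicas */

	if (ret)
		return ret;	/* typically an ENOSPC-class error when space runs out */

	/* ... issue the write that consumes the reserved space ... */

	bch2_disk_reservation_put(c, &res);	/* release anything still held */
	return 0;
}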
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_E820_API_H
#define _ASM_E820_API_H

#include <asm/e820/types.h>

extern struct e820_table *e820_table;
extern struct e820_table *e820_table_kexec;
extern struct e820_table *e820_table_firmware;

extern unsigned long pci_mem_start;

extern bool e820__mapped_raw_any(u64 start, u64 end, enum e820_type type);
extern bool e820__mapped_any(u64 start, u64 end, enum e820_type type);
extern bool e820__mapped_all(u64 start, u64 end, enum e820_type type);

extern void e820__range_add(u64 start, u64 size, enum e820_type type);
extern u64 e820__range_update(u64 start, u64 size, enum e820_type old_type, enum e820_type new_type);
extern u64 e820__range_remove(u64 start, u64 size, enum e820_type old_type, bool check_type);
extern u64 e820__range_update_table(struct e820_table *t, u64 start, u64 size, enum e820_type old_type, enum e820_type new_type);

extern void e820__print_table(char *who);
extern int e820__update_table(struct e820_table *table);
extern void e820__update_table_print(void);

extern unsigned long e820__end_of_ram_pfn(void);
extern unsigned long e820__end_of_low_ram_pfn(void);

extern u64 e820__memblock_alloc_reserved(u64 size, u64 align);
extern void e820__memblock_setup(void);

extern void e820__finish_early_params(void);
extern void e820__reserve_resources(void);
extern void e820__reserve_resources_late(void);

extern void e820__memory_setup(void);
extern void e820__memory_setup_extended(u64 phys_addr, u32 data_len);
extern char *e820__memory_setup_default(void);
extern void e820__setup_pci_gap(void);

extern void e820__reallocate_tables(void);
extern void e820__register_nosave_regions(unsigned long limit_pfn);

extern int e820__get_entry_type(u64 start, u64 end);

/*
 * Returns true iff the specified range [start,end) is completely contained inside
 * the ISA region.
 */
static inline bool is_ISA_range(u64 start, u64 end)
{
	return start >= ISA_START_ADDRESS && end <= ISA_END_ADDRESS;
}

#endif /* _ASM_E820_API_H */
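A small usage sketch of the query helpers declared above, with illustrative addresses; E820_TYPE_RAM comes from <asm/e820/types.h>, which this header already includes, and example_range_is_all_ram is a hypothetical caller:

static bool example_range_is_all_ram(u64 start, u64 size)
{
	/* Any usable RAM in [start, start + size) at all? */
	if (!e820__mapped_any(start, start + size, E820_TYPE_RAM))
		return false;

	/* Is the whole range covered by usable RAM? */
	return e820__mapped_all(start, start + size, E820_TYPE_RAM);
}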
// SPDX-License-Identifier: GPL-2.0-or-later /* Keyring handling * * Copyright (C) 2004-2005, 2008, 2013 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #include <linux/export.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/security.h> #include <linux/seq_file.h> #include <linux/err.h> #include <linux/user_namespace.h> #include <linux/nsproxy.h> #include <keys/keyring-type.h> #include <keys/user-type.h> #include <linux/assoc_array_priv.h> #include <linux/uaccess.h> #include <net/net_namespace.h> #include "internal.h" /* * When plumbing the depths of the key tree, this sets a hard limit * set on how deep we're willing to go. */ #define KEYRING_SEARCH_MAX_DEPTH 6 /* * We mark pointers we pass to the associative array with bit 1 set if * they're keyrings and clear otherwise. */ #define KEYRING_PTR_SUBTYPE 0x2UL static inline bool keyring_ptr_is_keyring(const struct assoc_array_ptr *x) { return (unsigned long)x & KEYRING_PTR_SUBTYPE; } static inline struct key *keyring_ptr_to_key(const struct assoc_array_ptr *x) { void *object = assoc_array_ptr_to_leaf(x); return (struct key *)((unsigned long)object & ~KEYRING_PTR_SUBTYPE); } static inline void *keyring_key_to_ptr(struct key *key) { if (key->type == &key_type_keyring) return (void *)((unsigned long)key | KEYRING_PTR_SUBTYPE); return key; } static DEFINE_RWLOCK(keyring_name_lock); /* * Clean up the bits of user_namespace that belong to us. */ void key_free_user_ns(struct user_namespace *ns) { write_lock(&keyring_name_lock); list_del_init(&ns->keyring_name_list); write_unlock(&keyring_name_lock); key_put(ns->user_keyring_register); #ifdef CONFIG_PERSISTENT_KEYRINGS key_put(ns->persistent_keyring_register); #endif } /* * The keyring key type definition. Keyrings are simply keys of this type and * can be treated as ordinary keys in addition to having their own special * operations.
*/ static int keyring_preparse(struct key_preparsed_payload *prep); static void keyring_free_preparse(struct key_preparsed_payload *prep); static int keyring_instantiate(struct key *keyring, struct key_preparsed_payload *prep); static void keyring_revoke(struct key *keyring); static void keyring_destroy(struct key *keyring); static void keyring_describe(const struct key *keyring, struct seq_file *m); static long keyring_read(const struct key *keyring, char *buffer, size_t buflen); struct key_type key_type_keyring = { .name = "keyring", .def_datalen = 0, .preparse = keyring_preparse, .free_preparse = keyring_free_preparse, .instantiate = keyring_instantiate, .revoke = keyring_revoke, .destroy = keyring_destroy, .describe = keyring_describe, .read = keyring_read, }; EXPORT_SYMBOL(key_type_keyring); /* * Semaphore to serialise link/link calls to prevent two link calls in parallel * introducing a cycle. */ static DEFINE_MUTEX(keyring_serialise_link_lock); /* * Publish the name of a keyring so that it can be found by name (if it has * one and it doesn't begin with a dot). */ static void keyring_publish_name(struct key *keyring) { struct user_namespace *ns = current_user_ns(); if (keyring->description && keyring->description[0] && keyring->description[0] != '.') { write_lock(&keyring_name_lock); list_add_tail(&keyring->name_link, &ns->keyring_name_list); write_unlock(&keyring_name_lock); } } /* * Preparse a keyring payload */ static int keyring_preparse(struct key_preparsed_payload *prep) { return prep->datalen != 0 ? -EINVAL : 0; } /* * Free a preparse of a user defined key payload */ static void keyring_free_preparse(struct key_preparsed_payload *prep) { } /* * Initialise a keyring. * * Returns 0 on success, -EINVAL if given any data. */ static int keyring_instantiate(struct key *keyring, struct key_preparsed_payload *prep) { assoc_array_init(&keyring->keys); /* make the keyring available by name if it has one */ keyring_publish_name(keyring); return 0; } /* * Multiply 64-bits by 32-bits to 96-bits and fold back to 64-bit. Ideally we'd * fold the carry back too, but that requires inline asm. */ static u64 mult_64x32_and_fold(u64 x, u32 y) { u64 hi = (u64)(u32)(x >> 32) * y; u64 lo = (u64)(u32)(x) * y; return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32); } /* * Hash a key type and description. */ static void hash_key_type_and_desc(struct keyring_index_key *index_key) { const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP; const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK; const char *description = index_key->description; unsigned long hash, type; u32 piece; u64 acc; int n, desc_len = index_key->desc_len; type = (unsigned long)index_key->type; acc = mult_64x32_and_fold(type, desc_len + 13); acc = mult_64x32_and_fold(acc, 9207); piece = (unsigned long)index_key->domain_tag; acc = mult_64x32_and_fold(acc, piece); acc = mult_64x32_and_fold(acc, 9207); for (;;) { n = desc_len; if (n <= 0) break; if (n > 4) n = 4; piece = 0; memcpy(&piece, description, n); description += n; desc_len -= n; acc = mult_64x32_and_fold(acc, piece); acc = mult_64x32_and_fold(acc, 9207); } /* Fold the hash down to 32 bits if need be. */ hash = acc; if (ASSOC_ARRAY_KEY_CHUNK_SIZE == 32) hash ^= acc >> 32; /* Squidge all the keyrings into a separate part of the tree to * ordinary keys by making sure the lowest level segment in the hash is * zero for keyrings and non-zero otherwise. 
*/ if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0) hash |= (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1; else if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0) hash = (hash + (hash << level_shift)) & ~fan_mask; index_key->hash = hash; } /* * Finalise an index key to include a part of the description actually in the * index key, to set the domain tag and to calculate the hash. */ void key_set_index_key(struct keyring_index_key *index_key) { static struct key_tag default_domain_tag = { .usage = REFCOUNT_INIT(1), }; size_t n = min_t(size_t, index_key->desc_len, sizeof(index_key->desc)); memcpy(index_key->desc, index_key->description, n); if (!index_key->domain_tag) { if (index_key->type->flags & KEY_TYPE_NET_DOMAIN) index_key->domain_tag = current->nsproxy->net_ns->key_domain; else index_key->domain_tag = &default_domain_tag; } hash_key_type_and_desc(index_key); } /** * key_put_tag - Release a ref on a tag. * @tag: The tag to release. * * This releases a reference the given tag and returns true if that ref was the * last one. */ bool key_put_tag(struct key_tag *tag) { if (refcount_dec_and_test(&tag->usage)) { kfree_rcu(tag, rcu); return true; } return false; } /** * key_remove_domain - Kill off a key domain and gc its keys * @domain_tag: The domain tag to release. * * This marks a domain tag as being dead and releases a ref on it. If that * wasn't the last reference, the garbage collector is poked to try and delete * all keys that were in the domain. */ void key_remove_domain(struct key_tag *domain_tag) { domain_tag->removed = true; if (!key_put_tag(domain_tag)) key_schedule_gc_links(); } /* * Build the next index key chunk. * * We return it one word-sized chunk at a time. */ static unsigned long keyring_get_key_chunk(const void *data, int level) { const struct keyring_index_key *index_key = data; unsigned long chunk = 0; const u8 *d; int desc_len = index_key->desc_len, n = sizeof(chunk); level /= ASSOC_ARRAY_KEY_CHUNK_SIZE; switch (level) { case 0: return index_key->hash; case 1: return index_key->x; case 2: return (unsigned long)index_key->type; case 3: return (unsigned long)index_key->domain_tag; default: level -= 4; if (desc_len <= sizeof(index_key->desc)) return 0; d = index_key->description + sizeof(index_key->desc); d += level * sizeof(long); desc_len -= sizeof(index_key->desc); if (desc_len > n) desc_len = n; do { chunk <<= 8; chunk |= *d++; } while (--desc_len > 0); return chunk; } } static unsigned long keyring_get_object_key_chunk(const void *object, int level) { const struct key *key = keyring_ptr_to_key(object); return keyring_get_key_chunk(&key->index_key, level); } static bool keyring_compare_object(const void *object, const void *data) { const struct keyring_index_key *index_key = data; const struct key *key = keyring_ptr_to_key(object); return key->index_key.type == index_key->type && key->index_key.domain_tag == index_key->domain_tag && key->index_key.desc_len == index_key->desc_len && memcmp(key->index_key.description, index_key->description, index_key->desc_len) == 0; } /* * Compare the index keys of a pair of objects and determine the bit position * at which they differ - if they differ. 
*/ static int keyring_diff_objects(const void *object, const void *data) { const struct key *key_a = keyring_ptr_to_key(object); const struct keyring_index_key *a = &key_a->index_key; const struct keyring_index_key *b = data; unsigned long seg_a, seg_b; int level, i; level = 0; seg_a = a->hash; seg_b = b->hash; if ((seg_a ^ seg_b) != 0) goto differ; level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8; /* The number of bits contributed by the hash is controlled by a * constant in the assoc_array headers. Everything else thereafter we * can deal with as being machine word-size dependent. */ seg_a = a->x; seg_b = b->x; if ((seg_a ^ seg_b) != 0) goto differ; level += sizeof(unsigned long); /* The next bit may not work on big endian */ seg_a = (unsigned long)a->type; seg_b = (unsigned long)b->type; if ((seg_a ^ seg_b) != 0) goto differ; level += sizeof(unsigned long); seg_a = (unsigned long)a->domain_tag; seg_b = (unsigned long)b->domain_tag; if ((seg_a ^ seg_b) != 0) goto differ; level += sizeof(unsigned long); i = sizeof(a->desc); if (a->desc_len <= i) goto same; for (; i < a->desc_len; i++) { seg_a = *(unsigned char *)(a->description + i); seg_b = *(unsigned char *)(b->description + i); if ((seg_a ^ seg_b) != 0) goto differ_plus_i; } same: return -1; differ_plus_i: level += i; differ: i = level * 8 + __ffs(seg_a ^ seg_b); return i; } /* * Free an object after stripping the keyring flag off of the pointer. */ static void keyring_free_object(void *object) { key_put(keyring_ptr_to_key(object)); } /* * Operations for keyring management by the index-tree routines. */ static const struct assoc_array_ops keyring_assoc_array_ops = { .get_key_chunk = keyring_get_key_chunk, .get_object_key_chunk = keyring_get_object_key_chunk, .compare_object = keyring_compare_object, .diff_objects = keyring_diff_objects, .free_object = keyring_free_object, }; /* * Clean up a keyring when it is destroyed. Unpublish its name if it had one * and dispose of its data. * * The garbage collector detects the final key_put(), removes the keyring from * the serial number tree and then does RCU synchronisation before coming here, * so we shouldn't need to worry about code poking around here with the RCU * readlock held by this time. */ static void keyring_destroy(struct key *keyring) { if (keyring->description) { write_lock(&keyring_name_lock); if (keyring->name_link.next != NULL && !list_empty(&keyring->name_link)) list_del(&keyring->name_link); write_unlock(&keyring_name_lock); } if (keyring->restrict_link) { struct key_restriction *keyres = keyring->restrict_link; key_put(keyres->key); kfree(keyres); } assoc_array_destroy(&keyring->keys, &keyring_assoc_array_ops); } /* * Describe a keyring for /proc. 
*/ static void keyring_describe(const struct key *keyring, struct seq_file *m) { if (keyring->description) seq_puts(m, keyring->description); else seq_puts(m, "[anon]"); if (key_is_positive(keyring)) { if (keyring->keys.nr_leaves_on_tree != 0) seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree); else seq_puts(m, ": empty"); } } struct keyring_read_iterator_context { size_t buflen; size_t count; key_serial_t *buffer; }; static int keyring_read_iterator(const void *object, void *data) { struct keyring_read_iterator_context *ctx = data; const struct key *key = keyring_ptr_to_key(object); kenter("{%s,%d},,{%zu/%zu}", key->type->name, key->serial, ctx->count, ctx->buflen); if (ctx->count >= ctx->buflen) return 1; *ctx->buffer++ = key->serial; ctx->count += sizeof(key->serial); return 0; } /* * Read a list of key IDs from the keyring's contents in binary form * * The keyring's semaphore is read-locked by the caller. This prevents someone * from modifying it under us - which could cause us to read key IDs multiple * times. */ static long keyring_read(const struct key *keyring, char *buffer, size_t buflen) { struct keyring_read_iterator_context ctx; long ret; kenter("{%d},,%zu", key_serial(keyring), buflen); if (buflen & (sizeof(key_serial_t) - 1)) return -EINVAL; /* Copy as many key IDs as fit into the buffer */ if (buffer && buflen) { ctx.buffer = (key_serial_t *)buffer; ctx.buflen = buflen; ctx.count = 0; ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx); if (ret < 0) { kleave(" = %ld [iterate]", ret); return ret; } } /* Return the size of the buffer needed */ ret = keyring->keys.nr_leaves_on_tree * sizeof(key_serial_t); if (ret <= buflen) kleave("= %ld [ok]", ret); else kleave("= %ld [buffer too small]", ret); return ret; } /* * Allocate a keyring and link into the destination keyring. */ struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid, const struct cred *cred, key_perm_t perm, unsigned long flags, struct key_restriction *restrict_link, struct key *dest) { struct key *keyring; int ret; keyring = key_alloc(&key_type_keyring, description, uid, gid, cred, perm, flags, restrict_link); if (!IS_ERR(keyring)) { ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL); if (ret < 0) { key_put(keyring); keyring = ERR_PTR(ret); } } return keyring; } EXPORT_SYMBOL(keyring_alloc); /** * restrict_link_reject - Give -EPERM to restrict link * @keyring: The keyring being added to. * @type: The type of key being added. * @payload: The payload of the key intended to be added. * @restriction_key: Keys providing additional data for evaluating restriction. * * Reject the addition of any links to a keyring. It can be overridden by * passing KEY_ALLOC_BYPASS_RESTRICTION to key_instantiate_and_link() when * adding a key to a keyring. * * This is meant to be stored in a key_restriction structure which is passed * in the restrict_link parameter to keyring_alloc(). */ int restrict_link_reject(struct key *keyring, const struct key_type *type, const union key_payload *payload, struct key *restriction_key) { return -EPERM; } /* * By default, we keys found by getting an exact match on their descriptions. */ bool key_default_cmp(const struct key *key, const struct key_match_data *match_data) { return strcmp(key->description, match_data->raw_data) == 0; } /* * Iteration function to consider each key found. 
*/ static int keyring_search_iterator(const void *object, void *iterator_data) { struct keyring_search_context *ctx = iterator_data; const struct key *key = keyring_ptr_to_key(object); unsigned long kflags = READ_ONCE(key->flags); short state = READ_ONCE(key->state); kenter("{%d}", key->serial); /* ignore keys not of this type */ if (key->type != ctx->index_key.type) { kleave(" = 0 [!type]"); return 0; } /* skip invalidated, revoked and expired keys */ if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { time64_t expiry = READ_ONCE(key->expiry); if (kflags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED))) { ctx->result = ERR_PTR(-EKEYREVOKED); kleave(" = %d [invrev]", ctx->skipped_ret); goto skipped; } if (expiry && ctx->now >= expiry) { if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED)) ctx->result = ERR_PTR(-EKEYEXPIRED); kleave(" = %d [expire]", ctx->skipped_ret); goto skipped; } } /* keys that don't match */ if (!ctx->match_data.cmp(key, &ctx->match_data)) { kleave(" = 0 [!match]"); return 0; } /* key must have search permissions */ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) && key_task_permission(make_key_ref(key, ctx->possessed), ctx->cred, KEY_NEED_SEARCH) < 0) { ctx->result = ERR_PTR(-EACCES); kleave(" = %d [!perm]", ctx->skipped_ret); goto skipped; } if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { /* we set a different error code if we pass a negative key */ if (state < 0) { ctx->result = ERR_PTR(state); kleave(" = %d [neg]", ctx->skipped_ret); goto skipped; } } /* Found */ ctx->result = make_key_ref(key, ctx->possessed); kleave(" = 1 [found]"); return 1; skipped: return ctx->skipped_ret; } /* * Search inside a keyring for a key. We can search by walking to it * directly based on its index-key or we can iterate over the entire * tree looking for it, based on the match function. */ static int search_keyring(struct key *keyring, struct keyring_search_context *ctx) { if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_DIRECT) { const void *object; object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops, &ctx->index_key); return object ? ctx->iterator(object, ctx) : 0; } return assoc_array_iterate(&keyring->keys, ctx->iterator, ctx); } /* * Search a tree of keyrings that point to other keyrings up to the maximum * depth. */ static bool search_nested_keyrings(struct key *keyring, struct keyring_search_context *ctx) { struct { struct key *keyring; struct assoc_array_node *node; int slot; } stack[KEYRING_SEARCH_MAX_DEPTH]; struct assoc_array_shortcut *shortcut; struct assoc_array_node *node; struct assoc_array_ptr *ptr; struct key *key; int sp = 0, slot; kenter("{%d},{%s,%s}", keyring->serial, ctx->index_key.type->name, ctx->index_key.description); #define STATE_CHECKS (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_DO_STATE_CHECK) BUG_ON((ctx->flags & STATE_CHECKS) == 0 || (ctx->flags & STATE_CHECKS) == STATE_CHECKS); if (ctx->index_key.description) key_set_index_key(&ctx->index_key); /* Check to see if this top-level keyring is what we are looking for * and whether it is valid or not. 
*/ if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE || keyring_compare_object(keyring, &ctx->index_key)) { ctx->skipped_ret = 2; switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) { case 1: goto found; case 2: return false; default: break; } } ctx->skipped_ret = 0; /* Start processing a new keyring */ descend_to_keyring: kdebug("descend to %d", keyring->serial); if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED))) goto not_this_keyring; /* Search through the keys in this keyring before its searching its * subtrees. */ if (search_keyring(keyring, ctx)) goto found; /* Then manually iterate through the keyrings nested in this one. * * Start from the root node of the index tree. Because of the way the * hash function has been set up, keyrings cluster on the leftmost * branch of the root node (root slot 0) or in the root node itself. * Non-keyrings avoid the leftmost branch of the root entirely (root * slots 1-15). */ if (!(ctx->flags & KEYRING_SEARCH_RECURSE)) goto not_this_keyring; ptr = READ_ONCE(keyring->keys.root); if (!ptr) goto not_this_keyring; if (assoc_array_ptr_is_shortcut(ptr)) { /* If the root is a shortcut, either the keyring only contains * keyring pointers (everything clusters behind root slot 0) or * doesn't contain any keyring pointers. */ shortcut = assoc_array_ptr_to_shortcut(ptr); if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0) goto not_this_keyring; ptr = READ_ONCE(shortcut->next_node); node = assoc_array_ptr_to_node(ptr); goto begin_node; } node = assoc_array_ptr_to_node(ptr); ptr = node->slots[0]; if (!assoc_array_ptr_is_meta(ptr)) goto begin_node; descend_to_node: /* Descend to a more distal node in this keyring's content tree and go * through that. */ kdebug("descend"); if (assoc_array_ptr_is_shortcut(ptr)) { shortcut = assoc_array_ptr_to_shortcut(ptr); ptr = READ_ONCE(shortcut->next_node); BUG_ON(!assoc_array_ptr_is_node(ptr)); } node = assoc_array_ptr_to_node(ptr); begin_node: kdebug("begin_node"); slot = 0; ascend_to_node: /* Go through the slots in a node */ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { ptr = READ_ONCE(node->slots[slot]); if (assoc_array_ptr_is_meta(ptr)) { if (node->back_pointer || assoc_array_ptr_is_shortcut(ptr)) goto descend_to_node; } if (!keyring_ptr_is_keyring(ptr)) continue; key = keyring_ptr_to_key(ptr); if (sp >= KEYRING_SEARCH_MAX_DEPTH) { if (ctx->flags & KEYRING_SEARCH_DETECT_TOO_DEEP) { ctx->result = ERR_PTR(-ELOOP); return false; } goto not_this_keyring; } /* Search a nested keyring */ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) && key_task_permission(make_key_ref(key, ctx->possessed), ctx->cred, KEY_NEED_SEARCH) < 0) continue; /* stack the current position */ stack[sp].keyring = keyring; stack[sp].node = node; stack[sp].slot = slot; sp++; /* begin again with the new keyring */ keyring = key; goto descend_to_keyring; } /* We've dealt with all the slots in the current node, so now we need * to ascend to the parent and continue processing there. */ ptr = READ_ONCE(node->back_pointer); slot = node->parent_slot; if (ptr && assoc_array_ptr_is_shortcut(ptr)) { shortcut = assoc_array_ptr_to_shortcut(ptr); ptr = READ_ONCE(shortcut->back_pointer); slot = shortcut->parent_slot; } if (!ptr) goto not_this_keyring; node = assoc_array_ptr_to_node(ptr); slot++; /* If we've ascended to the root (zero backpointer), we must have just * finished processing the leftmost branch rather than the root slots - * so there can't be any more keyrings for us to find. 
*/ if (node->back_pointer) { kdebug("ascend %d", slot); goto ascend_to_node; } /* The keyring we're looking at was disqualified or didn't contain a * matching key. */ not_this_keyring: kdebug("not_this_keyring %d", sp); if (sp <= 0) { kleave(" = false"); return false; } /* Resume the processing of a keyring higher up in the tree */ sp--; keyring = stack[sp].keyring; node = stack[sp].node; slot = stack[sp].slot + 1; kdebug("ascend to %d [%d]", keyring->serial, slot); goto ascend_to_node; /* We found a viable match */ found: key = key_ref_to_ptr(ctx->result); key_check(key); if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) { key->last_used_at = ctx->now; keyring->last_used_at = ctx->now; while (sp > 0) stack[--sp].keyring->last_used_at = ctx->now; } kleave(" = true"); return true; } /** * keyring_search_rcu - Search a keyring tree for a matching key under RCU * @keyring_ref: A pointer to the keyring with possession indicator. * @ctx: The keyring search context. * * Search the supplied keyring tree for a key that matches the criteria given. * The root keyring and any linked keyrings must grant Search permission to the * caller to be searchable and keys can only be found if they too grant Search * to the caller. The possession flag on the root keyring pointer controls use * of the possessor bits in permissions checking of the entire tree. In * addition, the LSM gets to forbid keyring searches and key matches. * * The search is performed as a breadth-then-depth search up to the prescribed * limit (KEYRING_SEARCH_MAX_DEPTH). The caller must hold the RCU read lock to * prevent keyrings from being destroyed or rearranged whilst they are being * searched. * * Keys are matched to the type provided and are then filtered by the match * function, which is given the description to use in any way it sees fit. The * match function may use any attributes of a key that it wishes to * determine the match. Normally the match function from the key type would be * used. * * RCU can be used to prevent the keyring key lists from disappearing without * the need to take lots of locks. * * Returns a pointer to the found key and increments the key usage count if * successful; -EAGAIN if no matching keys were found, or if expired or revoked * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the * specified keyring wasn't a keyring. * * In the case of a successful return, the possession attribute from * @keyring_ref is propagated to the returned key reference. */ key_ref_t keyring_search_rcu(key_ref_t keyring_ref, struct keyring_search_context *ctx) { struct key *keyring; long err; ctx->iterator = keyring_search_iterator; ctx->possessed = is_key_possessed(keyring_ref); ctx->result = ERR_PTR(-EAGAIN); keyring = key_ref_to_ptr(keyring_ref); key_check(keyring); if (keyring->type != &key_type_keyring) return ERR_PTR(-ENOTDIR); if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM)) { err = key_task_permission(keyring_ref, ctx->cred, KEY_NEED_SEARCH); if (err < 0) return ERR_PTR(err); } ctx->now = ktime_get_real_seconds(); if (search_nested_keyrings(keyring, ctx)) __key_get(key_ref_to_ptr(ctx->result)); return ctx->result; } /** * keyring_search - Search the supplied keyring tree for a matching key * @keyring: The root of the keyring tree to be searched. * @type: The type of keyring we want to find. * @description: The name of the keyring we want to find. 
* @recurse: True to search the children of @keyring also * * As keyring_search_rcu() above, but using the current task's credentials and * type's default matching function and preferred search method. */ key_ref_t keyring_search(key_ref_t keyring, struct key_type *type, const char *description, bool recurse) { struct keyring_search_context ctx = { .index_key.type = type, .index_key.description = description, .index_key.desc_len = strlen(description), .cred = current_cred(), .match_data.cmp = key_default_cmp, .match_data.raw_data = description, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, .flags = KEYRING_SEARCH_DO_STATE_CHECK, }; key_ref_t key; int ret; if (recurse) ctx.flags |= KEYRING_SEARCH_RECURSE; if (type->match_preparse) { ret = type->match_preparse(&ctx.match_data); if (ret < 0) return ERR_PTR(ret); } rcu_read_lock(); key = keyring_search_rcu(keyring, &ctx); rcu_read_unlock(); if (type->match_free) type->match_free(&ctx.match_data); return key; } EXPORT_SYMBOL(keyring_search); static struct key_restriction *keyring_restriction_alloc( key_restrict_link_func_t check) { struct key_restriction *keyres = kzalloc(sizeof(struct key_restriction), GFP_KERNEL); if (!keyres) return ERR_PTR(-ENOMEM); keyres->check = check; return keyres; } /* * Semaphore to serialise restriction setup to prevent reference count * cycles through restriction key pointers. */ static DECLARE_RWSEM(keyring_serialise_restrict_sem); /* * Check for restriction cycles that would prevent keyring garbage collection. * keyring_serialise_restrict_sem must be held. */ static bool keyring_detect_restriction_cycle(const struct key *dest_keyring, struct key_restriction *keyres) { while (keyres && keyres->key && keyres->key->type == &key_type_keyring) { if (keyres->key == dest_keyring) return true; keyres = keyres->key->restrict_link; } return false; } /** * keyring_restrict - Look up and apply a restriction to a keyring * @keyring_ref: The keyring to be restricted * @type: The key type that will provide the restriction checker. * @restriction: The restriction options to apply to the keyring * * Look up a keyring and apply a restriction to it. The restriction is managed * by the specific key type, but can be configured by the options specified in * the restriction string. 
*/ int keyring_restrict(key_ref_t keyring_ref, const char *type, const char *restriction) { struct key *keyring; struct key_type *restrict_type = NULL; struct key_restriction *restrict_link; int ret = 0; keyring = key_ref_to_ptr(keyring_ref); key_check(keyring); if (keyring->type != &key_type_keyring) return -ENOTDIR; if (!type) { restrict_link = keyring_restriction_alloc(restrict_link_reject); } else { restrict_type = key_type_lookup(type); if (IS_ERR(restrict_type)) return PTR_ERR(restrict_type); if (!restrict_type->lookup_restriction) { ret = -ENOENT; goto error; } restrict_link = restrict_type->lookup_restriction(restriction); } if (IS_ERR(restrict_link)) { ret = PTR_ERR(restrict_link); goto error; } down_write(&keyring->sem); down_write(&keyring_serialise_restrict_sem); if (keyring->restrict_link) { ret = -EEXIST; } else if (keyring_detect_restriction_cycle(keyring, restrict_link)) { ret = -EDEADLK; } else { keyring->restrict_link = restrict_link; notify_key(keyring, NOTIFY_KEY_SETATTR, 0); } up_write(&keyring_serialise_restrict_sem); up_write(&keyring->sem); if (ret < 0) { key_put(restrict_link->key); kfree(restrict_link); } error: if (restrict_type) key_type_put(restrict_type); return ret; } EXPORT_SYMBOL(keyring_restrict); /* * Search the given keyring for a key that might be updated. * * The caller must guarantee that the keyring is a keyring and that the * permission is granted to modify the keyring as no check is made here. The * caller must also hold a lock on the keyring semaphore. * * Returns a pointer to the found key with usage count incremented if * successful and returns NULL if not found. Revoked and invalidated keys are * skipped over. * * If successful, the possession indicator is propagated from the keyring ref * to the returned key reference. */ key_ref_t find_key_to_update(key_ref_t keyring_ref, const struct keyring_index_key *index_key) { struct key *keyring, *key; const void *object; keyring = key_ref_to_ptr(keyring_ref); kenter("{%d},{%s,%s}", keyring->serial, index_key->type->name, index_key->description); object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops, index_key); if (object) goto found; kleave(" = NULL"); return NULL; found: key = keyring_ptr_to_key(object); if (key->flags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED))) { kleave(" = NULL [x]"); return NULL; } __key_get(key); kleave(" = {%d}", key->serial); return make_key_ref(key, is_key_possessed(keyring_ref)); } /* * Find a keyring with the specified name. * * Only keyrings that have nonzero refcount, are not revoked, and are owned by a * user in the current user namespace are considered. If @uid_keyring is %true, * the keyring additionally must have been allocated as a user or user session * keyring; otherwise, it must grant Search permission directly to the caller. * * Returns a pointer to the keyring with the keyring's refcount having being * incremented on success. -ENOKEY is returned if a key could not be found. 
*/ struct key *find_keyring_by_name(const char *name, bool uid_keyring) { struct user_namespace *ns = current_user_ns(); struct key *keyring; if (!name) return ERR_PTR(-EINVAL); read_lock(&keyring_name_lock); /* Search this hash bucket for a keyring with a matching name that * grants Search permission and that hasn't been revoked */ list_for_each_entry(keyring, &ns->keyring_name_list, name_link) { if (!kuid_has_mapping(ns, keyring->user->uid)) continue; if (test_bit(KEY_FLAG_REVOKED, &keyring->flags)) continue; if (strcmp(keyring->description, name) != 0) continue; if (uid_keyring) { if (!test_bit(KEY_FLAG_UID_KEYRING, &keyring->flags)) continue; } else { if (key_permission(make_key_ref(keyring, 0), KEY_NEED_SEARCH) < 0) continue; } /* we've got a match but we might end up racing with * key_cleanup() if the keyring is currently 'dead' * (ie. it has a zero usage count) */ if (!refcount_inc_not_zero(&keyring->usage)) continue; keyring->last_used_at = ktime_get_real_seconds(); goto out; } keyring = ERR_PTR(-ENOKEY); out: read_unlock(&keyring_name_lock); return keyring; } static int keyring_detect_cycle_iterator(const void *object, void *iterator_data) { struct keyring_search_context *ctx = iterator_data; const struct key *key = keyring_ptr_to_key(object); kenter("{%d}", key->serial); /* We might get a keyring with matching index-key that is nonetheless a * different keyring. */ if (key != ctx->match_data.raw_data) return 0; ctx->result = ERR_PTR(-EDEADLK); return 1; } /* * See if a cycle will be created by inserting acyclic tree B in acyclic * tree A at the topmost level (ie: as a direct child of A). * * Since we are adding B to A at the top level, checking for cycles should just * be a matter of seeing if node A is somewhere in tree B. */ static int keyring_detect_cycle(struct key *A, struct key *B) { struct keyring_search_context ctx = { .index_key = A->index_key, .match_data.raw_data = A, .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, .iterator = keyring_detect_cycle_iterator, .flags = (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_NO_UPDATE_TIME | KEYRING_SEARCH_NO_CHECK_PERM | KEYRING_SEARCH_DETECT_TOO_DEEP | KEYRING_SEARCH_RECURSE), }; rcu_read_lock(); search_nested_keyrings(B, &ctx); rcu_read_unlock(); return PTR_ERR(ctx.result) == -EAGAIN ? 0 : PTR_ERR(ctx.result); } /* * Lock keyring for link. */ int __key_link_lock(struct key *keyring, const struct keyring_index_key *index_key) __acquires(&keyring->sem) __acquires(&keyring_serialise_link_lock) { if (keyring->type != &key_type_keyring) return -ENOTDIR; down_write(&keyring->sem); /* Serialise link/link calls to prevent parallel calls causing a cycle * when linking two keyring in opposite orders. */ if (index_key->type == &key_type_keyring) mutex_lock(&keyring_serialise_link_lock); return 0; } /* * Lock keyrings for move (link/unlink combination). */ int __key_move_lock(struct key *l_keyring, struct key *u_keyring, const struct keyring_index_key *index_key) __acquires(&l_keyring->sem) __acquires(&u_keyring->sem) __acquires(&keyring_serialise_link_lock) { if (l_keyring->type != &key_type_keyring || u_keyring->type != &key_type_keyring) return -ENOTDIR; /* We have to be very careful here to take the keyring locks in the * right order, lest we open ourselves to deadlocking against another * move operation. 
*/ if (l_keyring < u_keyring) { down_write(&l_keyring->sem); down_write_nested(&u_keyring->sem, 1); } else { down_write(&u_keyring->sem); down_write_nested(&l_keyring->sem, 1); } /* Serialise link/link calls to prevent parallel calls causing a cycle * when linking two keyring in opposite orders. */ if (index_key->type == &key_type_keyring) mutex_lock(&keyring_serialise_link_lock); return 0; } /* * Preallocate memory so that a key can be linked into to a keyring. */ int __key_link_begin(struct key *keyring, const struct keyring_index_key *index_key, struct assoc_array_edit **_edit) { struct assoc_array_edit *edit; int ret; kenter("%d,%s,%s,", keyring->serial, index_key->type->name, index_key->description); BUG_ON(index_key->desc_len == 0); BUG_ON(*_edit != NULL); *_edit = NULL; ret = -EKEYREVOKED; if (test_bit(KEY_FLAG_REVOKED, &keyring->flags)) goto error; /* Create an edit script that will insert/replace the key in the * keyring tree. */ edit = assoc_array_insert(&keyring->keys, &keyring_assoc_array_ops, index_key, NULL); if (IS_ERR(edit)) { ret = PTR_ERR(edit); goto error; } /* If we're not replacing a link in-place then we're going to need some * extra quota. */ if (!edit->dead_leaf) { ret = key_payload_reserve(keyring, keyring->datalen + KEYQUOTA_LINK_BYTES); if (ret < 0) goto error_cancel; } *_edit = edit; kleave(" = 0"); return 0; error_cancel: assoc_array_cancel_edit(edit); error: kleave(" = %d", ret); return ret; } /* * Check already instantiated keys aren't going to be a problem. * * The caller must have called __key_link_begin(). Don't need to call this for * keys that were created since __key_link_begin() was called. */ int __key_link_check_live_key(struct key *keyring, struct key *key) { if (key->type == &key_type_keyring) /* check that we aren't going to create a cycle by linking one * keyring to another */ return keyring_detect_cycle(keyring, key); return 0; } /* * Link a key into to a keyring. * * Must be called with __key_link_begin() having being called. Discards any * already extant link to matching key if there is one, so that each keyring * holds at most one link to any given key of a particular type+description * combination. */ void __key_link(struct key *keyring, struct key *key, struct assoc_array_edit **_edit) { __key_get(key); assoc_array_insert_set_object(*_edit, keyring_key_to_ptr(key)); assoc_array_apply_edit(*_edit); *_edit = NULL; notify_key(keyring, NOTIFY_KEY_LINKED, key_serial(key)); } /* * Finish linking a key into to a keyring. * * Must be called with __key_link_begin() having being called. */ void __key_link_end(struct key *keyring, const struct keyring_index_key *index_key, struct assoc_array_edit *edit) __releases(&keyring->sem) __releases(&keyring_serialise_link_lock) { BUG_ON(index_key->type == NULL); kenter("%d,%s,", keyring->serial, index_key->type->name); if (edit) { if (!edit->dead_leaf) { key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES); } assoc_array_cancel_edit(edit); } up_write(&keyring->sem); if (index_key->type == &key_type_keyring) mutex_unlock(&keyring_serialise_link_lock); } /* * Check addition of keys to restricted keyrings. */ static int __key_link_check_restriction(struct key *keyring, struct key *key) { if (!keyring->restrict_link || !keyring->restrict_link->check) return 0; return keyring->restrict_link->check(keyring, key->type, &key->payload, keyring->restrict_link->key); } /** * key_link - Link a key to a keyring * @keyring: The keyring to make the link in. * @key: The key to link to. 
* * Make a link in a keyring to a key, such that the keyring holds a reference * on that key and the key can potentially be found by searching that keyring. * * This function will write-lock the keyring's semaphore and will consume some * of the user's key data quota to hold the link. * * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, * -EKEYREVOKED if the keyring has been revoked, -ENFILE if the keyring is * full, -EDQUOT if there is insufficient key data quota remaining to add * another link or -ENOMEM if there's insufficient memory. * * It is assumed that the caller has checked that it is permitted for a link to * be made (the keyring should have Write permission and the key Link * permission). */ int key_link(struct key *keyring, struct key *key) { struct assoc_array_edit *edit = NULL; int ret; kenter("{%d,%d}", keyring->serial, refcount_read(&keyring->usage)); key_check(keyring); key_check(key); ret = __key_link_lock(keyring, &key->index_key); if (ret < 0) goto error; ret = __key_link_begin(keyring, &key->index_key, &edit); if (ret < 0) goto error_end; kdebug("begun {%d,%d}", keyring->serial, refcount_read(&keyring->usage)); ret = __key_link_check_restriction(keyring, key); if (ret == 0) ret = __key_link_check_live_key(keyring, key); if (ret == 0) __key_link(keyring, key, &edit); error_end: __key_link_end(keyring, &key->index_key, edit); error: kleave(" = %d {%d,%d}", ret, keyring->serial, refcount_read(&keyring->usage)); return ret; } EXPORT_SYMBOL(key_link); /* * Lock a keyring for unlink. */ static int __key_unlink_lock(struct key *keyring) __acquires(&keyring->sem) { if (keyring->type != &key_type_keyring) return -ENOTDIR; down_write(&keyring->sem); return 0; } /* * Begin the process of unlinking a key from a keyring. */ static int __key_unlink_begin(struct key *keyring, struct key *key, struct assoc_array_edit **_edit) { struct assoc_array_edit *edit; BUG_ON(*_edit != NULL); edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops, &key->index_key); if (IS_ERR(edit)) return PTR_ERR(edit); if (!edit) return -ENOENT; *_edit = edit; return 0; } /* * Apply an unlink change. */ static void __key_unlink(struct key *keyring, struct key *key, struct assoc_array_edit **_edit) { assoc_array_apply_edit(*_edit); notify_key(keyring, NOTIFY_KEY_UNLINKED, key_serial(key)); *_edit = NULL; key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES); } /* * Finish unlinking a key from to a keyring. */ static void __key_unlink_end(struct key *keyring, struct key *key, struct assoc_array_edit *edit) __releases(&keyring->sem) { if (edit) assoc_array_cancel_edit(edit); up_write(&keyring->sem); } /** * key_unlink - Unlink the first link to a key from a keyring. * @keyring: The keyring to remove the link from. * @key: The key the link is to. * * Remove a link from a keyring to a key. * * This function will write-lock the keyring's semaphore. * * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, -ENOENT if * the key isn't linked to by the keyring or -ENOMEM if there's insufficient * memory. * * It is assumed that the caller has checked that it is permitted for a link to * be removed (the keyring should have Write permission; no permissions are * required on the key). 
*/ int key_unlink(struct key *keyring, struct key *key) { struct assoc_array_edit *edit = NULL; int ret; key_check(keyring); key_check(key); ret = __key_unlink_lock(keyring); if (ret < 0) return ret; ret = __key_unlink_begin(keyring, key, &edit); if (ret == 0) __key_unlink(keyring, key, &edit); __key_unlink_end(keyring, key, edit); return ret; } EXPORT_SYMBOL(key_unlink); /** * key_move - Move a key from one keyring to another * @key: The key to move * @from_keyring: The keyring to remove the link from. * @to_keyring: The keyring to make the link in. * @flags: Qualifying flags, such as KEYCTL_MOVE_EXCL. * * Make a link in @to_keyring to a key, such that the keyring holds a reference * on that key and the key can potentially be found by searching that keyring * whilst simultaneously removing a link to the key from @from_keyring. * * This function will write-lock both keyring's semaphores and will consume * some of the user's key data quota to hold the link on @to_keyring. * * Returns 0 if successful, -ENOTDIR if either keyring isn't a keyring, * -EKEYREVOKED if either keyring has been revoked, -ENFILE if the second * keyring is full, -EDQUOT if there is insufficient key data quota remaining * to add another link or -ENOMEM if there's insufficient memory. If * KEYCTL_MOVE_EXCL is set, then -EEXIST will be returned if there's already a * matching key in @to_keyring. * * It is assumed that the caller has checked that it is permitted for a link to * be made (the keyring should have Write permission and the key Link * permission). */ int key_move(struct key *key, struct key *from_keyring, struct key *to_keyring, unsigned int flags) { struct assoc_array_edit *from_edit = NULL, *to_edit = NULL; int ret; kenter("%d,%d,%d", key->serial, from_keyring->serial, to_keyring->serial); if (from_keyring == to_keyring) return 0; key_check(key); key_check(from_keyring); key_check(to_keyring); ret = __key_move_lock(from_keyring, to_keyring, &key->index_key); if (ret < 0) goto out; ret = __key_unlink_begin(from_keyring, key, &from_edit); if (ret < 0) goto error; ret = __key_link_begin(to_keyring, &key->index_key, &to_edit); if (ret < 0) goto error; ret = -EEXIST; if (to_edit->dead_leaf && (flags & KEYCTL_MOVE_EXCL)) goto error; ret = __key_link_check_restriction(to_keyring, key); if (ret < 0) goto error; ret = __key_link_check_live_key(to_keyring, key); if (ret < 0) goto error; __key_unlink(from_keyring, key, &from_edit); __key_link(to_keyring, key, &to_edit); error: __key_link_end(to_keyring, &key->index_key, to_edit); __key_unlink_end(from_keyring, key, from_edit); out: kleave(" = %d", ret); return ret; } EXPORT_SYMBOL(key_move); /** * keyring_clear - Clear a keyring * @keyring: The keyring to clear. * * Clear the contents of the specified keyring. * * Returns 0 if successful or -ENOTDIR if the keyring isn't a keyring. */ int keyring_clear(struct key *keyring) { struct assoc_array_edit *edit; int ret; if (keyring->type != &key_type_keyring) return -ENOTDIR; down_write(&keyring->sem); edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops); if (IS_ERR(edit)) { ret = PTR_ERR(edit); } else { if (edit) assoc_array_apply_edit(edit); notify_key(keyring, NOTIFY_KEY_CLEARED, 0); key_payload_reserve(keyring, 0); ret = 0; } up_write(&keyring->sem); return ret; } EXPORT_SYMBOL(keyring_clear); /* * Dispose of the links from a revoked keyring. * * This is called with the key sem write-locked. 
*/ static void keyring_revoke(struct key *keyring) { struct assoc_array_edit *edit; edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops); if (!IS_ERR(edit)) { if (edit) assoc_array_apply_edit(edit); key_payload_reserve(keyring, 0); } } static bool keyring_gc_select_iterator(void *object, void *iterator_data) { struct key *key = keyring_ptr_to_key(object); time64_t *limit = iterator_data; if (key_is_dead(key, *limit)) return false; key_get(key); return true; } static int keyring_gc_check_iterator(const void *object, void *iterator_data) { const struct key *key = keyring_ptr_to_key(object); time64_t *limit = iterator_data; key_check(key); return key_is_dead(key, *limit); } /* * Garbage collect pointers from a keyring. * * Not called with any locks held. The keyring's key struct will not be * deallocated under us as only our caller may deallocate it. */ void keyring_gc(struct key *keyring, time64_t limit) { int result; kenter("%x{%s}", keyring->serial, keyring->description ?: ""); if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED))) goto dont_gc; /* scan the keyring looking for dead keys */ rcu_read_lock(); result = assoc_array_iterate(&keyring->keys, keyring_gc_check_iterator, &limit); rcu_read_unlock(); if (result == true) goto do_gc; dont_gc: kleave(" [no gc]"); return; do_gc: down_write(&keyring->sem); assoc_array_gc(&keyring->keys, &keyring_assoc_array_ops, keyring_gc_select_iterator, &limit); up_write(&keyring->sem); kleave(" [gc]"); } /* * Garbage collect restriction pointers from a keyring. * * Keyring restrictions are associated with a key type, and must be cleaned * up if the key type is unregistered. The restriction is altered to always * reject additional keys so a keyring cannot be opened up by unregistering * a key type. * * Not called with any keyring locks held. The keyring's key struct will not * be deallocated under us as only our caller may deallocate it. * * The caller is required to hold key_types_sem and dead_type->sem. This is * fulfilled by key_gc_keytype() holding the locks on behalf of * key_garbage_collector(), which it invokes on a workqueue. */ void keyring_restriction_gc(struct key *keyring, struct key_type *dead_type) { struct key_restriction *keyres; kenter("%x{%s}", keyring->serial, keyring->description ?: ""); /* * keyring->restrict_link is only assigned at key allocation time * or with the key type locked, so the only values that could be * concurrently assigned to keyring->restrict_link are for key * types other than dead_type. Given this, it's ok to check * the key type before acquiring keyring->sem. */ if (!dead_type || !keyring->restrict_link || keyring->restrict_link->keytype != dead_type) { kleave(" [no restriction gc]"); return; } /* Lock the keyring to ensure that a link is not in progress */ down_write(&keyring->sem); keyres = keyring->restrict_link; keyres->check = restrict_link_reject; key_put(keyres->key); keyres->key = NULL; keyres->keytype = NULL; up_write(&keyring->sem); kleave(" [restriction gc]"); }
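/*
 * Illustrative user-space sketch (not part of keyring.c): it exercises the
 * keyring semantics implemented above through libkeyutils - creating a
 * keyring, linking a "user" key into it, reading the keyring back as the
 * binary key_serial_t array produced by keyring_read(), and searching by
 * type and description.  The key names ("example:ring", "example:secret")
 * are arbitrary; the libkeyutils calls are real, link with -lkeyutils.
 * Error handling is kept minimal.
 */
#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	key_serial_t ring, key, *ids;
	long len, found, i;

	/* keyring_alloc() path: a keyring is just a key of type "keyring" */
	ring = add_key("keyring", "example:ring", NULL, 0,
		       KEY_SPEC_SESSION_KEYRING);
	if (ring < 0) {
		perror("add_key(keyring)");
		return 1;
	}

	/* key_link() path: instantiate a user key directly into the keyring */
	key = add_key("user", "example:secret", "payload", 7, ring);
	if (key < 0) {
		perror("add_key(user)");
		return 1;
	}

	/* keyring_read() path: the payload read back is an array of IDs */
	len = keyctl_read_alloc(ring, (void **)&ids);
	if (len < 0) {
		perror("keyctl_read_alloc");
		return 1;
	}
	for (i = 0; i < len / (long)sizeof(key_serial_t); i++)
		printf("linked key: %d\n", ids[i]);
	free(ids);

	/* keyring_search() path: look the key up by type + description */
	found = keyctl_search(ring, "user", "example:secret", 0);
	printf("search returned: %ld\n", found);
	return 0;
}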
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Forwarding decision
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"

/* Don't forward packets to originating port or forwarding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	struct net_bridge_vlan_group *vg;

	vg = nbp_vlan_group_rcu(p);
	return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
		(br_mst_is_enabled(p->br) || p->state == BR_STATE_FORWARDING) &&
		br_allowed_egress(vg, skb) &&
		nbp_switchdev_allowed_egress(p, skb) &&
		!br_skb_isolated(p, skb);
}

int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_push(skb, ETH_HLEN);
	if (!is_skb_forwardable(skb->dev, skb))
		goto drop;

	br_drop_fake_rtable(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    eth_type_vlan(skb->protocol)) {
		int depth;

		if (!vlan_get_protocol_and_depth(skb, skb->protocol, &depth))
			goto drop;

		skb_set_network_header(skb, depth);
	}

	br_switchdev_frame_set_offload_fwd_mark(skb);
	dev_queue_xmit(skb);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);

int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_clear_tstamp(skb);
	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
		       net, sk, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);
}
EXPORT_SYMBOL_GPL(br_forward_finish);

static void __br_forward(const struct net_bridge_port *to,
			 struct sk_buff *skb, bool local_orig)
{
	struct net_bridge_vlan_group *vg;
	struct net_device *indev;
	struct net *net;
	int br_hook;

	/* Mark the skb for forwarding offload early so that br_handle_vlan()
	 * can know whether to pop the VLAN header on egress or keep it.
*/ nbp_switchdev_frame_mark_tx_fwd_offload(to, skb); vg = nbp_vlan_group_rcu(to); skb = br_handle_vlan(to->br, to, vg, skb); if (!skb) return; indev = skb->dev; skb->dev = to->dev; if (!local_orig) { if (skb_warn_if_lro(skb)) { kfree_skb(skb); return; } br_hook = NF_BR_FORWARD; skb_forward_csum(skb); net = dev_net(indev); } else { if (unlikely(netpoll_tx_running(to->br->dev))) { skb_push(skb, ETH_HLEN); if (!is_skb_forwardable(skb->dev, skb)) kfree_skb(skb); else br_netpoll_send_skb(to, skb); return; } br_hook = NF_BR_LOCAL_OUT; net = dev_net(skb->dev); indev = NULL; } NF_HOOK(NFPROTO_BRIDGE, br_hook, net, NULL, skb, indev, skb->dev, br_forward_finish); } static int deliver_clone(const struct net_bridge_port *prev, struct sk_buff *skb, bool local_orig) { struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev; skb = skb_clone(skb, GFP_ATOMIC); if (!skb) { DEV_STATS_INC(dev, tx_dropped); return -ENOMEM; } __br_forward(prev, skb, local_orig); return 0; } /** * br_forward - forward a packet to a specific port * @to: destination port * @skb: packet being forwarded * @local_rcv: packet will be received locally after forwarding * @local_orig: packet is locally originated * * Should be called with rcu_read_lock. */ void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, bool local_rcv, bool local_orig) { if (unlikely(!to)) goto out; /* redirect to backup link if the destination port is down */ if (rcu_access_pointer(to->backup_port) && !netif_carrier_ok(to->dev)) { struct net_bridge_port *backup_port; backup_port = rcu_dereference(to->backup_port); if (unlikely(!backup_port)) goto out; BR_INPUT_SKB_CB(skb)->backup_nhid = READ_ONCE(to->backup_nhid); to = backup_port; } if (should_deliver(to, skb)) { if (local_rcv) deliver_clone(to, skb, local_orig); else __br_forward(to, skb, local_orig); return; } out: if (!local_rcv) kfree_skb(skb); } EXPORT_SYMBOL_GPL(br_forward); static struct net_bridge_port *maybe_deliver( struct net_bridge_port *prev, struct net_bridge_port *p, struct sk_buff *skb, bool local_orig) { u8 igmp_type = br_multicast_igmp_type(skb); int err; if (!should_deliver(p, skb)) return prev; nbp_switchdev_frame_mark_tx_fwd_to_hwdom(p, skb); if (!prev) goto out; err = deliver_clone(prev, skb, local_orig); if (err) return ERR_PTR(err); out: br_multicast_count(p->br, p, skb, igmp_type, BR_MCAST_DIR_TX); return p; } /* called under rcu_read_lock */ void br_flood(struct net_bridge *br, struct sk_buff *skb, enum br_pkt_type pkt_type, bool local_rcv, bool local_orig, u16 vid) { enum skb_drop_reason reason = SKB_DROP_REASON_NO_TX_TARGET; struct net_bridge_port *prev = NULL; struct net_bridge_port *p; br_tc_skb_miss_set(skb, pkt_type != BR_PKT_BROADCAST); list_for_each_entry_rcu(p, &br->port_list, list) { /* Do not flood unicast traffic to ports that turn it off, nor * other traffic if flood off, except for traffic we originate */ switch (pkt_type) { case BR_PKT_UNICAST: if (!(p->flags & BR_FLOOD)) continue; break; case BR_PKT_MULTICAST: if (!(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev) continue; break; case BR_PKT_BROADCAST: if (!(p->flags & BR_BCAST_FLOOD) && skb->dev != br->dev) continue; break; } /* Do not flood to ports that enable proxy ARP */ if (p->flags & BR_PROXYARP) continue; if (BR_INPUT_SKB_CB(skb)->proxyarp_replied && ((p->flags & BR_PROXYARP_WIFI) || br_is_neigh_suppress_enabled(p, vid))) continue; prev = maybe_deliver(prev, p, skb, local_orig); if (IS_ERR(prev)) { reason = PTR_ERR(prev) == -ENOMEM ? 
SKB_DROP_REASON_NOMEM : SKB_DROP_REASON_NOT_SPECIFIED; goto out; } } if (!prev) goto out; if (local_rcv) deliver_clone(prev, skb, local_orig); else __br_forward(prev, skb, local_orig); return; out: if (!local_rcv) kfree_skb_reason(skb, reason); } #ifdef CONFIG_BRIDGE_IGMP_SNOOPING static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb, const unsigned char *addr, bool local_orig) { struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev; const unsigned char *src = eth_hdr(skb)->h_source; struct sk_buff *nskb; if (!should_deliver(p, skb)) return; /* Even with hairpin, no soliloquies - prevent breaking IPv6 DAD */ if (skb->dev == p->dev && ether_addr_equal(src, addr)) return; __skb_push(skb, ETH_HLEN); nskb = pskb_copy(skb, GFP_ATOMIC); __skb_pull(skb, ETH_HLEN); if (!nskb) { DEV_STATS_INC(dev, tx_dropped); return; } skb = nskb; __skb_pull(skb, ETH_HLEN); if (!is_broadcast_ether_addr(addr)) memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN); __br_forward(p, skb, local_orig); } /* called with rcu_read_lock */ void br_multicast_flood(struct net_bridge_mdb_entry *mdst, struct sk_buff *skb, struct net_bridge_mcast *brmctx, bool local_rcv, bool local_orig) { enum skb_drop_reason reason = SKB_DROP_REASON_NO_TX_TARGET; struct net_bridge_port *prev = NULL; struct net_bridge_port_group *p; bool allow_mode_include = true; struct hlist_node *rp; rp = br_multicast_get_first_rport_node(brmctx, skb); if (mdst) { p = rcu_dereference(mdst->ports); if (br_multicast_should_handle_mode(brmctx, mdst->addr.proto) && br_multicast_is_star_g(&mdst->addr)) allow_mode_include = false; } else { p = NULL; br_tc_skb_miss_set(skb, true); } while (p || rp) { struct net_bridge_port *port, *lport, *rport; lport = p ? p->key.port : NULL; rport = br_multicast_rport_from_node_skb(rp, skb); if ((unsigned long)lport > (unsigned long)rport) { port = lport; if (port->flags & BR_MULTICAST_TO_UNICAST) { maybe_deliver_addr(lport, skb, p->eth_addr, local_orig); goto delivered; } if ((!allow_mode_include && p->filter_mode == MCAST_INCLUDE) || (p->flags & MDB_PG_FLAGS_BLOCKED)) goto delivered; } else { port = rport; } prev = maybe_deliver(prev, port, skb, local_orig); if (IS_ERR(prev)) { reason = PTR_ERR(prev) == -ENOMEM ? SKB_DROP_REASON_NOMEM : SKB_DROP_REASON_NOT_SPECIFIED; goto out; } delivered: if ((unsigned long)lport >= (unsigned long)port) p = rcu_dereference(p->next); if ((unsigned long)rport >= (unsigned long)port) rp = rcu_dereference(hlist_next_rcu(rp)); } if (!prev) goto out; if (local_rcv) deliver_clone(prev, skb, local_orig); else __br_forward(prev, skb, local_orig); return; out: if (!local_rcv) kfree_skb_reason(skb, reason); } #endif
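/*
 * Illustrative sketch (not part of br_forward.c): a stripped-down,
 * self-contained model of the per-port flood decision made in br_flood()
 * above.  The struct and function names below (port_model,
 * port_would_flood) are invented for illustration and do not exist in the
 * kernel; the flags mirror the BR_FLOOD / BR_MCAST_FLOOD / BR_BCAST_FLOOD /
 * BR_PROXYARP checks, with "locally_originated" standing in for
 * skb->dev == br->dev.
 */
#include <stdbool.h>
#include <stdio.h>

enum pkt_type { PKT_UNICAST, PKT_MULTICAST, PKT_BROADCAST };

struct port_model {
	const char *name;
	bool flood;		/* flood unknown unicast (BR_FLOOD) */
	bool mcast_flood;	/* flood multicast (BR_MCAST_FLOOD) */
	bool bcast_flood;	/* flood broadcast (BR_BCAST_FLOOD) */
	bool proxyarp;		/* proxy-ARP ports are never flooded to */
};

/* Mirrors the switch statement in br_flood(): does this port get a copy? */
static bool port_would_flood(const struct port_model *p, enum pkt_type type,
			     bool locally_originated)
{
	if (p->proxyarp)
		return false;

	switch (type) {
	case PKT_UNICAST:
		return p->flood;
	case PKT_MULTICAST:
		return p->mcast_flood || locally_originated;
	case PKT_BROADCAST:
		return p->bcast_flood || locally_originated;
	}
	return false;
}

int main(void)
{
	const struct port_model ports[] = {
		{ "eth0", true,  true,  true,  false },
		{ "eth1", false, true,  true,  false },
		{ "eth2", true,  false, false, true  },
	};

	for (unsigned int i = 0; i < sizeof(ports) / sizeof(ports[0]); i++)
		printf("%s: unknown-unicast=%d broadcast=%d\n",
		       ports[i].name,
		       port_would_flood(&ports[i], PKT_UNICAST, false),
		       port_would_flood(&ports[i], PKT_BROADCAST, false));
	return 0;
}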
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/swapfile.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 */

#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/shmem_fs.h>
#include <linux/blk-cgroup.h>
#include <linux/random.h>
#include <linux/writeback.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/memcontrol.h>
#include <linux/poll.h>
#include <linux/oom.h>
#include <linux/swapfile.h>
#include <linux/export.h>
#include <linux/sort.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/zswap.h>
#include <linux/plist.h>

#include <asm/tlbflush.h>
#include <linux/swapops.h>
#include <linux/swap_cgroup.h>
#include "internal.h"
#include "swap.h"

static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
				 unsigned char);
static void free_swap_count_continuations(struct swap_info_struct *);
static void swap_entries_free(struct swap_info_struct *si,
			      struct swap_cluster_info *ci,
			      swp_entry_t entry, unsigned int nr_pages);
static void swap_range_alloc(struct swap_info_struct *si,
			     unsigned int nr_entries);
static bool folio_swapcache_freeable(struct folio *folio);
static struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
					      unsigned long offset);
static inline void unlock_cluster(struct swap_cluster_info *ci);

static DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
atomic_long_t nr_swap_pages;
/*
 * Some modules use swappable objects and may try to swap them out under
 * memory pressure (via the shrinker).
Before doing so, they may wish to * check to see if any swap space is available. */ EXPORT_SYMBOL_GPL(nr_swap_pages); /* protected with swap_lock. reading in vm_swap_full() doesn't need lock */ long total_swap_pages; static int least_priority = -1; unsigned long swapfile_maximum_size; #ifdef CONFIG_MIGRATION bool swap_migration_ad_supported; #endif /* CONFIG_MIGRATION */ static const char Bad_file[] = "Bad swap file entry "; static const char Unused_file[] = "Unused swap file entry "; static const char Bad_offset[] = "Bad swap offset entry "; static const char Unused_offset[] = "Unused swap offset entry "; /* * all active swap_info_structs * protected with swap_lock, and ordered by priority. */ static PLIST_HEAD(swap_active_head); /* * all available (active, not full) swap_info_structs * protected with swap_avail_lock, ordered by priority. * This is used by folio_alloc_swap() instead of swap_active_head * because swap_active_head includes all swap_info_structs, * but folio_alloc_swap() doesn't need to look at full ones. * This uses its own lock instead of swap_lock because when a * swap_info_struct changes between not-full/full, it needs to * add/remove itself to/from this list, but the swap_info_struct->lock * is held and the locking order requires swap_lock to be taken * before any swap_info_struct->lock. */ static struct plist_head *swap_avail_heads; static DEFINE_SPINLOCK(swap_avail_lock); static struct swap_info_struct *swap_info[MAX_SWAPFILES]; static DEFINE_MUTEX(swapon_mutex); static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait); /* Activity counter to indicate that a swapon or swapoff has occurred */ static atomic_t proc_poll_event = ATOMIC_INIT(0); atomic_t nr_rotate_swap = ATOMIC_INIT(0); struct percpu_swap_cluster { struct swap_info_struct *si[SWAP_NR_ORDERS]; unsigned long offset[SWAP_NR_ORDERS]; local_lock_t lock; }; static DEFINE_PER_CPU(struct percpu_swap_cluster, percpu_swap_cluster) = { .si = { NULL }, .offset = { SWAP_ENTRY_INVALID }, .lock = INIT_LOCAL_LOCK(), }; static struct swap_info_struct *swap_type_to_swap_info(int type) { if (type >= MAX_SWAPFILES) return NULL; return READ_ONCE(swap_info[type]); /* rcu_dereference() */ } static inline unsigned char swap_count(unsigned char ent) { return ent & ~SWAP_HAS_CACHE; /* may include COUNT_CONTINUED flag */ } /* * Use the second highest bit of inuse_pages counter as the indicator * if one swap device is on the available plist, so the atomic can * still be updated arithmetically while having special data embedded. * * inuse_pages counter is the only thing indicating if a device should * be on avail_lists or not (except swapon / swapoff). By embedding the * off-list bit in the atomic counter, updates no longer need any lock * to check the list status. * * This bit will be set if the device is not on the plist and not * usable, will be cleared if the device is on the plist. 
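 */

/*
 * A minimal illustrative sketch of the idea described above: a usage
 * counter and an "off the plist" status bit sharing a single word.  The
 * demo_* names are hypothetical and not defined by this file; the real
 * code keeps the counter in si->inuse_pages and uses the SWAP_USAGE_*
 * macros defined just below.
 */
#define DEMO_OFFLIST_BIT	(1UL << (BITS_PER_LONG - 2))
#define DEMO_COUNTER_MASK	(~DEMO_OFFLIST_BIT)

/* Low bits: slots in use.  Second-highest bit: device is off the plist. */
static inline unsigned long demo_usage(unsigned long word)
{
	return word & DEMO_COUNTER_MASK;
}

static inline bool demo_is_offlist(unsigned long word)
{
	return word & DEMO_OFFLIST_BIT;
}

/*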
*/ #define SWAP_USAGE_OFFLIST_BIT (1UL << (BITS_PER_TYPE(atomic_t) - 2)) #define SWAP_USAGE_COUNTER_MASK (~SWAP_USAGE_OFFLIST_BIT) static long swap_usage_in_pages(struct swap_info_struct *si) { return atomic_long_read(&si->inuse_pages) & SWAP_USAGE_COUNTER_MASK; } /* Reclaim the swap entry anyway if possible */ #define TTRS_ANYWAY 0x1 /* * Reclaim the swap entry if there are no more mappings of the * corresponding page */ #define TTRS_UNMAPPED 0x2 /* Reclaim the swap entry if swap is getting full */ #define TTRS_FULL 0x4 static bool swap_only_has_cache(struct swap_info_struct *si, unsigned long offset, int nr_pages) { unsigned char *map = si->swap_map + offset; unsigned char *map_end = map + nr_pages; do { VM_BUG_ON(!(*map & SWAP_HAS_CACHE)); if (*map != SWAP_HAS_CACHE) return false; } while (++map < map_end); return true; } static bool swap_is_last_map(struct swap_info_struct *si, unsigned long offset, int nr_pages, bool *has_cache) { unsigned char *map = si->swap_map + offset; unsigned char *map_end = map + nr_pages; unsigned char count = *map; if (swap_count(count) != 1 && swap_count(count) != SWAP_MAP_SHMEM) return false; while (++map < map_end) { if (*map != count) return false; } *has_cache = !!(count & SWAP_HAS_CACHE); return true; } /* * returns number of pages in the folio that backs the swap entry. If positive, * the folio was reclaimed. If negative, the folio was not reclaimed. If 0, no * folio was associated with the swap entry. */ static int __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset, unsigned long flags) { swp_entry_t entry = swp_entry(si->type, offset); struct address_space *address_space = swap_address_space(entry); struct swap_cluster_info *ci; struct folio *folio; int ret, nr_pages; bool need_reclaim; again: folio = filemap_get_folio(address_space, swap_cache_index(entry)); if (IS_ERR(folio)) return 0; nr_pages = folio_nr_pages(folio); ret = -nr_pages; /* * When this function is called from scan_swap_map_slots() and it's * called by vmscan.c at reclaiming folios. So we hold a folio lock * here. We have to use trylock for avoiding deadlock. This is a special * case and you should use folio_free_swap() with explicit folio_lock() * in usual operations. */ if (!folio_trylock(folio)) goto out; /* * Offset could point to the middle of a large folio, or folio * may no longer point to the expected offset before it's locked. */ entry = folio->swap; if (offset < swp_offset(entry) || offset >= swp_offset(entry) + nr_pages) { folio_unlock(folio); folio_put(folio); goto again; } offset = swp_offset(entry); need_reclaim = ((flags & TTRS_ANYWAY) || ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) || ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio))); if (!need_reclaim || !folio_swapcache_freeable(folio)) goto out_unlock; /* * It's safe to delete the folio from swap cache only if the folio's * swap_map is HAS_CACHE only, which means the slots have no page table * reference or pending writeback, and can't be allocated to others. 
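 *
 * Worked example (illustrative): for a 4-page folio, every one of the four
 * swap_map bytes checked below must be exactly SWAP_HAS_CACHE, i.e. a swap
 * cache reference and nothing else.  A byte of SWAP_HAS_CACHE | 1 (one page
 * table reference plus the cache) makes swap_only_has_cache() return false
 * and the folio is kept.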
*/ ci = lock_cluster(si, offset); need_reclaim = swap_only_has_cache(si, offset, nr_pages); unlock_cluster(ci); if (!need_reclaim) goto out_unlock; delete_from_swap_cache(folio); folio_set_dirty(folio); ret = nr_pages; out_unlock: folio_unlock(folio); out: folio_put(folio); return ret; } static inline struct swap_extent *first_se(struct swap_info_struct *sis) { struct rb_node *rb = rb_first(&sis->swap_extent_root); return rb_entry(rb, struct swap_extent, rb_node); } static inline struct swap_extent *next_se(struct swap_extent *se) { struct rb_node *rb = rb_next(&se->rb_node); return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL; } /* * swapon tell device that all the old swap contents can be discarded, * to allow the swap device to optimize its wear-levelling. */ static int discard_swap(struct swap_info_struct *si) { struct swap_extent *se; sector_t start_block; sector_t nr_blocks; int err = 0; /* Do not discard the swap header page! */ se = first_se(si); start_block = (se->start_block + 1) << (PAGE_SHIFT - 9); nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); if (nr_blocks) { err = blkdev_issue_discard(si->bdev, start_block, nr_blocks, GFP_KERNEL); if (err) return err; cond_resched(); } for (se = next_se(se); se; se = next_se(se)) { start_block = se->start_block << (PAGE_SHIFT - 9); nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); err = blkdev_issue_discard(si->bdev, start_block, nr_blocks, GFP_KERNEL); if (err) break; cond_resched(); } return err; /* That will often be -EOPNOTSUPP */ } static struct swap_extent * offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset) { struct swap_extent *se; struct rb_node *rb; rb = sis->swap_extent_root.rb_node; while (rb) { se = rb_entry(rb, struct swap_extent, rb_node); if (offset < se->start_page) rb = rb->rb_left; else if (offset >= se->start_page + se->nr_pages) rb = rb->rb_right; else return se; } /* It *must* be present */ BUG(); } sector_t swap_folio_sector(struct folio *folio) { struct swap_info_struct *sis = swp_swap_info(folio->swap); struct swap_extent *se; sector_t sector; pgoff_t offset; offset = swp_offset(folio->swap); se = offset_to_swap_extent(sis, offset); sector = se->start_block + (offset - se->start_page); return sector << (PAGE_SHIFT - 9); } /* * swap allocation tell device that a cluster of swap can now be discarded, * to allow the swap device to optimize its wear-levelling. 
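 */

/*
 * A minimal sketch of the block-layer arithmetic used by the discard
 * helpers; demo_page_to_sector() is a hypothetical helper, not one this
 * file defines.  A sector is 512 bytes, so a page number becomes a sector
 * number by shifting left by (PAGE_SHIFT - 9); with 4 KiB pages that is a
 * shift of 3, i.e. 8 sectors per page, and page 100 starts at sector 800.
 */
static inline sector_t demo_page_to_sector(pgoff_t page_nr)
{
	return (sector_t)page_nr << (PAGE_SHIFT - 9);
}

/*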
*/ static void discard_swap_cluster(struct swap_info_struct *si, pgoff_t start_page, pgoff_t nr_pages) { struct swap_extent *se = offset_to_swap_extent(si, start_page); while (nr_pages) { pgoff_t offset = start_page - se->start_page; sector_t start_block = se->start_block + offset; sector_t nr_blocks = se->nr_pages - offset; if (nr_blocks > nr_pages) nr_blocks = nr_pages; start_page += nr_blocks; nr_pages -= nr_blocks; start_block <<= PAGE_SHIFT - 9; nr_blocks <<= PAGE_SHIFT - 9; if (blkdev_issue_discard(si->bdev, start_block, nr_blocks, GFP_NOIO)) break; se = next_se(se); } } #ifdef CONFIG_THP_SWAP #define SWAPFILE_CLUSTER HPAGE_PMD_NR #define swap_entry_order(order) (order) #else #define SWAPFILE_CLUSTER 256 /* * Define swap_entry_order() as constant to let compiler to optimize * out some code if !CONFIG_THP_SWAP */ #define swap_entry_order(order) 0 #endif #define LATENCY_LIMIT 256 static inline bool cluster_is_empty(struct swap_cluster_info *info) { return info->count == 0; } static inline bool cluster_is_discard(struct swap_cluster_info *info) { return info->flags == CLUSTER_FLAG_DISCARD; } static inline bool cluster_is_usable(struct swap_cluster_info *ci, int order) { if (unlikely(ci->flags > CLUSTER_FLAG_USABLE)) return false; if (!order) return true; return cluster_is_empty(ci) || order == ci->order; } static inline unsigned int cluster_index(struct swap_info_struct *si, struct swap_cluster_info *ci) { return ci - si->cluster_info; } static inline struct swap_cluster_info *offset_to_cluster(struct swap_info_struct *si, unsigned long offset) { return &si->cluster_info[offset / SWAPFILE_CLUSTER]; } static inline unsigned int cluster_offset(struct swap_info_struct *si, struct swap_cluster_info *ci) { return cluster_index(si, ci) * SWAPFILE_CLUSTER; } static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si, unsigned long offset) { struct swap_cluster_info *ci; ci = offset_to_cluster(si, offset); spin_lock(&ci->lock); return ci; } static inline void unlock_cluster(struct swap_cluster_info *ci) { spin_unlock(&ci->lock); } static void move_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci, struct list_head *list, enum swap_cluster_flags new_flags) { VM_WARN_ON(ci->flags == new_flags); BUILD_BUG_ON(1 << sizeof(ci->flags) * BITS_PER_BYTE < CLUSTER_FLAG_MAX); lockdep_assert_held(&ci->lock); spin_lock(&si->lock); if (ci->flags == CLUSTER_FLAG_NONE) list_add_tail(&ci->list, list); else list_move_tail(&ci->list, list); spin_unlock(&si->lock); if (ci->flags == CLUSTER_FLAG_FRAG) atomic_long_dec(&si->frag_cluster_nr[ci->order]); else if (new_flags == CLUSTER_FLAG_FRAG) atomic_long_inc(&si->frag_cluster_nr[ci->order]); ci->flags = new_flags; } /* Add a cluster to discard list and schedule it to do discard */ static void swap_cluster_schedule_discard(struct swap_info_struct *si, struct swap_cluster_info *ci) { VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE); move_cluster(si, ci, &si->discard_clusters, CLUSTER_FLAG_DISCARD); schedule_work(&si->discard_work); } static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) { lockdep_assert_held(&ci->lock); move_cluster(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE); ci->order = 0; } /* * Isolate and lock the first cluster that is not contented on a list, * clean its flag before taken off-list. Cluster flag must be in sync * with list status, so cluster updaters can always know the cluster * list status without touching si lock. 
* * Note it's possible that all clusters on a list are contented so * this returns NULL for an non-empty list. */ static struct swap_cluster_info *isolate_lock_cluster( struct swap_info_struct *si, struct list_head *list) { struct swap_cluster_info *ci, *ret = NULL; spin_lock(&si->lock); if (unlikely(!(si->flags & SWP_WRITEOK))) goto out; list_for_each_entry(ci, list, list) { if (!spin_trylock(&ci->lock)) continue; /* We may only isolate and clear flags of following lists */ VM_BUG_ON(!ci->flags); VM_BUG_ON(ci->flags > CLUSTER_FLAG_USABLE && ci->flags != CLUSTER_FLAG_FULL); list_del(&ci->list); ci->flags = CLUSTER_FLAG_NONE; ret = ci; break; } out: spin_unlock(&si->lock); return ret; } /* * Doing discard actually. After a cluster discard is finished, the cluster * will be added to free cluster list. Discard cluster is a bit special as * they don't participate in allocation or reclaim, so clusters marked as * CLUSTER_FLAG_DISCARD must remain off-list or on discard list. */ static bool swap_do_scheduled_discard(struct swap_info_struct *si) { struct swap_cluster_info *ci; bool ret = false; unsigned int idx; spin_lock(&si->lock); while (!list_empty(&si->discard_clusters)) { ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list); /* * Delete the cluster from list to prepare for discard, but keep * the CLUSTER_FLAG_DISCARD flag, percpu_swap_cluster could be * pointing to it, or ran into by relocate_cluster. */ list_del(&ci->list); idx = cluster_index(si, ci); spin_unlock(&si->lock); discard_swap_cluster(si, idx * SWAPFILE_CLUSTER, SWAPFILE_CLUSTER); spin_lock(&ci->lock); /* * Discard is done, clear its flags as it's off-list, then * return the cluster to allocation list. */ ci->flags = CLUSTER_FLAG_NONE; __free_cluster(si, ci); spin_unlock(&ci->lock); ret = true; spin_lock(&si->lock); } spin_unlock(&si->lock); return ret; } static void swap_discard_work(struct work_struct *work) { struct swap_info_struct *si; si = container_of(work, struct swap_info_struct, discard_work); swap_do_scheduled_discard(si); } static void swap_users_ref_free(struct percpu_ref *ref) { struct swap_info_struct *si; si = container_of(ref, struct swap_info_struct, users); complete(&si->comp); } /* * Must be called after freeing if ci->count == 0, moves the cluster to free * or discard list. */ static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) { VM_BUG_ON(ci->count != 0); VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE); lockdep_assert_held(&ci->lock); /* * If the swap is discardable, prepare discard the cluster * instead of free it immediately. The cluster will be freed * after discard. */ if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) == (SWP_WRITEOK | SWP_PAGE_DISCARD)) { swap_cluster_schedule_discard(si, ci); return; } __free_cluster(si, ci); } /* * Must be called after freeing if ci->count != 0, moves the cluster to * nonfull list. */ static void partial_free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) { VM_BUG_ON(!ci->count || ci->count == SWAPFILE_CLUSTER); lockdep_assert_held(&ci->lock); if (ci->flags != CLUSTER_FLAG_NONFULL) move_cluster(si, ci, &si->nonfull_clusters[ci->order], CLUSTER_FLAG_NONFULL); } /* * Must be called after allocation, moves the cluster to full or frag list. * Note: allocation doesn't acquire si lock, and may drop the ci lock for * reclaim, so the cluster could be any where when called. 
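 */

/*
 * A sketch of the mapping relocate_cluster() below implements;
 * demo_target_flag() is hypothetical and ignores the discard special case
 * noted above:
 *
 *	count == 0			-> free list
 *	0 < count < SWAPFILE_CLUSTER	-> frag list of its order
 *	count == SWAPFILE_CLUSTER	-> full list
 */
static inline enum swap_cluster_flags demo_target_flag(unsigned int count)
{
	if (!count)
		return CLUSTER_FLAG_FREE;
	if (count < SWAPFILE_CLUSTER)
		return CLUSTER_FLAG_FRAG;
	return CLUSTER_FLAG_FULL;
}

/*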
*/ static void relocate_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) { lockdep_assert_held(&ci->lock); /* Discard cluster must remain off-list or on discard list */ if (cluster_is_discard(ci)) return; if (!ci->count) { if (ci->flags != CLUSTER_FLAG_FREE) free_cluster(si, ci); } else if (ci->count != SWAPFILE_CLUSTER) { if (ci->flags != CLUSTER_FLAG_FRAG) move_cluster(si, ci, &si->frag_clusters[ci->order], CLUSTER_FLAG_FRAG); } else { if (ci->flags != CLUSTER_FLAG_FULL) move_cluster(si, ci, &si->full_clusters, CLUSTER_FLAG_FULL); } } /* * The cluster corresponding to page_nr will be used. The cluster will not be * added to free cluster list and its usage counter will be increased by 1. * Only used for initialization. */ static void inc_cluster_info_page(struct swap_info_struct *si, struct swap_cluster_info *cluster_info, unsigned long page_nr) { unsigned long idx = page_nr / SWAPFILE_CLUSTER; struct swap_cluster_info *ci; ci = cluster_info + idx; ci->count++; VM_BUG_ON(ci->count > SWAPFILE_CLUSTER); VM_BUG_ON(ci->flags); } static bool cluster_reclaim_range(struct swap_info_struct *si, struct swap_cluster_info *ci, unsigned long start, unsigned long end) { unsigned char *map = si->swap_map; unsigned long offset = start; int nr_reclaim; spin_unlock(&ci->lock); do { switch (READ_ONCE(map[offset])) { case 0: offset++; break; case SWAP_HAS_CACHE: nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY); if (nr_reclaim > 0) offset += nr_reclaim; else goto out; break; default: goto out; } } while (offset < end); out: spin_lock(&ci->lock); /* * Recheck the range no matter reclaim succeeded or not, the slot * could have been be freed while we are not holding the lock. */ for (offset = start; offset < end; offset++) if (READ_ONCE(map[offset])) return false; return true; } static bool cluster_scan_range(struct swap_info_struct *si, struct swap_cluster_info *ci, unsigned long start, unsigned int nr_pages, bool *need_reclaim) { unsigned long offset, end = start + nr_pages; unsigned char *map = si->swap_map; if (cluster_is_empty(ci)) return true; for (offset = start; offset < end; offset++) { switch (READ_ONCE(map[offset])) { case 0: continue; case SWAP_HAS_CACHE: if (!vm_swap_full()) return false; *need_reclaim = true; continue; default: return false; } } return true; } static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci, unsigned int start, unsigned char usage, unsigned int order) { unsigned int nr_pages = 1 << order; lockdep_assert_held(&ci->lock); if (!(si->flags & SWP_WRITEOK)) return false; /* * The first allocation in a cluster makes the * cluster exclusive to this order */ if (cluster_is_empty(ci)) ci->order = order; memset(si->swap_map + start, usage, nr_pages); swap_range_alloc(si, nr_pages); ci->count += nr_pages; return true; } /* Try use a new cluster for current CPU and allocate from it. 
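 */

/*
 * Illustrative note: the scan below is confined to a single cluster and
 * walks it in order-sized steps, so with SWAPFILE_CLUSTER == 256 and
 * order == 2 it advances 4 slots at a time and examines at most 64
 * candidate positions per cluster.  demo_cluster_base() is a hypothetical
 * helper showing how an arbitrary offset maps back to its cluster base.
 */
static inline unsigned long demo_cluster_base(unsigned long offset)
{
	return ALIGN_DOWN(offset, SWAPFILE_CLUSTER);
}

/*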
*/ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci, unsigned long offset, unsigned int order, unsigned char usage) { unsigned int next = SWAP_ENTRY_INVALID, found = SWAP_ENTRY_INVALID; unsigned long start = ALIGN_DOWN(offset, SWAPFILE_CLUSTER); unsigned long end = min(start + SWAPFILE_CLUSTER, si->max); unsigned int nr_pages = 1 << order; bool need_reclaim, ret; lockdep_assert_held(&ci->lock); if (end < nr_pages || ci->count + nr_pages > SWAPFILE_CLUSTER) goto out; for (end -= nr_pages; offset <= end; offset += nr_pages) { need_reclaim = false; if (!cluster_scan_range(si, ci, offset, nr_pages, &need_reclaim)) continue; if (need_reclaim) { ret = cluster_reclaim_range(si, ci, offset, offset + nr_pages); /* * Reclaim drops ci->lock and cluster could be used * by another order. Not checking flag as off-list * cluster has no flag set, and change of list * won't cause fragmentation. */ if (!cluster_is_usable(ci, order)) goto out; if (cluster_is_empty(ci)) offset = start; /* Reclaim failed but cluster is usable, try next */ if (!ret) continue; } if (!cluster_alloc_range(si, ci, offset, usage, order)) break; found = offset; offset += nr_pages; if (ci->count < SWAPFILE_CLUSTER && offset <= end) next = offset; break; } out: relocate_cluster(si, ci); unlock_cluster(ci); if (si->flags & SWP_SOLIDSTATE) { this_cpu_write(percpu_swap_cluster.offset[order], next); this_cpu_write(percpu_swap_cluster.si[order], si); } else { si->global_cluster->next[order] = next; } return found; } static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force) { long to_scan = 1; unsigned long offset, end; struct swap_cluster_info *ci; unsigned char *map = si->swap_map; int nr_reclaim; if (force) to_scan = swap_usage_in_pages(si) / SWAPFILE_CLUSTER; while ((ci = isolate_lock_cluster(si, &si->full_clusters))) { offset = cluster_offset(si, ci); end = min(si->max, offset + SWAPFILE_CLUSTER); to_scan--; while (offset < end) { if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) { spin_unlock(&ci->lock); nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY); spin_lock(&ci->lock); if (nr_reclaim) { offset += abs(nr_reclaim); continue; } } offset++; } /* in case no swap cache is reclaimed */ if (ci->flags == CLUSTER_FLAG_NONE) relocate_cluster(si, ci); unlock_cluster(ci); if (to_scan <= 0) break; } } static void swap_reclaim_work(struct work_struct *work) { struct swap_info_struct *si; si = container_of(work, struct swap_info_struct, reclaim_work); swap_reclaim_full_clusters(si, true); } /* * Try to allocate swap entries with specified order and try set a new * cluster for current CPU too. */ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int order, unsigned char usage) { struct swap_cluster_info *ci; unsigned int offset = SWAP_ENTRY_INVALID, found = SWAP_ENTRY_INVALID; /* * Swapfile is not block device so unable * to allocate large entries. */ if (order && !(si->flags & SWP_BLKDEV)) return 0; if (!(si->flags & SWP_SOLIDSTATE)) { /* Serialize HDD SWAP allocation for each device. 
*/ spin_lock(&si->global_cluster_lock); offset = si->global_cluster->next[order]; if (offset == SWAP_ENTRY_INVALID) goto new_cluster; ci = lock_cluster(si, offset); /* Cluster could have been used by another order */ if (cluster_is_usable(ci, order)) { if (cluster_is_empty(ci)) offset = cluster_offset(si, ci); found = alloc_swap_scan_cluster(si, ci, offset, order, usage); } else { unlock_cluster(ci); } if (found) goto done; } new_cluster: ci = isolate_lock_cluster(si, &si->free_clusters); if (ci) { found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci), order, usage); if (found) goto done; } /* Try reclaim from full clusters if free clusters list is drained */ if (vm_swap_full()) swap_reclaim_full_clusters(si, false); if (order < PMD_ORDER) { unsigned int frags = 0, frags_existing; while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[order]))) { found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci), order, usage); if (found) goto done; /* Clusters failed to allocate are moved to frag_clusters */ frags++; } frags_existing = atomic_long_read(&si->frag_cluster_nr[order]); while (frags < frags_existing && (ci = isolate_lock_cluster(si, &si->frag_clusters[order]))) { atomic_long_dec(&si->frag_cluster_nr[order]); /* * Rotate the frag list to iterate, they were all * failing high order allocation or moved here due to * per-CPU usage, but they could contain newly released * reclaimable (eg. lazy-freed swap cache) slots. */ found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci), order, usage); if (found) goto done; frags++; } } /* * We don't have free cluster but have some clusters in * discarding, do discard now and reclaim them, then * reread cluster_next_cpu since we dropped si->lock */ if ((si->flags & SWP_PAGE_DISCARD) && swap_do_scheduled_discard(si)) goto new_cluster; if (order) goto done; /* Order 0 stealing from higher order */ for (int o = 1; o < SWAP_NR_ORDERS; o++) { /* * Clusters here have at least one usable slots and can't fail order 0 * allocation, but reclaim may drop si->lock and race with another user. */ while ((ci = isolate_lock_cluster(si, &si->frag_clusters[o]))) { atomic_long_dec(&si->frag_cluster_nr[o]); found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci), 0, usage); if (found) goto done; } while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[o]))) { found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci), 0, usage); if (found) goto done; } } done: if (!(si->flags & SWP_SOLIDSTATE)) spin_unlock(&si->global_cluster_lock); return found; } /* SWAP_USAGE_OFFLIST_BIT can only be set by this helper. */ static void del_from_avail_list(struct swap_info_struct *si, bool swapoff) { int nid; unsigned long pages; spin_lock(&swap_avail_lock); if (swapoff) { /* * Forcefully remove it. Clear the SWP_WRITEOK flags for * swapoff here so it's synchronized by both si->lock and * swap_avail_lock, to ensure the result can be seen by * add_to_avail_list. */ lockdep_assert_held(&si->lock); si->flags &= ~SWP_WRITEOK; atomic_long_or(SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages); } else { /* * If not called by swapoff, take it off-list only if it's * full and SWAP_USAGE_OFFLIST_BIT is not set (strictly * si->inuse_pages == pages), any concurrent slot freeing, * or device already removed from plist by someone else * will make this return false. 
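 *
 * Worked example (illustrative): with si->pages == 1024 the cmpxchg below
 * succeeds only if inuse_pages reads exactly 1024 (device full, bit still
 * clear) and atomically becomes 1024 | SWAP_USAGE_OFFLIST_BIT.  A slot
 * freed concurrently (1023) or a bit already set by someone else makes the
 * compare fail and the device stays on the plist.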
*/ pages = si->pages; if (!atomic_long_try_cmpxchg(&si->inuse_pages, &pages, pages | SWAP_USAGE_OFFLIST_BIT)) goto skip; } for_each_node(nid) plist_del(&si->avail_lists[nid], &swap_avail_heads[nid]); skip: spin_unlock(&swap_avail_lock); } /* SWAP_USAGE_OFFLIST_BIT can only be cleared by this helper. */ static void add_to_avail_list(struct swap_info_struct *si, bool swapon) { int nid; long val; unsigned long pages; spin_lock(&swap_avail_lock); /* Corresponding to SWP_WRITEOK clearing in del_from_avail_list */ if (swapon) { lockdep_assert_held(&si->lock); si->flags |= SWP_WRITEOK; } else { if (!(READ_ONCE(si->flags) & SWP_WRITEOK)) goto skip; } if (!(atomic_long_read(&si->inuse_pages) & SWAP_USAGE_OFFLIST_BIT)) goto skip; val = atomic_long_fetch_and_relaxed(~SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages); /* * When device is full and device is on the plist, only one updater will * see (inuse_pages == si->pages) and will call del_from_avail_list. If * that updater happen to be here, just skip adding. */ pages = si->pages; if (val == pages) { /* Just like the cmpxchg in del_from_avail_list */ if (atomic_long_try_cmpxchg(&si->inuse_pages, &pages, pages | SWAP_USAGE_OFFLIST_BIT)) goto skip; } for_each_node(nid) plist_add(&si->avail_lists[nid], &swap_avail_heads[nid]); skip: spin_unlock(&swap_avail_lock); } /* * swap_usage_add / swap_usage_sub of each slot are serialized by ci->lock * within each cluster, so the total contribution to the global counter should * always be positive and cannot exceed the total number of usable slots. */ static bool swap_usage_add(struct swap_info_struct *si, unsigned int nr_entries) { long val = atomic_long_add_return_relaxed(nr_entries, &si->inuse_pages); /* * If device is full, and SWAP_USAGE_OFFLIST_BIT is not set, * remove it from the plist. */ if (unlikely(val == si->pages)) { del_from_avail_list(si, false); return true; } return false; } static void swap_usage_sub(struct swap_info_struct *si, unsigned int nr_entries) { long val = atomic_long_sub_return_relaxed(nr_entries, &si->inuse_pages); /* * If device is not full, and SWAP_USAGE_OFFLIST_BIT is set, * add it to the plist. */ if (unlikely(val & SWAP_USAGE_OFFLIST_BIT)) add_to_avail_list(si, false); } static void swap_range_alloc(struct swap_info_struct *si, unsigned int nr_entries) { if (swap_usage_add(si, nr_entries)) { if (vm_swap_full()) schedule_work(&si->reclaim_work); } } static void swap_range_free(struct swap_info_struct *si, unsigned long offset, unsigned int nr_entries) { unsigned long begin = offset; unsigned long end = offset + nr_entries - 1; void (*swap_slot_free_notify)(struct block_device *, unsigned long); unsigned int i; /* * Use atomic clear_bit operations only on zeromap instead of non-atomic * bitmap_clear to prevent adjacent bits corruption due to simultaneous writes. */ for (i = 0; i < nr_entries; i++) { clear_bit(offset + i, si->zeromap); zswap_invalidate(swp_entry(si->type, offset + i)); } if (si->flags & SWP_BLKDEV) swap_slot_free_notify = si->bdev->bd_disk->fops->swap_slot_free_notify; else swap_slot_free_notify = NULL; while (offset <= end) { arch_swap_invalidate_page(si->type, offset); if (swap_slot_free_notify) swap_slot_free_notify(si->bdev, offset); offset++; } clear_shadow_from_swap_cache(si->type, begin, end); /* * Make sure that try_to_unuse() observes si->inuse_pages reaching 0 * only after the above cleanups are done. 
*/ smp_wmb(); atomic_long_add(nr_entries, &nr_swap_pages); swap_usage_sub(si, nr_entries); } static bool get_swap_device_info(struct swap_info_struct *si) { if (!percpu_ref_tryget_live(&si->users)) return false; /* * Guarantee the si->users are checked before accessing other * fields of swap_info_struct, and si->flags (SWP_WRITEOK) is * up to dated. * * Paired with the spin_unlock() after setup_swap_info() in * enable_swap_info(), and smp_wmb() in swapoff. */ smp_rmb(); return true; } /* * Fast path try to get swap entries with specified order from current * CPU's swap entry pool (a cluster). */ static bool swap_alloc_fast(swp_entry_t *entry, int order) { struct swap_cluster_info *ci; struct swap_info_struct *si; unsigned int offset, found = SWAP_ENTRY_INVALID; /* * Once allocated, swap_info_struct will never be completely freed, * so checking it's liveness by get_swap_device_info is enough. */ si = this_cpu_read(percpu_swap_cluster.si[order]); offset = this_cpu_read(percpu_swap_cluster.offset[order]); if (!si || !offset || !get_swap_device_info(si)) return false; ci = lock_cluster(si, offset); if (cluster_is_usable(ci, order)) { if (cluster_is_empty(ci)) offset = cluster_offset(si, ci); found = alloc_swap_scan_cluster(si, ci, offset, order, SWAP_HAS_CACHE); if (found) *entry = swp_entry(si->type, found); } else { unlock_cluster(ci); } put_swap_device(si); return !!found; } /* Rotate the device and switch to a new cluster */ static bool swap_alloc_slow(swp_entry_t *entry, int order) { int node; unsigned long offset; struct swap_info_struct *si, *next; node = numa_node_id(); spin_lock(&swap_avail_lock); start_over: plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) { /* Rotate the device and switch to a new cluster */ plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]); spin_unlock(&swap_avail_lock); if (get_swap_device_info(si)) { offset = cluster_alloc_swap_entry(si, order, SWAP_HAS_CACHE); put_swap_device(si); if (offset) { *entry = swp_entry(si->type, offset); return true; } if (order) return false; } spin_lock(&swap_avail_lock); /* * if we got here, it's likely that si was almost full before, * and since scan_swap_map_slots() can drop the si->lock, * multiple callers probably all tried to get a page from the * same si and it filled up before we could get one; or, the si * filled up between us dropping swap_avail_lock and taking * si->lock. Since we dropped the swap_avail_lock, the * swap_avail_head list may have been modified; so if next is * still in the swap_avail_head list then try it, otherwise * start over if we have not gotten any slots. */ if (plist_node_empty(&next->avail_lists[node])) goto start_over; } spin_unlock(&swap_avail_lock); return false; } /** * folio_alloc_swap - allocate swap space for a folio * @folio: folio we want to move to swap * @gfp: gfp mask for shadow nodes * * Allocate swap space for the folio and add the folio to the * swap cache. * * Context: Caller needs to hold the folio lock. * Return: Whether the folio was added to the swap cache. */ int folio_alloc_swap(struct folio *folio, gfp_t gfp) { unsigned int order = folio_order(folio); unsigned int size = 1 << order; swp_entry_t entry = {}; VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio); if (order) { /* * Reject large allocation when THP_SWAP is disabled, * the caller should split the folio and try again. 
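 *
 * Worked example (illustrative, assuming 4 KiB pages and a 2 MiB PMD):
 * HPAGE_PMD_NR is 512, so with CONFIG_THP_SWAP the cluster holds 512 slots
 * and the largest folio accepted here is order 9.  Without CONFIG_THP_SWAP
 * any order > 0 is rejected with -EAGAIN so the caller can split the folio
 * and retry.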
*/ if (!IS_ENABLED(CONFIG_THP_SWAP)) return -EAGAIN; /* * Allocation size should never exceed cluster size * (HPAGE_PMD_SIZE). */ if (size > SWAPFILE_CLUSTER) { VM_WARN_ON_ONCE(1); return -EINVAL; } } local_lock(&percpu_swap_cluster.lock); if (!swap_alloc_fast(&entry, order)) swap_alloc_slow(&entry, order); local_unlock(&percpu_swap_cluster.lock); /* Need to call this even if allocation failed, for MEMCG_SWAP_FAIL. */ if (mem_cgroup_try_charge_swap(folio, entry)) goto out_free; if (!entry.val) return -ENOMEM; /* * XArray node allocations from PF_MEMALLOC contexts could * completely exhaust the page allocator. __GFP_NOMEMALLOC * stops emergency reserves from being allocated. * * TODO: this could cause a theoretical memory reclaim * deadlock in the swap out path. */ if (add_to_swap_cache(folio, entry, gfp | __GFP_NOMEMALLOC, NULL)) goto out_free; atomic_long_sub(size, &nr_swap_pages); return 0; out_free: put_swap_folio(folio, entry); return -ENOMEM; } static struct swap_info_struct *_swap_info_get(swp_entry_t entry) { struct swap_info_struct *si; unsigned long offset; if (!entry.val) goto out; si = swp_swap_info(entry); if (!si) goto bad_nofile; if (data_race(!(si->flags & SWP_USED))) goto bad_device; offset = swp_offset(entry); if (offset >= si->max) goto bad_offset; if (data_race(!si->swap_map[swp_offset(entry)])) goto bad_free; return si; bad_free: pr_err("%s: %s%08lx\n", __func__, Unused_offset, entry.val); goto out; bad_offset: pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val); goto out; bad_device: pr_err("%s: %s%08lx\n", __func__, Unused_file, entry.val); goto out; bad_nofile: pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val); out: return NULL; } static unsigned char swap_entry_put_locked(struct swap_info_struct *si, struct swap_cluster_info *ci, swp_entry_t entry, unsigned char usage) { unsigned long offset = swp_offset(entry); unsigned char count; unsigned char has_cache; count = si->swap_map[offset]; has_cache = count & SWAP_HAS_CACHE; count &= ~SWAP_HAS_CACHE; if (usage == SWAP_HAS_CACHE) { VM_BUG_ON(!has_cache); has_cache = 0; } else if (count == SWAP_MAP_SHMEM) { /* * Or we could insist on shmem.c using a special * swap_shmem_free() and free_shmem_swap_and_cache()... */ count = 0; } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) { if (count == COUNT_CONTINUED) { if (swap_count_continued(si, offset, count)) count = SWAP_MAP_MAX | COUNT_CONTINUED; else count = SWAP_MAP_MAX; } else count--; } usage = count | has_cache; if (usage) WRITE_ONCE(si->swap_map[offset], usage); else swap_entries_free(si, ci, entry, 1); return usage; } /* * When we get a swap entry, if there aren't some other ways to * prevent swapoff, such as the folio in swap cache is locked, RCU * reader side is locked, etc., the swap entry may become invalid * because of swapoff. Then, we need to enclose all swap related * functions with get_swap_device() and put_swap_device(), unless the * swap functions call get/put_swap_device() by themselves. * * RCU reader side lock (including any spinlock) is sufficient to * prevent swapoff, because synchronize_rcu() is called in swapoff() * before freeing data structures. * * Check whether swap entry is valid in the swap device. If so, * return pointer to swap_info_struct, and keep the swap entry valid * via preventing the swap device from being swapoff, until * put_swap_device() is called. Otherwise return NULL. 
* * Notice that swapoff or swapoff+swapon can still happen before the * percpu_ref_tryget_live() in get_swap_device() or after the * percpu_ref_put() in put_swap_device() if there isn't any other way * to prevent swapoff. The caller must be prepared for that. For * example, the following situation is possible. * * CPU1 CPU2 * do_swap_page() * ... swapoff+swapon * __read_swap_cache_async() * swapcache_prepare() * __swap_duplicate() * // check swap_map * // verify PTE not changed * * In __swap_duplicate(), the swap_map need to be checked before * changing partly because the specified swap entry may be for another * swap device which has been swapoff. And in do_swap_page(), after * the page is read from the swap device, the PTE is verified not * changed with the page table locked to check whether the swap device * has been swapoff or swapoff+swapon. */ struct swap_info_struct *get_swap_device(swp_entry_t entry) { struct swap_info_struct *si; unsigned long offset; if (!entry.val) goto out; si = swp_swap_info(entry); if (!si) goto bad_nofile; if (!get_swap_device_info(si)) goto out; offset = swp_offset(entry); if (offset >= si->max) goto put_out; return si; bad_nofile: pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val); out: return NULL; put_out: pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val); percpu_ref_put(&si->users); return NULL; } static void swap_entries_put_cache(struct swap_info_struct *si, swp_entry_t entry, int nr) { unsigned long offset = swp_offset(entry); struct swap_cluster_info *ci; ci = lock_cluster(si, offset); if (swap_only_has_cache(si, offset, nr)) swap_entries_free(si, ci, entry, nr); else { for (int i = 0; i < nr; i++, entry.val++) swap_entry_put_locked(si, ci, entry, SWAP_HAS_CACHE); } unlock_cluster(ci); } static bool swap_entries_put_map(struct swap_info_struct *si, swp_entry_t entry, int nr) { unsigned long offset = swp_offset(entry); struct swap_cluster_info *ci; bool has_cache = false; unsigned char count; int i; if (nr <= 1) goto fallback; count = swap_count(data_race(si->swap_map[offset])); if (count != 1 && count != SWAP_MAP_SHMEM) goto fallback; ci = lock_cluster(si, offset); if (!swap_is_last_map(si, offset, nr, &has_cache)) { goto locked_fallback; } if (!has_cache) swap_entries_free(si, ci, entry, nr); else for (i = 0; i < nr; i++) WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE); unlock_cluster(ci); return has_cache; fallback: ci = lock_cluster(si, offset); locked_fallback: for (i = 0; i < nr; i++, entry.val++) { count = swap_entry_put_locked(si, ci, entry, 1); if (count == SWAP_HAS_CACHE) has_cache = true; } unlock_cluster(ci); return has_cache; } /* * Only functions with "_nr" suffix are able to free entries spanning * cross multi clusters, so ensure the range is within a single cluster * when freeing entries with functions without "_nr" suffix. */ static bool swap_entries_put_map_nr(struct swap_info_struct *si, swp_entry_t entry, int nr) { int cluster_nr, cluster_rest; unsigned long offset = swp_offset(entry); bool has_cache = false; cluster_rest = SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER; while (nr) { cluster_nr = min(nr, cluster_rest); has_cache |= swap_entries_put_map(si, entry, cluster_nr); cluster_rest = SWAPFILE_CLUSTER; nr -= cluster_nr; entry.val += cluster_nr; } return has_cache; } /* * Check if it's the last ref of swap entry in the freeing path. * Qualified vlaue includes 1, SWAP_HAS_CACHE or SWAP_MAP_SHMEM. 
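 *
 * Illustrative examples of qualifying swap_map bytes: 0x01 (a single page
 * table reference), SWAP_HAS_CACHE on its own (a swap cache reference and
 * nothing else) and SWAP_MAP_SHMEM (the shmem marker).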
*/ static inline bool __maybe_unused swap_is_last_ref(unsigned char count) { return (count == SWAP_HAS_CACHE) || (count == 1) || (count == SWAP_MAP_SHMEM); } /* * Drop the last ref of swap entries, caller have to ensure all entries * belong to the same cgroup and cluster. */ static void swap_entries_free(struct swap_info_struct *si, struct swap_cluster_info *ci, swp_entry_t entry, unsigned int nr_pages) { unsigned long offset = swp_offset(entry); unsigned char *map = si->swap_map + offset; unsigned char *map_end = map + nr_pages; /* It should never free entries across different clusters */ VM_BUG_ON(ci != offset_to_cluster(si, offset + nr_pages - 1)); VM_BUG_ON(cluster_is_empty(ci)); VM_BUG_ON(ci->count < nr_pages); ci->count -= nr_pages; do { VM_BUG_ON(!swap_is_last_ref(*map)); *map = 0; } while (++map < map_end); mem_cgroup_uncharge_swap(entry, nr_pages); swap_range_free(si, offset, nr_pages); if (!ci->count) free_cluster(si, ci); else partial_free_cluster(si, ci); } /* * Caller has made sure that the swap device corresponding to entry * is still around or has not been recycled. */ void swap_free_nr(swp_entry_t entry, int nr_pages) { int nr; struct swap_info_struct *sis; unsigned long offset = swp_offset(entry); sis = _swap_info_get(entry); if (!sis) return; while (nr_pages) { nr = min_t(int, nr_pages, SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER); swap_entries_put_map(sis, swp_entry(sis->type, offset), nr); offset += nr; nr_pages -= nr; } } /* * Called after dropping swapcache to decrease refcnt to swap entries. */ void put_swap_folio(struct folio *folio, swp_entry_t entry) { struct swap_info_struct *si; int size = 1 << swap_entry_order(folio_order(folio)); si = _swap_info_get(entry); if (!si) return; swap_entries_put_cache(si, entry, size); } int __swap_count(swp_entry_t entry) { struct swap_info_struct *si = swp_swap_info(entry); pgoff_t offset = swp_offset(entry); return swap_count(si->swap_map[offset]); } /* * How many references to @entry are currently swapped out? * This does not give an exact answer when swap count is continued, * but does include the high COUNT_CONTINUED flag to allow for that. */ bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry) { pgoff_t offset = swp_offset(entry); struct swap_cluster_info *ci; int count; ci = lock_cluster(si, offset); count = swap_count(si->swap_map[offset]); unlock_cluster(ci); return !!count; } /* * How many references to @entry are currently swapped out? * This considers COUNT_CONTINUED so it returns exact answer. 
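 *
 * Worked example (illustrative) of the continuation arithmetic below: if
 * the main swap_map byte is SWAP_MAP_MAX | COUNT_CONTINUED and the first
 * continuation byte is 3 with no further COUNT_CONTINUED, the result is
 *
 *	SWAP_MAP_MAX + 3 * (SWAP_MAP_MAX + 1)
 *
 * and each deeper continuation level scales by another (SWAP_CONT_MAX + 1).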
*/ int swp_swapcount(swp_entry_t entry) { int count, tmp_count, n; struct swap_info_struct *si; struct swap_cluster_info *ci; struct page *page; pgoff_t offset; unsigned char *map; si = _swap_info_get(entry); if (!si) return 0; offset = swp_offset(entry); ci = lock_cluster(si, offset); count = swap_count(si->swap_map[offset]); if (!(count & COUNT_CONTINUED)) goto out; count &= ~COUNT_CONTINUED; n = SWAP_MAP_MAX + 1; page = vmalloc_to_page(si->swap_map + offset); offset &= ~PAGE_MASK; VM_BUG_ON(page_private(page) != SWP_CONTINUED); do { page = list_next_entry(page, lru); map = kmap_local_page(page); tmp_count = map[offset]; kunmap_local(map); count += (tmp_count & ~COUNT_CONTINUED) * n; n *= (SWAP_CONT_MAX + 1); } while (tmp_count & COUNT_CONTINUED); out: unlock_cluster(ci); return count; } static bool swap_page_trans_huge_swapped(struct swap_info_struct *si, swp_entry_t entry, int order) { struct swap_cluster_info *ci; unsigned char *map = si->swap_map; unsigned int nr_pages = 1 << order; unsigned long roffset = swp_offset(entry); unsigned long offset = round_down(roffset, nr_pages); int i; bool ret = false; ci = lock_cluster(si, offset); if (nr_pages == 1) { if (swap_count(map[roffset])) ret = true; goto unlock_out; } for (i = 0; i < nr_pages; i++) { if (swap_count(map[offset + i])) { ret = true; break; } } unlock_out: unlock_cluster(ci); return ret; } static bool folio_swapped(struct folio *folio) { swp_entry_t entry = folio->swap; struct swap_info_struct *si = _swap_info_get(entry); if (!si) return false; if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio))) return swap_entry_swapped(si, entry); return swap_page_trans_huge_swapped(si, entry, folio_order(folio)); } static bool folio_swapcache_freeable(struct folio *folio) { VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); if (!folio_test_swapcache(folio)) return false; if (folio_test_writeback(folio)) return false; /* * Once hibernation has begun to create its image of memory, * there's a danger that one of the calls to folio_free_swap() * - most probably a call from __try_to_reclaim_swap() while * hibernation is allocating its own swap pages for the image, * but conceivably even a call from memory reclaim - will free * the swap from a folio which has already been recorded in the * image as a clean swapcache folio, and then reuse its swap for * another page of the image. On waking from hibernation, the * original folio might be freed under memory pressure, then * later read back in from swap, now with the wrong data. * * Hibernation suspends storage while it is writing the image * to disk so check that here. */ if (pm_suspended_storage()) return false; return true; } /** * folio_free_swap() - Free the swap space used for this folio. * @folio: The folio to remove. * * If swap is getting full, or if there are no more mappings of this folio, * then call folio_free_swap to free its swap space. * * Return: true if we were able to release the swap space. */ bool folio_free_swap(struct folio *folio) { if (!folio_swapcache_freeable(folio)) return false; if (folio_swapped(folio)) return false; delete_from_swap_cache(folio); folio_set_dirty(folio); return true; } /** * free_swap_and_cache_nr() - Release reference on range of swap entries and * reclaim their cache if no more references remain. * @entry: First entry of range. * @nr: Number of entries in range. * * For each swap entry in the contiguous range, release a reference. If any swap * entries become free, try to reclaim their underlying folios, if present. 
The * offset range is defined by [entry.offset, entry.offset + nr). */ void free_swap_and_cache_nr(swp_entry_t entry, int nr) { const unsigned long start_offset = swp_offset(entry); const unsigned long end_offset = start_offset + nr; struct swap_info_struct *si; bool any_only_cache = false; unsigned long offset; si = get_swap_device(entry); if (!si) return; if (WARN_ON(end_offset > si->max)) goto out; /* * First free all entries in the range. */ any_only_cache = swap_entries_put_map_nr(si, entry, nr); /* * Short-circuit the below loop if none of the entries had their * reference drop to zero. */ if (!any_only_cache) goto out; /* * Now go back over the range trying to reclaim the swap cache. */ for (offset = start_offset; offset < end_offset; offset += nr) { nr = 1; if (READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) { /* * Folios are always naturally aligned in swap so * advance forward to the next boundary. Zero means no * folio was found for the swap entry, so advance by 1 * in this case. Negative value means folio was found * but could not be reclaimed. Here we can still advance * to the next boundary. */ nr = __try_to_reclaim_swap(si, offset, TTRS_UNMAPPED | TTRS_FULL); if (nr == 0) nr = 1; else if (nr < 0) nr = -nr; nr = ALIGN(offset + 1, nr) - offset; } } out: put_swap_device(si); } #ifdef CONFIG_HIBERNATION swp_entry_t get_swap_page_of_type(int type) { struct swap_info_struct *si = swap_type_to_swap_info(type); unsigned long offset; swp_entry_t entry = {0}; if (!si) goto fail; /* This is called for allocating swap entry, not cache */ if (get_swap_device_info(si)) { if (si->flags & SWP_WRITEOK) { offset = cluster_alloc_swap_entry(si, 0, 1); if (offset) { entry = swp_entry(si->type, offset); atomic_long_dec(&nr_swap_pages); } } put_swap_device(si); } fail: return entry; } /* * Find the swap type that corresponds to given device (if any). * * @offset - number of the PAGE_SIZE-sized block of the device, starting * from 0, in which the swap header is expected to be located. * * This is needed for the suspend to disk (aka swsusp). */ int swap_type_of(dev_t device, sector_t offset) { int type; if (!device) return -1; spin_lock(&swap_lock); for (type = 0; type < nr_swapfiles; type++) { struct swap_info_struct *sis = swap_info[type]; if (!(sis->flags & SWP_WRITEOK)) continue; if (device == sis->bdev->bd_dev) { struct swap_extent *se = first_se(sis); if (se->start_block == offset) { spin_unlock(&swap_lock); return type; } } } spin_unlock(&swap_lock); return -ENODEV; } int find_first_swap(dev_t *device) { int type; spin_lock(&swap_lock); for (type = 0; type < nr_swapfiles; type++) { struct swap_info_struct *sis = swap_info[type]; if (!(sis->flags & SWP_WRITEOK)) continue; *device = sis->bdev->bd_dev; spin_unlock(&swap_lock); return type; } spin_unlock(&swap_lock); return -ENODEV; } /* * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev * corresponding to given index in swap_info (swap type). 
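 *
 * Illustrative example: an extent mapping swap pages [100, 200) to disk
 * blocks starting at 5000 resolves offset 150 to block
 * 5000 + (150 - 100) = 5050.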
*/ sector_t swapdev_block(int type, pgoff_t offset) { struct swap_info_struct *si = swap_type_to_swap_info(type); struct swap_extent *se; if (!si || !(si->flags & SWP_WRITEOK)) return 0; se = offset_to_swap_extent(si, offset); return se->start_block + (offset - se->start_page); } /* * Return either the total number of swap pages of given type, or the number * of free pages of that type (depending on @free) * * This is needed for software suspend */ unsigned int count_swap_pages(int type, int free) { unsigned int n = 0; spin_lock(&swap_lock); if ((unsigned int)type < nr_swapfiles) { struct swap_info_struct *sis = swap_info[type]; spin_lock(&sis->lock); if (sis->flags & SWP_WRITEOK) { n = sis->pages; if (free) n -= swap_usage_in_pages(sis); } spin_unlock(&sis->lock); } spin_unlock(&swap_lock); return n; } #endif /* CONFIG_HIBERNATION */ static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte) { return pte_same(pte_swp_clear_flags(pte), swp_pte); } /* * No need to decide whether this PTE shares the swap entry with others, * just let do_wp_page work it out if a write is requested later - to * force COW, vm_page_prot omits write permission from any private vma. */ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, swp_entry_t entry, struct folio *folio) { struct page *page; struct folio *swapcache; spinlock_t *ptl; pte_t *pte, new_pte, old_pte; bool hwpoisoned = false; int ret = 1; swapcache = folio; folio = ksm_might_need_to_copy(folio, vma, addr); if (unlikely(!folio)) return -ENOMEM; else if (unlikely(folio == ERR_PTR(-EHWPOISON))) { hwpoisoned = true; folio = swapcache; } page = folio_file_page(folio, swp_offset(entry)); if (PageHWPoison(page)) hwpoisoned = true; pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte), swp_entry_to_pte(entry)))) { ret = 0; goto out; } old_pte = ptep_get(pte); if (unlikely(hwpoisoned || !folio_test_uptodate(folio))) { swp_entry_t swp_entry; dec_mm_counter(vma->vm_mm, MM_SWAPENTS); if (hwpoisoned) { swp_entry = make_hwpoison_entry(page); } else { swp_entry = make_poisoned_swp_entry(); } new_pte = swp_entry_to_pte(swp_entry); ret = 0; goto setpte; } /* * Some architectures may have to restore extra metadata to the page * when reading from swap. This metadata may be indexed by swap entry * so this must be called before swap_free(). */ arch_swap_restore(folio_swap(entry, folio), folio); dec_mm_counter(vma->vm_mm, MM_SWAPENTS); inc_mm_counter(vma->vm_mm, MM_ANONPAGES); folio_get(folio); if (folio == swapcache) { rmap_t rmap_flags = RMAP_NONE; /* * See do_swap_page(): writeback would be problematic. * However, we do a folio_wait_writeback() just before this * call and have the folio locked. */ VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio); if (pte_swp_exclusive(old_pte)) rmap_flags |= RMAP_EXCLUSIVE; /* * We currently only expect small !anon folios, which are either * fully exclusive or fully shared. If we ever get large folios * here, we have to be careful. 
*/ if (!folio_test_anon(folio)) { VM_WARN_ON_ONCE(folio_test_large(folio)); VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); folio_add_new_anon_rmap(folio, vma, addr, rmap_flags); } else { folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags); } } else { /* ksm created a completely new copy */ folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); folio_add_lru_vma(folio, vma); } new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot)); if (pte_swp_soft_dirty(old_pte)) new_pte = pte_mksoft_dirty(new_pte); if (pte_swp_uffd_wp(old_pte)) new_pte = pte_mkuffd_wp(new_pte); setpte: set_pte_at(vma->vm_mm, addr, pte, new_pte); swap_free(entry); out: if (pte) pte_unmap_unlock(pte, ptl); if (folio != swapcache) { folio_unlock(folio); folio_put(folio); } return ret; } static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned int type) { pte_t *pte = NULL; struct swap_info_struct *si; si = swap_info[type]; do { struct folio *folio; unsigned long offset; unsigned char swp_count; swp_entry_t entry; int ret; pte_t ptent; if (!pte++) { pte = pte_offset_map(pmd, addr); if (!pte) break; } ptent = ptep_get_lockless(pte); if (!is_swap_pte(ptent)) continue; entry = pte_to_swp_entry(ptent); if (swp_type(entry) != type) continue; offset = swp_offset(entry); pte_unmap(pte); pte = NULL; folio = swap_cache_get_folio(entry, vma, addr); if (!folio) { struct vm_fault vmf = { .vma = vma, .address = addr, .real_address = addr, .pmd = pmd, }; folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, &vmf); } if (!folio) { swp_count = READ_ONCE(si->swap_map[offset]); if (swp_count == 0 || swp_count == SWAP_MAP_BAD) continue; return -ENOMEM; } folio_lock(folio); folio_wait_writeback(folio); ret = unuse_pte(vma, pmd, addr, entry, folio); if (ret < 0) { folio_unlock(folio); folio_put(folio); return ret; } folio_free_swap(folio); folio_unlock(folio); folio_put(folio); } while (addr += PAGE_SIZE, addr != end); if (pte) pte_unmap(pte); return 0; } static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, unsigned int type) { pmd_t *pmd; unsigned long next; int ret; pmd = pmd_offset(pud, addr); do { cond_resched(); next = pmd_addr_end(addr, end); ret = unuse_pte_range(vma, pmd, addr, next, type); if (ret) return ret; } while (pmd++, addr = next, addr != end); return 0; } static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr, unsigned long end, unsigned int type) { pud_t *pud; unsigned long next; int ret; pud = pud_offset(p4d, addr); do { next = pud_addr_end(addr, end); if (pud_none_or_clear_bad(pud)) continue; ret = unuse_pmd_range(vma, pud, addr, next, type); if (ret) return ret; } while (pud++, addr = next, addr != end); return 0; } static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, unsigned int type) { p4d_t *p4d; unsigned long next; int ret; p4d = p4d_offset(pgd, addr); do { next = p4d_addr_end(addr, end); if (p4d_none_or_clear_bad(p4d)) continue; ret = unuse_pud_range(vma, p4d, addr, next, type); if (ret) return ret; } while (p4d++, addr = next, addr != end); return 0; } static int unuse_vma(struct vm_area_struct *vma, unsigned int type) { pgd_t *pgd; unsigned long addr, end, next; int ret; addr = vma->vm_start; end = vma->vm_end; pgd = pgd_offset(vma->vm_mm, addr); do { next = pgd_addr_end(addr, end); if (pgd_none_or_clear_bad(pgd)) continue; ret = unuse_p4d_range(vma, pgd, addr, next, type); if (ret) return 
ret; } while (pgd++, addr = next, addr != end); return 0; } static int unuse_mm(struct mm_struct *mm, unsigned int type) { struct vm_area_struct *vma; int ret = 0; VMA_ITERATOR(vmi, mm, 0); mmap_read_lock(mm); for_each_vma(vmi, vma) { if (vma->anon_vma && !is_vm_hugetlb_page(vma)) { ret = unuse_vma(vma, type); if (ret) break; } cond_resched(); } mmap_read_unlock(mm); return ret; } /* * Scan swap_map from current position to next entry still in use. * Return 0 if there are no inuse entries after prev till end of * the map. */ static unsigned int find_next_to_unuse(struct swap_info_struct *si, unsigned int prev) { unsigned int i; unsigned char count; /* * No need for swap_lock here: we're just looking * for whether an entry is in use, not modifying it; false * hits are okay, and sys_swapoff() has already prevented new * allocations from this area (while holding swap_lock). */ for (i = prev + 1; i < si->max; i++) { count = READ_ONCE(si->swap_map[i]); if (count && swap_count(count) != SWAP_MAP_BAD) break; if ((i % LATENCY_LIMIT) == 0) cond_resched(); } if (i == si->max) i = 0; return i; } static int try_to_unuse(unsigned int type) { struct mm_struct *prev_mm; struct mm_struct *mm; struct list_head *p; int retval = 0; struct swap_info_struct *si = swap_info[type]; struct folio *folio; swp_entry_t entry; unsigned int i; if (!swap_usage_in_pages(si)) goto success; retry: retval = shmem_unuse(type); if (retval) return retval; prev_mm = &init_mm; mmget(prev_mm); spin_lock(&mmlist_lock); p = &init_mm.mmlist; while (swap_usage_in_pages(si) && !signal_pending(current) && (p = p->next) != &init_mm.mmlist) { mm = list_entry(p, struct mm_struct, mmlist); if (!mmget_not_zero(mm)) continue; spin_unlock(&mmlist_lock); mmput(prev_mm); prev_mm = mm; retval = unuse_mm(mm, type); if (retval) { mmput(prev_mm); return retval; } /* * Make sure that we aren't completely killing * interactive performance. */ cond_resched(); spin_lock(&mmlist_lock); } spin_unlock(&mmlist_lock); mmput(prev_mm); i = 0; while (swap_usage_in_pages(si) && !signal_pending(current) && (i = find_next_to_unuse(si, i)) != 0) { entry = swp_entry(type, i); folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry)); if (IS_ERR(folio)) continue; /* * It is conceivable that a racing task removed this folio from * swap cache just before we acquired the page lock. The folio * might even be back in swap cache on another swap area. But * that is okay, folio_free_swap() only removes stale folios. */ folio_lock(folio); folio_wait_writeback(folio); folio_free_swap(folio); folio_unlock(folio); folio_put(folio); } /* * Lets check again to see if there are still swap entries in the map. * If yes, we would need to do retry the unuse logic again. * Under global memory pressure, swap entries can be reinserted back * into process space after the mmlist loop above passes over them. * * Limit the number of retries? No: when mmget_not_zero() * above fails, that mm is likely to be freeing swap from * exit_mmap(), which proceeds at its own independent pace; * and even shmem_writeout() could have been preempted after * folio_alloc_swap(), temporarily hiding that swap. It's easy * and robust (though cpu-intensive) just to keep retrying. */ if (swap_usage_in_pages(si)) { if (!signal_pending(current)) goto retry; return -EINTR; } success: /* * Make sure that further cleanups after try_to_unuse() returns happen * after swap_range_free() reduces si->inuse_pages to 0. 
*/ smp_mb(); return 0; } /* * After a successful try_to_unuse, if no swap is now in use, we know * we can empty the mmlist. swap_lock must be held on entry and exit. * Note that mmlist_lock nests inside swap_lock, and an mm must be * added to the mmlist just after page_duplicate - before would be racy. */ static void drain_mmlist(void) { struct list_head *p, *next; unsigned int type; for (type = 0; type < nr_swapfiles; type++) if (swap_usage_in_pages(swap_info[type])) return; spin_lock(&mmlist_lock); list_for_each_safe(p, next, &init_mm.mmlist) list_del_init(p); spin_unlock(&mmlist_lock); } /* * Free all of a swapdev's extent information */ static void destroy_swap_extents(struct swap_info_struct *sis) { while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) { struct rb_node *rb = sis->swap_extent_root.rb_node; struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node); rb_erase(rb, &sis->swap_extent_root); kfree(se); } if (sis->flags & SWP_ACTIVATED) { struct file *swap_file = sis->swap_file; struct address_space *mapping = swap_file->f_mapping; sis->flags &= ~SWP_ACTIVATED; if (mapping->a_ops->swap_deactivate) mapping->a_ops->swap_deactivate(swap_file); } } /* * Add a block range (and the corresponding page range) into this swapdev's * extent tree. * * This function rather assumes that it is called in ascending page order. */ int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, unsigned long nr_pages, sector_t start_block) { struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL; struct swap_extent *se; struct swap_extent *new_se; /* * place the new node at the right most since the * function is called in ascending page order. */ while (*link) { parent = *link; link = &parent->rb_right; } if (parent) { se = rb_entry(parent, struct swap_extent, rb_node); BUG_ON(se->start_page + se->nr_pages != start_page); if (se->start_block + se->nr_pages == start_block) { /* Merge it */ se->nr_pages += nr_pages; return 0; } } /* No merge, insert a new extent. */ new_se = kmalloc(sizeof(*se), GFP_KERNEL); if (new_se == NULL) return -ENOMEM; new_se->start_page = start_page; new_se->nr_pages = nr_pages; new_se->start_block = start_block; rb_link_node(&new_se->rb_node, parent, link); rb_insert_color(&new_se->rb_node, &sis->swap_extent_root); return 1; } EXPORT_SYMBOL_GPL(add_swap_extent); /* * A `swap extent' is a simple thing which maps a contiguous range of pages * onto a contiguous range of disk blocks. A rbtree of swap extents is * built at swapon time and is then used at swap_writepage/swap_read_folio * time for locating where on disk a page belongs. * * If the swapfile is an S_ISBLK block device, a single extent is installed. * This is done so that the main operating code can treat S_ISBLK and S_ISREG * swap files identically. * * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap * extent rbtree operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK * swapfiles are handled *identically* after swapon time. * * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks * and will parse them into a rbtree, in PAGE_SIZE chunks. If some stray * blocks are found which do not fall within the PAGE_SIZE alignment * requirements, they are simply tossed out - we will never use those blocks * for swapping. * * For all swap devices we set S_SWAPFILE across the life of the swapon. This * prevents users from writing to the swap device, which will corrupt memory. 
* * The amount of disk space which a single swap extent represents varies. * Typically it is in the 1-4 megabyte range. So we can have hundreds of * extents in the rbtree. - akpm. */ static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span) { struct file *swap_file = sis->swap_file; struct address_space *mapping = swap_file->f_mapping; struct inode *inode = mapping->host; int ret; if (S_ISBLK(inode->i_mode)) { ret = add_swap_extent(sis, 0, sis->max, 0); *span = sis->pages; return ret; } if (mapping->a_ops->swap_activate) { ret = mapping->a_ops->swap_activate(sis, swap_file, span); if (ret < 0) return ret; sis->flags |= SWP_ACTIVATED; if ((sis->flags & SWP_FS_OPS) && sio_pool_init() != 0) { destroy_swap_extents(sis); return -ENOMEM; } return ret; } return generic_swapfile_activate(sis, swap_file, span); } static int swap_node(struct swap_info_struct *si) { struct block_device *bdev; if (si->bdev) bdev = si->bdev; else bdev = si->swap_file->f_inode->i_sb->s_bdev; return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE; } static void setup_swap_info(struct swap_info_struct *si, int prio, unsigned char *swap_map, struct swap_cluster_info *cluster_info, unsigned long *zeromap) { int i; if (prio >= 0) si->prio = prio; else si->prio = --least_priority; /* * the plist prio is negated because plist ordering is * low-to-high, while swap ordering is high-to-low */ si->list.prio = -si->prio; for_each_node(i) { if (si->prio >= 0) si->avail_lists[i].prio = -si->prio; else { if (swap_node(si) == i) si->avail_lists[i].prio = 1; else si->avail_lists[i].prio = -si->prio; } } si->swap_map = swap_map; si->cluster_info = cluster_info; si->zeromap = zeromap; } static void _enable_swap_info(struct swap_info_struct *si) { atomic_long_add(si->pages, &nr_swap_pages); total_swap_pages += si->pages; assert_spin_locked(&swap_lock); /* * both lists are plists, and thus priority ordered. * swap_active_head needs to be priority ordered for swapoff(), * which on removal of any swap_info_struct with an auto-assigned * (i.e. negative) priority increments the auto-assigned priority * of any lower-priority swap_info_structs. * swap_avail_head needs to be priority ordered for folio_alloc_swap(), * which allocates swap pages from the highest available priority * swap_info_struct. */ plist_add(&si->list, &swap_active_head); /* Add back to available list */ add_to_avail_list(si, true); } static void enable_swap_info(struct swap_info_struct *si, int prio, unsigned char *swap_map, struct swap_cluster_info *cluster_info, unsigned long *zeromap) { spin_lock(&swap_lock); spin_lock(&si->lock); setup_swap_info(si, prio, swap_map, cluster_info, zeromap); spin_unlock(&si->lock); spin_unlock(&swap_lock); /* * Finished initializing swap device, now it's safe to reference it. */ percpu_ref_resurrect(&si->users); spin_lock(&swap_lock); spin_lock(&si->lock); _enable_swap_info(si); spin_unlock(&si->lock); spin_unlock(&swap_lock); } static void reinsert_swap_info(struct swap_info_struct *si) { spin_lock(&swap_lock); spin_lock(&si->lock); setup_swap_info(si, si->prio, si->swap_map, si->cluster_info, si->zeromap); _enable_swap_info(si); spin_unlock(&si->lock); spin_unlock(&swap_lock); } /* * Called after clearing SWP_WRITEOK, ensures cluster_alloc_range * see the updated flags, so there will be no more allocations. 
*/ static void wait_for_allocation(struct swap_info_struct *si) { unsigned long offset; unsigned long end = ALIGN(si->max, SWAPFILE_CLUSTER); struct swap_cluster_info *ci; BUG_ON(si->flags & SWP_WRITEOK); for (offset = 0; offset < end; offset += SWAPFILE_CLUSTER) { ci = lock_cluster(si, offset); unlock_cluster(ci); } } /* * Called after swap device's reference count is dead, so * neither scan nor allocation will use it. */ static void flush_percpu_swap_cluster(struct swap_info_struct *si) { int cpu, i; struct swap_info_struct **pcp_si; for_each_possible_cpu(cpu) { pcp_si = per_cpu_ptr(percpu_swap_cluster.si, cpu); /* * Invalidate the percpu swap cluster cache, si->users * is dead, so no new user will point to it, just flush * any existing user. */ for (i = 0; i < SWAP_NR_ORDERS; i++) cmpxchg(&pcp_si[i], si, NULL); } } SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) { struct swap_info_struct *p = NULL; unsigned char *swap_map; unsigned long *zeromap; struct swap_cluster_info *cluster_info; struct file *swap_file, *victim; struct address_space *mapping; struct inode *inode; struct filename *pathname; int err, found = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; BUG_ON(!current->mm); pathname = getname(specialfile); if (IS_ERR(pathname)) return PTR_ERR(pathname); victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0); err = PTR_ERR(victim); if (IS_ERR(victim)) goto out; mapping = victim->f_mapping; spin_lock(&swap_lock); plist_for_each_entry(p, &swap_active_head, list) { if (p->flags & SWP_WRITEOK) { if (p->swap_file->f_mapping == mapping) { found = 1; break; } } } if (!found) { err = -EINVAL; spin_unlock(&swap_lock); goto out_dput; } if (!security_vm_enough_memory_mm(current->mm, p->pages)) vm_unacct_memory(p->pages); else { err = -ENOMEM; spin_unlock(&swap_lock); goto out_dput; } spin_lock(&p->lock); del_from_avail_list(p, true); if (p->prio < 0) { struct swap_info_struct *si = p; int nid; plist_for_each_entry_continue(si, &swap_active_head, list) { si->prio++; si->list.prio--; for_each_node(nid) { if (si->avail_lists[nid].prio != 1) si->avail_lists[nid].prio--; } } least_priority++; } plist_del(&p->list, &swap_active_head); atomic_long_sub(p->pages, &nr_swap_pages); total_swap_pages -= p->pages; spin_unlock(&p->lock); spin_unlock(&swap_lock); wait_for_allocation(p); set_current_oom_origin(); err = try_to_unuse(p->type); clear_current_oom_origin(); if (err) { /* re-insert swap space back into swap_list */ reinsert_swap_info(p); goto out_dput; } /* * Wait for swap operations protected by get/put_swap_device() * to complete. Because of synchronize_rcu() here, all swap * operations protected by RCU reader side lock (including any * spinlock) will be waited too. This makes it easy to * prevent folio_test_swapcache() and the following swap cache * operations from racing with swapoff. 
*/ percpu_ref_kill(&p->users); synchronize_rcu(); wait_for_completion(&p->comp); flush_work(&p->discard_work); flush_work(&p->reclaim_work); flush_percpu_swap_cluster(p); destroy_swap_extents(p); if (p->flags & SWP_CONTINUED) free_swap_count_continuations(p); if (!p->bdev || !bdev_nonrot(p->bdev)) atomic_dec(&nr_rotate_swap); mutex_lock(&swapon_mutex); spin_lock(&swap_lock); spin_lock(&p->lock); drain_mmlist(); swap_file = p->swap_file; p->swap_file = NULL; p->max = 0; swap_map = p->swap_map; p->swap_map = NULL; zeromap = p->zeromap; p->zeromap = NULL; cluster_info = p->cluster_info; p->cluster_info = NULL; spin_unlock(&p->lock); spin_unlock(&swap_lock); arch_swap_invalidate_area(p->type); zswap_swapoff(p->type); mutex_unlock(&swapon_mutex); kfree(p->global_cluster); p->global_cluster = NULL; vfree(swap_map); kvfree(zeromap); kvfree(cluster_info); /* Destroy swap account information */ swap_cgroup_swapoff(p->type); exit_swap_address_space(p->type); inode = mapping->host; inode_lock(inode); inode->i_flags &= ~S_SWAPFILE; inode_unlock(inode); filp_close(swap_file, NULL); /* * Clear the SWP_USED flag after all resources are freed so that swapon * can reuse this swap_info in alloc_swap_info() safely. It is ok to * not hold p->lock after we cleared its SWP_WRITEOK. */ spin_lock(&swap_lock); p->flags = 0; spin_unlock(&swap_lock); err = 0; atomic_inc(&proc_poll_event); wake_up_interruptible(&proc_poll_wait); out_dput: filp_close(victim, NULL); out: putname(pathname); return err; } #ifdef CONFIG_PROC_FS static __poll_t swaps_poll(struct file *file, poll_table *wait) { struct seq_file *seq = file->private_data; poll_wait(file, &proc_poll_wait, wait); if (seq->poll_event != atomic_read(&proc_poll_event)) { seq->poll_event = atomic_read(&proc_poll_event); return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI; } return EPOLLIN | EPOLLRDNORM; } /* iterator */ static void *swap_start(struct seq_file *swap, loff_t *pos) { struct swap_info_struct *si; int type; loff_t l = *pos; mutex_lock(&swapon_mutex); if (!l) return SEQ_START_TOKEN; for (type = 0; (si = swap_type_to_swap_info(type)); type++) { if (!(si->flags & SWP_USED) || !si->swap_map) continue; if (!--l) return si; } return NULL; } static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) { struct swap_info_struct *si = v; int type; if (v == SEQ_START_TOKEN) type = 0; else type = si->type + 1; ++(*pos); for (; (si = swap_type_to_swap_info(type)); type++) { if (!(si->flags & SWP_USED) || !si->swap_map) continue; return si; } return NULL; } static void swap_stop(struct seq_file *swap, void *v) { mutex_unlock(&swapon_mutex); } static int swap_show(struct seq_file *swap, void *v) { struct swap_info_struct *si = v; struct file *file; int len; unsigned long bytes, inuse; if (si == SEQ_START_TOKEN) { seq_puts(swap, "Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n"); return 0; } bytes = K(si->pages); inuse = K(swap_usage_in_pages(si)); file = si->swap_file; len = seq_file_path(swap, file, " \t\n\\"); seq_printf(swap, "%*s%s\t%lu\t%s%lu\t%s%d\n", len < 40 ? 40 - len : 1, " ", S_ISBLK(file_inode(file)->i_mode) ? "partition" : "file\t", bytes, bytes < 10000000 ? "\t" : "", inuse, inuse < 10000000 ? 
"\t" : "", si->prio); return 0; } static const struct seq_operations swaps_op = { .start = swap_start, .next = swap_next, .stop = swap_stop, .show = swap_show }; static int swaps_open(struct inode *inode, struct file *file) { struct seq_file *seq; int ret; ret = seq_open(file, &swaps_op); if (ret) return ret; seq = file->private_data; seq->poll_event = atomic_read(&proc_poll_event); return 0; } static const struct proc_ops swaps_proc_ops = { .proc_flags = PROC_ENTRY_PERMANENT, .proc_open = swaps_open, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_release = seq_release, .proc_poll = swaps_poll, }; static int __init procswaps_init(void) { proc_create("swaps", 0, NULL, &swaps_proc_ops); return 0; } __initcall(procswaps_init); #endif /* CONFIG_PROC_FS */ #ifdef MAX_SWAPFILES_CHECK static int __init max_swapfiles_check(void) { MAX_SWAPFILES_CHECK(); return 0; } late_initcall(max_swapfiles_check); #endif static struct swap_info_struct *alloc_swap_info(void) { struct swap_info_struct *p; struct swap_info_struct *defer = NULL; unsigned int type; int i; p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL); if (!p) return ERR_PTR(-ENOMEM); if (percpu_ref_init(&p->users, swap_users_ref_free, PERCPU_REF_INIT_DEAD, GFP_KERNEL)) { kvfree(p); return ERR_PTR(-ENOMEM); } spin_lock(&swap_lock); for (type = 0; type < nr_swapfiles; type++) { if (!(swap_info[type]->flags & SWP_USED)) break; } if (type >= MAX_SWAPFILES) { spin_unlock(&swap_lock); percpu_ref_exit(&p->users); kvfree(p); return ERR_PTR(-EPERM); } if (type >= nr_swapfiles) { p->type = type; /* * Publish the swap_info_struct after initializing it. * Note that kvzalloc() above zeroes all its fields. */ smp_store_release(&swap_info[type], p); /* rcu_assign_pointer() */ nr_swapfiles++; } else { defer = p; p = swap_info[type]; /* * Do not memset this entry: a racing procfs swap_next() * would be relying on p->type to remain valid. */ } p->swap_extent_root = RB_ROOT; plist_node_init(&p->list, 0); for_each_node(i) plist_node_init(&p->avail_lists[i], 0); p->flags = SWP_USED; spin_unlock(&swap_lock); if (defer) { percpu_ref_exit(&defer->users); kvfree(defer); } spin_lock_init(&p->lock); spin_lock_init(&p->cont_lock); atomic_long_set(&p->inuse_pages, SWAP_USAGE_OFFLIST_BIT); init_completion(&p->comp); return p; } static int claim_swapfile(struct swap_info_struct *si, struct inode *inode) { if (S_ISBLK(inode->i_mode)) { si->bdev = I_BDEV(inode); /* * Zoned block devices contain zones that have a sequential * write only restriction. Hence zoned block devices are not * suitable for swapping. Disallow them here. */ if (bdev_is_zoned(si->bdev)) return -EINVAL; si->flags |= SWP_BLKDEV; } else if (S_ISREG(inode->i_mode)) { si->bdev = inode->i_sb->s_bdev; } return 0; } /* * Find out how many pages are allowed for a single swap device. There * are two limiting factors: * 1) the number of bits for the swap offset in the swp_entry_t type, and * 2) the number of bits in the swap pte, as defined by the different * architectures. * * In order to find the largest possible bit mask, a swap entry with * swap type 0 and swap offset ~0UL is created, encoded to a swap pte, * decoded to a swp_entry_t again, and finally the swap offset is * extracted. * * This will mask all the bits from the initial ~0UL mask that can't * be encoded in either the swp_entry_t or the architecture definition * of a swap pte. 
*/ unsigned long generic_max_swapfile_size(void) { return swp_offset(pte_to_swp_entry( swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1; } /* Can be overridden by an architecture for additional checks. */ __weak unsigned long arch_max_swapfile_size(void) { return generic_max_swapfile_size(); } static unsigned long read_swap_header(struct swap_info_struct *si, union swap_header *swap_header, struct inode *inode) { int i; unsigned long maxpages; unsigned long swapfilepages; unsigned long last_page; if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) { pr_err("Unable to find swap-space signature\n"); return 0; } /* swap partition endianness hack... */ if (swab32(swap_header->info.version) == 1) { swab32s(&swap_header->info.version); swab32s(&swap_header->info.last_page); swab32s(&swap_header->info.nr_badpages); if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) return 0; for (i = 0; i < swap_header->info.nr_badpages; i++) swab32s(&swap_header->info.badpages[i]); } /* Check the swap header's sub-version */ if (swap_header->info.version != 1) { pr_warn("Unable to handle swap header version %d\n", swap_header->info.version); return 0; } maxpages = swapfile_maximum_size; last_page = swap_header->info.last_page; if (!last_page) { pr_warn("Empty swap-file\n"); return 0; } if (last_page > maxpages) { pr_warn("Truncating oversized swap area, only using %luk out of %luk\n", K(maxpages), K(last_page)); } if (maxpages > last_page) { maxpages = last_page + 1; /* p->max is an unsigned int: don't overflow it */ if ((unsigned int)maxpages == 0) maxpages = UINT_MAX; } if (!maxpages) return 0; swapfilepages = i_size_read(inode) >> PAGE_SHIFT; if (swapfilepages && maxpages > swapfilepages) { pr_warn("Swap area shorter than signature indicates\n"); return 0; } if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode)) return 0; if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) return 0; return maxpages; } static int setup_swap_map_and_extents(struct swap_info_struct *si, union swap_header *swap_header, unsigned char *swap_map, unsigned long maxpages, sector_t *span) { unsigned int nr_good_pages; unsigned long i; int nr_extents; nr_good_pages = maxpages - 1; /* omit header page */ for (i = 0; i < swap_header->info.nr_badpages; i++) { unsigned int page_nr = swap_header->info.badpages[i]; if (page_nr == 0 || page_nr > swap_header->info.last_page) return -EINVAL; if (page_nr < maxpages) { swap_map[page_nr] = SWAP_MAP_BAD; nr_good_pages--; } } if (nr_good_pages) { swap_map[0] = SWAP_MAP_BAD; si->max = maxpages; si->pages = nr_good_pages; nr_extents = setup_swap_extents(si, span); if (nr_extents < 0) return nr_extents; nr_good_pages = si->pages; } if (!nr_good_pages) { pr_warn("Empty swap-file\n"); return -EINVAL; } return nr_extents; } #define SWAP_CLUSTER_INFO_COLS \ DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info)) #define SWAP_CLUSTER_SPACE_COLS \ DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER) #define SWAP_CLUSTER_COLS \ max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS) static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si, union swap_header *swap_header, unsigned long maxpages) { unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER); struct swap_cluster_info *cluster_info; unsigned long i, j, idx; int err = -ENOMEM; cluster_info = kvcalloc(nr_clusters, sizeof(*cluster_info), GFP_KERNEL); if (!cluster_info) goto err; for (i = 0; i < nr_clusters; i++) spin_lock_init(&cluster_info[i].lock); if (!(si->flags & SWP_SOLIDSTATE)) { 
si->global_cluster = kmalloc(sizeof(*si->global_cluster), GFP_KERNEL); if (!si->global_cluster) goto err_free; for (i = 0; i < SWAP_NR_ORDERS; i++) si->global_cluster->next[i] = SWAP_ENTRY_INVALID; spin_lock_init(&si->global_cluster_lock); } /* * Mark unusable pages as unavailable. The clusters aren't * marked free yet, so no list operations are involved yet. * * See setup_swap_map_and_extents(): header page, bad pages, * and the EOF part of the last cluster. */ inc_cluster_info_page(si, cluster_info, 0); for (i = 0; i < swap_header->info.nr_badpages; i++) inc_cluster_info_page(si, cluster_info, swap_header->info.badpages[i]); for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++) inc_cluster_info_page(si, cluster_info, i); INIT_LIST_HEAD(&si->free_clusters); INIT_LIST_HEAD(&si->full_clusters); INIT_LIST_HEAD(&si->discard_clusters); for (i = 0; i < SWAP_NR_ORDERS; i++) { INIT_LIST_HEAD(&si->nonfull_clusters[i]); INIT_LIST_HEAD(&si->frag_clusters[i]); atomic_long_set(&si->frag_cluster_nr[i], 0); } /* * Reduce false cache line sharing between cluster_info and * sharing same address space. */ for (j = 0; j < SWAP_CLUSTER_COLS; j++) { for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) { struct swap_cluster_info *ci; idx = i * SWAP_CLUSTER_COLS + j; ci = cluster_info + idx; if (idx >= nr_clusters) continue; if (ci->count) { ci->flags = CLUSTER_FLAG_NONFULL; list_add_tail(&ci->list, &si->nonfull_clusters[0]); continue; } ci->flags = CLUSTER_FLAG_FREE; list_add_tail(&ci->list, &si->free_clusters); } } return cluster_info; err_free: kvfree(cluster_info); err: return ERR_PTR(err); } SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) { struct swap_info_struct *si; struct filename *name; struct file *swap_file = NULL; struct address_space *mapping; struct dentry *dentry; int prio; int error; union swap_header *swap_header; int nr_extents; sector_t span; unsigned long maxpages; unsigned char *swap_map = NULL; unsigned long *zeromap = NULL; struct swap_cluster_info *cluster_info = NULL; struct folio *folio = NULL; struct inode *inode = NULL; bool inced_nr_rotate_swap = false; if (swap_flags & ~SWAP_FLAGS_VALID) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!swap_avail_heads) return -ENOMEM; si = alloc_swap_info(); if (IS_ERR(si)) return PTR_ERR(si); INIT_WORK(&si->discard_work, swap_discard_work); INIT_WORK(&si->reclaim_work, swap_reclaim_work); name = getname(specialfile); if (IS_ERR(name)) { error = PTR_ERR(name); name = NULL; goto bad_swap; } swap_file = file_open_name(name, O_RDWR | O_LARGEFILE | O_EXCL, 0); if (IS_ERR(swap_file)) { error = PTR_ERR(swap_file); swap_file = NULL; goto bad_swap; } si->swap_file = swap_file; mapping = swap_file->f_mapping; dentry = swap_file->f_path.dentry; inode = mapping->host; error = claim_swapfile(si, inode); if (unlikely(error)) goto bad_swap; inode_lock(inode); if (d_unlinked(dentry) || cant_mount(dentry)) { error = -ENOENT; goto bad_swap_unlock_inode; } if (IS_SWAPFILE(inode)) { error = -EBUSY; goto bad_swap_unlock_inode; } /* * The swap subsystem needs a major overhaul to support this. * It doesn't work yet so just disable it for now. */ if (mapping_min_folio_order(mapping) > 0) { error = -EINVAL; goto bad_swap_unlock_inode; } /* * Read the swap header. 
*/ if (!mapping->a_ops->read_folio) { error = -EINVAL; goto bad_swap_unlock_inode; } folio = read_mapping_folio(mapping, 0, swap_file); if (IS_ERR(folio)) { error = PTR_ERR(folio); goto bad_swap_unlock_inode; } swap_header = kmap_local_folio(folio, 0); maxpages = read_swap_header(si, swap_header, inode); if (unlikely(!maxpages)) { error = -EINVAL; goto bad_swap_unlock_inode; } /* OK, set up the swap map and apply the bad block list */ swap_map = vzalloc(maxpages); if (!swap_map) { error = -ENOMEM; goto bad_swap_unlock_inode; } error = swap_cgroup_swapon(si->type, maxpages); if (error) goto bad_swap_unlock_inode; nr_extents = setup_swap_map_and_extents(si, swap_header, swap_map, maxpages, &span); if (unlikely(nr_extents < 0)) { error = nr_extents; goto bad_swap_unlock_inode; } /* * Use kvmalloc_array instead of bitmap_zalloc as the allocation order might * be above MAX_PAGE_ORDER incase of a large swap file. */ zeromap = kvmalloc_array(BITS_TO_LONGS(maxpages), sizeof(long), GFP_KERNEL | __GFP_ZERO); if (!zeromap) { error = -ENOMEM; goto bad_swap_unlock_inode; } if (si->bdev && bdev_stable_writes(si->bdev)) si->flags |= SWP_STABLE_WRITES; if (si->bdev && bdev_synchronous(si->bdev)) si->flags |= SWP_SYNCHRONOUS_IO; if (si->bdev && bdev_nonrot(si->bdev)) { si->flags |= SWP_SOLIDSTATE; } else { atomic_inc(&nr_rotate_swap); inced_nr_rotate_swap = true; } cluster_info = setup_clusters(si, swap_header, maxpages); if (IS_ERR(cluster_info)) { error = PTR_ERR(cluster_info); cluster_info = NULL; goto bad_swap_unlock_inode; } if ((swap_flags & SWAP_FLAG_DISCARD) && si->bdev && bdev_max_discard_sectors(si->bdev)) { /* * When discard is enabled for swap with no particular * policy flagged, we set all swap discard flags here in * order to sustain backward compatibility with older * swapon(8) releases. */ si->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD | SWP_PAGE_DISCARD); /* * By flagging sys_swapon, a sysadmin can tell us to * either do single-time area discards only, or to just * perform discards for released swap page-clusters. * Now it's time to adjust the p->flags accordingly. */ if (swap_flags & SWAP_FLAG_DISCARD_ONCE) si->flags &= ~SWP_PAGE_DISCARD; else if (swap_flags & SWAP_FLAG_DISCARD_PAGES) si->flags &= ~SWP_AREA_DISCARD; /* issue a swapon-time discard if it's still required */ if (si->flags & SWP_AREA_DISCARD) { int err = discard_swap(si); if (unlikely(err)) pr_err("swapon: discard_swap(%p): %d\n", si, err); } } error = init_swap_address_space(si->type, maxpages); if (error) goto bad_swap_unlock_inode; error = zswap_swapon(si->type, maxpages); if (error) goto free_swap_address_space; /* * Flush any pending IO and dirty mappings before we start using this * swap device. */ inode->i_flags |= S_SWAPFILE; error = inode_drain_writes(inode); if (error) { inode->i_flags &= ~S_SWAPFILE; goto free_swap_zswap; } mutex_lock(&swapon_mutex); prio = -1; if (swap_flags & SWAP_FLAG_PREFER) prio = swap_flags & SWAP_FLAG_PRIO_MASK; enable_swap_info(si, prio, swap_map, cluster_info, zeromap); pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s\n", K(si->pages), name->name, si->prio, nr_extents, K((unsigned long long)span), (si->flags & SWP_SOLIDSTATE) ? "SS" : "", (si->flags & SWP_DISCARDABLE) ? "D" : "", (si->flags & SWP_AREA_DISCARD) ? "s" : "", (si->flags & SWP_PAGE_DISCARD) ? 
"c" : ""); mutex_unlock(&swapon_mutex); atomic_inc(&proc_poll_event); wake_up_interruptible(&proc_poll_wait); error = 0; goto out; free_swap_zswap: zswap_swapoff(si->type); free_swap_address_space: exit_swap_address_space(si->type); bad_swap_unlock_inode: inode_unlock(inode); bad_swap: kfree(si->global_cluster); si->global_cluster = NULL; inode = NULL; destroy_swap_extents(si); swap_cgroup_swapoff(si->type); spin_lock(&swap_lock); si->swap_file = NULL; si->flags = 0; spin_unlock(&swap_lock); vfree(swap_map); kvfree(zeromap); kvfree(cluster_info); if (inced_nr_rotate_swap) atomic_dec(&nr_rotate_swap); if (swap_file) filp_close(swap_file, NULL); out: if (!IS_ERR_OR_NULL(folio)) folio_release_kmap(folio, swap_header); if (name) putname(name); if (inode) inode_unlock(inode); return error; } void si_swapinfo(struct sysinfo *val) { unsigned int type; unsigned long nr_to_be_unused = 0; spin_lock(&swap_lock); for (type = 0; type < nr_swapfiles; type++) { struct swap_info_struct *si = swap_info[type]; if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) nr_to_be_unused += swap_usage_in_pages(si); } val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused; val->totalswap = total_swap_pages + nr_to_be_unused; spin_unlock(&swap_lock); } /* * Verify that nr swap entries are valid and increment their swap map counts. * * Returns error code in following case. * - success -> 0 * - swp_entry is invalid -> EINVAL * - swap-cache reference is requested but there is already one. -> EEXIST * - swap-cache reference is requested but the entry is not used. -> ENOENT * - swap-mapped reference requested but needs continued swap count. -> ENOMEM */ static int __swap_duplicate(swp_entry_t entry, unsigned char usage, int nr) { struct swap_info_struct *si; struct swap_cluster_info *ci; unsigned long offset; unsigned char count; unsigned char has_cache; int err, i; si = swp_swap_info(entry); if (WARN_ON_ONCE(!si)) { pr_err("%s%08lx\n", Bad_file, entry.val); return -EINVAL; } offset = swp_offset(entry); VM_WARN_ON(nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER); VM_WARN_ON(usage == 1 && nr > 1); ci = lock_cluster(si, offset); err = 0; for (i = 0; i < nr; i++) { count = si->swap_map[offset + i]; /* * swapin_readahead() doesn't check if a swap entry is valid, so the * swap entry could be SWAP_MAP_BAD. Check here with lock held. */ if (unlikely(swap_count(count) == SWAP_MAP_BAD)) { err = -ENOENT; goto unlock_out; } has_cache = count & SWAP_HAS_CACHE; count &= ~SWAP_HAS_CACHE; if (!count && !has_cache) { err = -ENOENT; } else if (usage == SWAP_HAS_CACHE) { if (has_cache) err = -EEXIST; } else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) { err = -EINVAL; } if (err) goto unlock_out; } for (i = 0; i < nr; i++) { count = si->swap_map[offset + i]; has_cache = count & SWAP_HAS_CACHE; count &= ~SWAP_HAS_CACHE; if (usage == SWAP_HAS_CACHE) has_cache = SWAP_HAS_CACHE; else if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX) count += usage; else if (swap_count_continued(si, offset + i, count)) count = COUNT_CONTINUED; else { /* * Don't need to rollback changes, because if * usage == 1, there must be nr == 1. */ err = -ENOMEM; goto unlock_out; } WRITE_ONCE(si->swap_map[offset + i], count | has_cache); } unlock_out: unlock_cluster(ci); return err; } /* * Help swapoff by noting that swap entry belongs to shmem/tmpfs * (in which case its reference count is never incremented). */ void swap_shmem_alloc(swp_entry_t entry, int nr) { __swap_duplicate(entry, SWAP_MAP_SHMEM, nr); } /* * Increase reference count of swap entry by 1. 
* Returns 0 for success, or -ENOMEM if a swap_count_continuation is required * but could not be atomically allocated. Returns 0, just as if it succeeded, * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which * might occur if a page table entry has got corrupted. */ int swap_duplicate(swp_entry_t entry) { int err = 0; while (!err && __swap_duplicate(entry, 1, 1) == -ENOMEM) err = add_swap_count_continuation(entry, GFP_ATOMIC); return err; } /* * @entry: first swap entry from which we allocate nr swap cache. * * Called when allocating swap cache for existing swap entries, * This can return error codes. Returns 0 at success. * -EEXIST means there is a swap cache. * Note: return code is different from swap_duplicate(). */ int swapcache_prepare(swp_entry_t entry, int nr) { return __swap_duplicate(entry, SWAP_HAS_CACHE, nr); } /* * Caller should ensure entries belong to the same folio so * the entries won't span cross cluster boundary. */ void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr) { swap_entries_put_cache(si, entry, nr); } struct swap_info_struct *swp_swap_info(swp_entry_t entry) { return swap_type_to_swap_info(swp_type(entry)); } /* * add_swap_count_continuation - called when a swap count is duplicated * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's * page of the original vmalloc'ed swap_map, to hold the continuation count * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc. * * These continuation pages are seldom referenced: the common paths all work * on the original swap_map, only referring to a continuation page when the * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX. * * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL) * can be called after dropping locks. */ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask) { struct swap_info_struct *si; struct swap_cluster_info *ci; struct page *head; struct page *page; struct page *list_page; pgoff_t offset; unsigned char count; int ret = 0; /* * When debugging, it's easier to use __GFP_ZERO here; but it's better * for latency not to zero a page while GFP_ATOMIC and holding locks. */ page = alloc_page(gfp_mask | __GFP_HIGHMEM); si = get_swap_device(entry); if (!si) { /* * An acceptable race has occurred since the failing * __swap_duplicate(): the swap device may be swapoff */ goto outer; } offset = swp_offset(entry); ci = lock_cluster(si, offset); count = swap_count(si->swap_map[offset]); if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) { /* * The higher the swap count, the more likely it is that tasks * will race to add swap count continuation: we need to avoid * over-provisioning. */ goto out; } if (!page) { ret = -ENOMEM; goto out; } head = vmalloc_to_page(si->swap_map + offset); offset &= ~PAGE_MASK; spin_lock(&si->cont_lock); /* * Page allocation does not initialize the page's lru field, * but it does always reset its private field. */ if (!page_private(head)) { BUG_ON(count & COUNT_CONTINUED); INIT_LIST_HEAD(&head->lru); set_page_private(head, SWP_CONTINUED); si->flags |= SWP_CONTINUED; } list_for_each_entry(list_page, &head->lru, lru) { unsigned char *map; /* * If the previous map said no continuation, but we've found * a continuation page, free our allocation and use this one. 
*/ if (!(count & COUNT_CONTINUED)) goto out_unlock_cont; map = kmap_local_page(list_page) + offset; count = *map; kunmap_local(map); /* * If this continuation count now has some space in it, * free our allocation and use this one. */ if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX) goto out_unlock_cont; } list_add_tail(&page->lru, &head->lru); page = NULL; /* now it's attached, don't free it */ out_unlock_cont: spin_unlock(&si->cont_lock); out: unlock_cluster(ci); put_swap_device(si); outer: if (page) __free_page(page); return ret; } /* * swap_count_continued - when the original swap_map count is incremented * from SWAP_MAP_MAX, check if there is already a continuation page to carry * into, carry if so, or else fail until a new continuation page is allocated; * when the original swap_map count is decremented from 0 with continuation, * borrow from the continuation and report whether it still holds more. * Called while __swap_duplicate() or caller of swap_entry_put_locked() * holds cluster lock. */ static bool swap_count_continued(struct swap_info_struct *si, pgoff_t offset, unsigned char count) { struct page *head; struct page *page; unsigned char *map; bool ret; head = vmalloc_to_page(si->swap_map + offset); if (page_private(head) != SWP_CONTINUED) { BUG_ON(count & COUNT_CONTINUED); return false; /* need to add count continuation */ } spin_lock(&si->cont_lock); offset &= ~PAGE_MASK; page = list_next_entry(head, lru); map = kmap_local_page(page) + offset; if (count == SWAP_MAP_MAX) /* initial increment from swap_map */ goto init_map; /* jump over SWAP_CONT_MAX checks */ if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */ /* * Think of how you add 1 to 999 */ while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) { kunmap_local(map); page = list_next_entry(page, lru); BUG_ON(page == head); map = kmap_local_page(page) + offset; } if (*map == SWAP_CONT_MAX) { kunmap_local(map); page = list_next_entry(page, lru); if (page == head) { ret = false; /* add count continuation */ goto out; } map = kmap_local_page(page) + offset; init_map: *map = 0; /* we didn't zero the page */ } *map += 1; kunmap_local(map); while ((page = list_prev_entry(page, lru)) != head) { map = kmap_local_page(page) + offset; *map = COUNT_CONTINUED; kunmap_local(map); } ret = true; /* incremented */ } else { /* decrementing */ /* * Think of how you subtract 1 from 1000 */ BUG_ON(count != COUNT_CONTINUED); while (*map == COUNT_CONTINUED) { kunmap_local(map); page = list_next_entry(page, lru); BUG_ON(page == head); map = kmap_local_page(page) + offset; } BUG_ON(*map == 0); *map -= 1; if (*map == 0) count = 0; kunmap_local(map); while ((page = list_prev_entry(page, lru)) != head) { map = kmap_local_page(page) + offset; *map = SWAP_CONT_MAX | count; count = COUNT_CONTINUED; kunmap_local(map); } ret = count == COUNT_CONTINUED; } out: spin_unlock(&si->cont_lock); return ret; } /* * free_swap_count_continuations - swapoff free all the continuation pages * appended to the swap_map, after swap_map is quiesced, before vfree'ing it. 
*/ static void free_swap_count_continuations(struct swap_info_struct *si) { pgoff_t offset; for (offset = 0; offset < si->max; offset += PAGE_SIZE) { struct page *head; head = vmalloc_to_page(si->swap_map + offset); if (page_private(head)) { struct page *page, *next; list_for_each_entry_safe(page, next, &head->lru, lru) { list_del(&page->lru); __free_page(page); } } } } #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) static bool __has_usable_swap(void) { return !plist_head_empty(&swap_active_head); } void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp) { struct swap_info_struct *si, *next; int nid = folio_nid(folio); if (!(gfp & __GFP_IO)) return; if (!__has_usable_swap()) return; if (!blk_cgroup_congested()) return; /* * We've already scheduled a throttle, avoid taking the global swap * lock. */ if (current->throttle_disk) return; spin_lock(&swap_avail_lock); plist_for_each_entry_safe(si, next, &swap_avail_heads[nid], avail_lists[nid]) { if (si->bdev) { blkcg_schedule_throttle(si->bdev->bd_disk, true); break; } } spin_unlock(&swap_avail_lock); } #endif static int __init swapfile_init(void) { int nid; swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head), GFP_KERNEL); if (!swap_avail_heads) { pr_emerg("Not enough memory for swap heads, swap is disabled\n"); return -ENOMEM; } for_each_node(nid) plist_head_init(&swap_avail_heads[nid]); swapfile_maximum_size = arch_max_swapfile_size(); #ifdef CONFIG_MIGRATION if (swapfile_maximum_size >= (1UL << SWP_MIG_TOTAL_BITS)) swap_migration_ad_supported = true; #endif /* CONFIG_MIGRATION */ return 0; } subsys_initcall(swapfile_init);
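/*
 * Illustrative sketch only (not kernel code, and not the exact swap_map
 * encoding): swap_count_continued() above spreads a large reference count
 * across the swap_map byte plus a chain of continuation bytes, carrying on
 * increment and borrowing on decrement like multi-digit arithmetic - the
 * "add 1 to 999" / "subtract 1 from 1000" comments above.  LOW_MAX, CONT_MAX
 * and NR_DIGITS are made-up stand-ins for SWAP_MAP_MAX, SWAP_CONT_MAX and the
 * length of the continuation chain.
 */
#include <stdbool.h>
#include <stdio.h>

#define LOW_MAX		0x3f	/* capacity of the low digit (the swap_map byte) */
#define CONT_MAX	0x7f	/* capacity of each continuation digit */
#define NR_DIGITS	4	/* one low digit plus three continuation digits */

static unsigned char digit_max(int i)
{
	return i == 0 ? LOW_MAX : CONT_MAX;
}

/* Increment with carry; false means another continuation digit is needed. */
static bool count_inc(unsigned char d[NR_DIGITS])
{
	for (int i = 0; i < NR_DIGITS; i++) {
		if (d[i] < digit_max(i)) {
			d[i]++;
			return true;
		}
		d[i] = 0;		/* digit overflows: carry into the next one */
	}
	return false;			/* every digit saturated */
}

/* Decrement with borrow; false means the count was already zero. */
static bool count_dec(unsigned char d[NR_DIGITS])
{
	int i;

	for (i = 0; i < NR_DIGITS && !d[i]; i++)
		;			/* find the lowest non-zero digit */
	if (i == NR_DIGITS)
		return false;		/* underflow (the kernel BUG()s here) */
	d[i]--;
	while (i-- > 0)
		d[i] = digit_max(i);	/* refill the digits we borrowed through */
	return true;
}

int main(void)
{
	unsigned char d[NR_DIGITS] = { 0 };

	for (int i = 0; i < LOW_MAX + 2; i++)	/* overflow the low digit */
		count_inc(d);
	printf("low=%u cont0=%u\n", (unsigned)d[0], (unsigned)d[1]);	/* 1 1 */
	count_dec(d);
	count_dec(d);
	printf("low=%u cont0=%u\n", (unsigned)d[0], (unsigned)d[1]);	/* 63 0 */
	return 0;
}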
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/include/linux/sunrpc/xprt.h
 *
 * Declarations for the RPC transport interface.
* * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> */ #ifndef _LINUX_SUNRPC_XPRT_H #define _LINUX_SUNRPC_XPRT_H #include <linux/uio.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/ktime.h> #include <linux/kref.h> #include <linux/sunrpc/sched.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/msg_prot.h> #define RPC_MIN_SLOT_TABLE (2U) #define RPC_DEF_SLOT_TABLE (16U) #define RPC_MAX_SLOT_TABLE_LIMIT (65536U) #define RPC_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE_LIMIT #define RPC_CWNDSHIFT (8U) #define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT) #define RPC_INITCWND RPC_CWNDSCALE #define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT) #define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd) #define RPC_GSS_SEQNO_ARRAY_SIZE 3U enum rpc_display_format_t { RPC_DISPLAY_ADDR = 0, RPC_DISPLAY_PORT, RPC_DISPLAY_PROTO, RPC_DISPLAY_HEX_ADDR, RPC_DISPLAY_HEX_PORT, RPC_DISPLAY_NETID, RPC_DISPLAY_MAX, }; struct rpc_task; struct rpc_xprt; struct xprt_class; struct seq_file; struct svc_serv; struct net; #include <linux/lwq.h> /* * This describes a complete RPC request */ struct rpc_rqst { /* * This is the user-visible part */ struct rpc_xprt * rq_xprt; /* RPC client */ struct xdr_buf rq_snd_buf; /* send buffer */ struct xdr_buf rq_rcv_buf; /* recv buffer */ /* * This is the private part */ struct rpc_task * rq_task; /* RPC task data */ struct rpc_cred * rq_cred; /* Bound cred */ __be32 rq_xid; /* request XID */ int rq_cong; /* has incremented xprt->cong */ u32 rq_seqnos[RPC_GSS_SEQNO_ARRAY_SIZE]; /* past gss req seq nos. */ unsigned int rq_seqno_count; /* number of entries in rq_seqnos */ int rq_enc_pages_num; struct page **rq_enc_pages; /* scratch pages for use by gss privacy code */ void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */ union { struct list_head rq_list; /* Slot allocation list */ struct rb_node rq_recv; /* Receive queue */ }; struct list_head rq_xmit; /* Send queue */ struct list_head rq_xmit2; /* Send queue */ void *rq_buffer; /* Call XDR encode buffer */ size_t rq_callsize; void *rq_rbuffer; /* Reply XDR decode buffer */ size_t rq_rcvsize; size_t rq_xmit_bytes_sent; /* total bytes sent */ size_t rq_reply_bytes_recvd; /* total reply bytes */ /* received */ struct xdr_buf rq_private_buf; /* The receive buffer * used in the softirq. 
*/ unsigned long rq_majortimeo; /* major timeout alarm */ unsigned long rq_minortimeo; /* minor timeout alarm */ unsigned long rq_timeout; /* Current timeout value */ ktime_t rq_rtt; /* round-trip time */ unsigned int rq_retries; /* # of retries */ unsigned int rq_connect_cookie; /* A cookie used to track the state of the transport connection */ atomic_t rq_pin; /* * Partial send handling */ u32 rq_bytes_sent; /* Bytes we have sent */ ktime_t rq_xtime; /* transmit time stamp */ int rq_ntrans; #if defined(CONFIG_SUNRPC_BACKCHANNEL) struct lwq_node rq_bc_list; /* Callback service list */ unsigned long rq_bc_pa_state; /* Backchannel prealloc state */ struct list_head rq_bc_pa_list; /* Backchannel prealloc list */ #endif /* CONFIG_SUNRPC_BACKCHANEL */ }; #define rq_svec rq_snd_buf.head #define rq_slen rq_snd_buf.len static inline int xprt_rqst_add_seqno(struct rpc_rqst *req, u32 seqno) { if (likely(req->rq_seqno_count < RPC_GSS_SEQNO_ARRAY_SIZE)) req->rq_seqno_count++; /* Shift array to make room for the newest element at the beginning */ memmove(&req->rq_seqnos[1], &req->rq_seqnos[0], (RPC_GSS_SEQNO_ARRAY_SIZE - 1) * sizeof(req->rq_seqnos[0])); req->rq_seqnos[0] = seqno; return 0; } /* RPC transport layer security policies */ enum xprtsec_policies { RPC_XPRTSEC_NONE = 0, RPC_XPRTSEC_TLS_ANON, RPC_XPRTSEC_TLS_X509, }; struct xprtsec_parms { enum xprtsec_policies policy; /* authentication material */ key_serial_t cert_serial; key_serial_t privkey_serial; }; struct rpc_xprt_ops { void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize); int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); void (*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task); void (*free_slot)(struct rpc_xprt *xprt, struct rpc_rqst *req); void (*rpcbind)(struct rpc_task *task); void (*set_port)(struct rpc_xprt *xprt, unsigned short port); void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task); int (*get_srcaddr)(struct rpc_xprt *xprt, char *buf, size_t buflen); unsigned short (*get_srcport)(struct rpc_xprt *xprt); int (*buf_alloc)(struct rpc_task *task); void (*buf_free)(struct rpc_task *task); int (*prepare_request)(struct rpc_rqst *req, struct xdr_buf *buf); int (*send_request)(struct rpc_rqst *req); void (*abort_send_request)(struct rpc_rqst *req); void (*wait_for_reply_request)(struct rpc_task *task); void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task); void (*release_request)(struct rpc_task *task); void (*close)(struct rpc_xprt *xprt); void (*destroy)(struct rpc_xprt *xprt); void (*set_connect_timeout)(struct rpc_xprt *xprt, unsigned long connect_timeout, unsigned long reconnect_timeout); void (*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq); int (*enable_swap)(struct rpc_xprt *xprt); void (*disable_swap)(struct rpc_xprt *xprt); void (*inject_disconnect)(struct rpc_xprt *xprt); int (*bc_setup)(struct rpc_xprt *xprt, unsigned int min_reqs); size_t (*bc_maxpayload)(struct rpc_xprt *xprt); unsigned int (*bc_num_slots)(struct rpc_xprt *xprt); void (*bc_free_rqst)(struct rpc_rqst *rqst); void (*bc_destroy)(struct rpc_xprt *xprt, unsigned int max_reqs); }; /* * RPC transport identifiers * * To preserve compatibility with the historical use of raw IP protocol * id's for transport selection, UDP and TCP identifiers are specified * with the previous values. No such restriction exists for new transports, * except that they may not collide with these values (17 and 6, * respectively). 
*/ #define XPRT_TRANSPORT_BC (1 << 31) enum xprt_transports { XPRT_TRANSPORT_UDP = IPPROTO_UDP, XPRT_TRANSPORT_TCP = IPPROTO_TCP, XPRT_TRANSPORT_BC_TCP = IPPROTO_TCP | XPRT_TRANSPORT_BC, XPRT_TRANSPORT_RDMA = 256, XPRT_TRANSPORT_BC_RDMA = XPRT_TRANSPORT_RDMA | XPRT_TRANSPORT_BC, XPRT_TRANSPORT_LOCAL = 257, XPRT_TRANSPORT_TCP_TLS = 258, }; struct rpc_sysfs_xprt; struct rpc_xprt { struct kref kref; /* Reference count */ const struct rpc_xprt_ops *ops; /* transport methods */ unsigned int id; /* transport id */ const struct rpc_timeout *timeout; /* timeout parms */ struct sockaddr_storage addr; /* server address */ size_t addrlen; /* size of server address */ int prot; /* IP protocol */ unsigned long cong; /* current congestion */ unsigned long cwnd; /* congestion window */ size_t max_payload; /* largest RPC payload size, in bytes */ struct rpc_wait_queue binding; /* requests waiting on rpcbind */ struct rpc_wait_queue sending; /* requests waiting to send */ struct rpc_wait_queue pending; /* requests in flight */ struct rpc_wait_queue backlog; /* waiting for slot */ struct list_head free; /* free slots */ unsigned int max_reqs; /* max number of slots */ unsigned int min_reqs; /* min number of slots */ unsigned int num_reqs; /* total slots */ unsigned long state; /* transport state */ unsigned char resvport : 1, /* use a reserved port */ reuseport : 1; /* reuse port on reconnect */ atomic_t swapper; /* we're swapping over this transport */ unsigned int bind_index; /* bind function index */ /* * Multipath */ struct list_head xprt_switch; /* * Connection of transports */ unsigned long bind_timeout, reestablish_timeout; struct xprtsec_parms xprtsec; unsigned int connect_cookie; /* A cookie that gets bumped every time the transport is reconnected */ /* * Disconnection of idle transports */ struct work_struct task_cleanup; struct timer_list timer; unsigned long last_used, idle_timeout, connect_timeout, max_reconnect_timeout; /* * Send stuff */ atomic_long_t queuelen; spinlock_t transport_lock; /* lock transport info */ spinlock_t reserve_lock; /* lock slot table */ spinlock_t queue_lock; /* send/receive queue lock */ u32 xid; /* Next XID value to use */ struct rpc_task * snd_task; /* Task blocked in send */ struct list_head xmit_queue; /* Send queue */ atomic_long_t xmit_queuelen; struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ #if defined(CONFIG_SUNRPC_BACKCHANNEL) struct svc_serv *bc_serv; /* The RPC service which will */ /* process the callback */ unsigned int bc_alloc_max; unsigned int bc_alloc_count; /* Total number of preallocs */ atomic_t bc_slot_count; /* Number of allocated slots */ spinlock_t bc_pa_lock; /* Protects the preallocated * items */ struct list_head bc_pa_list; /* List of preallocated * backchannel rpc_rqst's */ #endif /* CONFIG_SUNRPC_BACKCHANNEL */ struct rb_root recv_queue; /* Receive queue */ struct { unsigned long bind_count, /* total number of binds */ connect_count, /* total number of connects */ connect_start, /* connect start timestamp */ connect_time, /* jiffies waiting for connect */ sends, /* how many complete requests */ recvs, /* how many complete requests */ bad_xids, /* lookup_rqst didn't find XID */ max_slots; /* max rpc_slots used */ unsigned long long req_u, /* average requests on the wire */ bklog_u, /* backlog queue utilization */ sending_u, /* send q utilization */ pending_u; /* pend q utilization */ } stat; struct net *xprt_net; netns_tracker ns_tracker; const char *servername; const char *address_strings[RPC_DISPLAY_MAX]; #if 
IS_ENABLED(CONFIG_SUNRPC_DEBUG) struct dentry *debugfs; /* debugfs directory */ #endif struct rcu_head rcu; const struct xprt_class *xprt_class; struct rpc_sysfs_xprt *xprt_sysfs; bool main; /*mark if this is the 1st transport */ }; #if defined(CONFIG_SUNRPC_BACKCHANNEL) /* * Backchannel flags */ #define RPC_BC_PA_IN_USE 0x0001 /* Preallocated backchannel */ /* buffer in use */ #endif /* CONFIG_SUNRPC_BACKCHANNEL */ #if defined(CONFIG_SUNRPC_BACKCHANNEL) static inline int bc_prealloc(struct rpc_rqst *req) { return test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); } #else static inline int bc_prealloc(struct rpc_rqst *req) { return 0; } #endif /* CONFIG_SUNRPC_BACKCHANNEL */ #define XPRT_CREATE_INFINITE_SLOTS (1U) #define XPRT_CREATE_NO_IDLE_TIMEOUT (1U << 1) struct xprt_create { int ident; /* XPRT_TRANSPORT identifier */ struct net * net; struct sockaddr * srcaddr; /* optional local address */ struct sockaddr * dstaddr; /* remote peer address */ size_t addrlen; const char *servername; struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ struct rpc_xprt_switch *bc_xps; unsigned int flags; struct xprtsec_parms xprtsec; unsigned long connect_timeout; unsigned long reconnect_timeout; }; struct xprt_class { struct list_head list; int ident; /* XPRT_TRANSPORT identifier */ struct rpc_xprt * (*setup)(struct xprt_create *); struct module *owner; char name[32]; const char * netid[]; }; /* * Generic internal transport functions */ struct rpc_xprt *xprt_create_transport(struct xprt_create *args); void xprt_connect(struct rpc_task *task); unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt); void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to); void xprt_reserve(struct rpc_task *task); void xprt_retry_reserve(struct rpc_task *task); int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task); int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task); void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req); bool xprt_prepare_transmit(struct rpc_task *task); void xprt_request_enqueue_transmit(struct rpc_task *task); int xprt_request_enqueue_receive(struct rpc_task *task); void xprt_request_wait_receive(struct rpc_task *task); void xprt_request_dequeue_xprt(struct rpc_task *task); bool xprt_request_need_retransmit(struct rpc_task *task); void xprt_transmit(struct rpc_task *task); void xprt_end_transmit(struct rpc_task *task); int xprt_adjust_timeout(struct rpc_rqst *req); void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task); void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); void xprt_release(struct rpc_task *task); struct rpc_xprt * xprt_get(struct rpc_xprt *xprt); void xprt_put(struct rpc_xprt *xprt); struct rpc_xprt * xprt_alloc(struct net *net, size_t size, unsigned int num_prealloc, unsigned int max_req); void xprt_free(struct rpc_xprt *); void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task); bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req); void xprt_cleanup_ids(void); static inline int xprt_enable_swap(struct rpc_xprt *xprt) { return xprt->ops->enable_swap(xprt); } static inline void xprt_disable_swap(struct rpc_xprt *xprt) { xprt->ops->disable_swap(xprt); } /* * Transport switch helper functions */ int xprt_register_transport(struct xprt_class *type); int xprt_unregister_transport(struct xprt_class *type); int xprt_find_transport_ident(const char *); void 
xprt_wait_for_reply_request_def(struct rpc_task *task); void xprt_wait_for_reply_request_rtt(struct rpc_task *task); void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status); void xprt_wait_for_buffer_space(struct rpc_xprt *xprt); bool xprt_write_space(struct rpc_xprt *xprt); void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result); struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid); void xprt_update_rtt(struct rpc_task *task); void xprt_complete_rqst(struct rpc_task *task, int copied); void xprt_pin_rqst(struct rpc_rqst *req); void xprt_unpin_rqst(struct rpc_rqst *req); void xprt_release_rqst_cong(struct rpc_task *task); bool xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req); void xprt_disconnect_done(struct rpc_xprt *xprt); void xprt_force_disconnect(struct rpc_xprt *xprt); void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie); bool xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *); void xprt_unlock_connect(struct rpc_xprt *, void *); void xprt_release_write(struct rpc_xprt *, struct rpc_task *); /* * Reserved bit positions in xprt->state */ #define XPRT_LOCKED (0) #define XPRT_CONNECTED (1) #define XPRT_CONNECTING (2) #define XPRT_CLOSE_WAIT (3) #define XPRT_BOUND (4) #define XPRT_BINDING (5) #define XPRT_CLOSING (6) #define XPRT_OFFLINE (7) #define XPRT_REMOVE (8) #define XPRT_CONGESTED (9) #define XPRT_CWND_WAIT (10) #define XPRT_WRITE_SPACE (11) #define XPRT_SND_IS_COOKIE (12) static inline void xprt_set_connected(struct rpc_xprt *xprt) { set_bit(XPRT_CONNECTED, &xprt->state); } static inline void xprt_clear_connected(struct rpc_xprt *xprt) { clear_bit(XPRT_CONNECTED, &xprt->state); } static inline int xprt_connected(struct rpc_xprt *xprt) { return test_bit(XPRT_CONNECTED, &xprt->state); } static inline int xprt_test_and_set_connected(struct rpc_xprt *xprt) { return test_and_set_bit(XPRT_CONNECTED, &xprt->state); } static inline int xprt_test_and_clear_connected(struct rpc_xprt *xprt) { return test_and_clear_bit(XPRT_CONNECTED, &xprt->state); } static inline void xprt_clear_connecting(struct rpc_xprt *xprt) { smp_mb__before_atomic(); clear_bit(XPRT_CONNECTING, &xprt->state); smp_mb__after_atomic(); } static inline int xprt_connecting(struct rpc_xprt *xprt) { return test_bit(XPRT_CONNECTING, &xprt->state); } static inline int xprt_test_and_set_connecting(struct rpc_xprt *xprt) { return test_and_set_bit(XPRT_CONNECTING, &xprt->state); } static inline void xprt_set_bound(struct rpc_xprt *xprt) { test_and_set_bit(XPRT_BOUND, &xprt->state); } static inline int xprt_bound(struct rpc_xprt *xprt) { return test_bit(XPRT_BOUND, &xprt->state); } static inline void xprt_clear_bound(struct rpc_xprt *xprt) { clear_bit(XPRT_BOUND, &xprt->state); } static inline void xprt_clear_binding(struct rpc_xprt *xprt) { smp_mb__before_atomic(); clear_bit(XPRT_BINDING, &xprt->state); smp_mb__after_atomic(); } static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt) { return test_and_set_bit(XPRT_BINDING, &xprt->state); } void xprt_set_offline_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps); void xprt_set_online_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps); void xprt_delete_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps); #endif /* _LINUX_SUNRPC_XPRT_H */
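/*
 * Illustrative sketch only: the newest-first history pattern used by
 * xprt_rqst_add_seqno() above, reduced to a standalone helper.  struct
 * seqno_hist, SEQNO_HISTORY and seqno_hist_add() are hypothetical stand-ins
 * for rpc_rqst, RPC_GSS_SEQNO_ARRAY_SIZE and the inline helper; only the
 * memmove shift-and-insert is the point.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SEQNO_HISTORY 3

struct seqno_hist {
	uint32_t	seqnos[SEQNO_HISTORY];
	unsigned int	count;		/* how many slots hold valid entries */
};

static void seqno_hist_add(struct seqno_hist *h, uint32_t seqno)
{
	if (h->count < SEQNO_HISTORY)
		h->count++;
	/* Shift everything down one slot so the newest entry lands at [0]. */
	memmove(&h->seqnos[1], &h->seqnos[0],
		(SEQNO_HISTORY - 1) * sizeof(h->seqnos[0]));
	h->seqnos[0] = seqno;
}

int main(void)
{
	struct seqno_hist h = { { 0 }, 0 };

	for (uint32_t s = 100; s < 105; s++)
		seqno_hist_add(&h, s);
	/* Only the most recent SEQNO_HISTORY values survive, newest first. */
	for (unsigned int i = 0; i < h.count; i++)
		printf("seqnos[%u] = %u\n", i, (unsigned int)h.seqnos[i]);
	/* prints 104, 103, 102 */
	return 0;
}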
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_ERROR_H
#define _BCACHEFS_ERROR_H

#include <linux/list.h>
#include <linux/printk.h>
#include "bkey_types.h"
#include "sb-errors.h"

struct bch_dev;
struct bch_fs;
struct work_struct;

/*
 * XXX: separate out errors that indicate on disk data is inconsistent, and flag
 * superblock as such
 */

/* Error messages: */

void __bch2_log_msg_start(const char *, struct printbuf *);

static inline void bch2_log_msg_start(struct bch_fs *c, struct printbuf *out)
{
	__bch2_log_msg_start(c->name, out);
}

/*
 * Inconsistency errors: The on disk data is inconsistent. If these occur during
 * initial recovery, they don't indicate a bug in the running code - we walk all
 * the metadata before modifying anything. If they occur at runtime, they
 * indicate either a bug in the running code or (less likely) data is being
 * silently corrupted under us.
 *
 * XXX: audit all inconsistent errors and make sure they're all recoverable, in
 * BCH_ON_ERROR_CONTINUE mode
 */

bool __bch2_inconsistent_error(struct bch_fs *, struct printbuf *);
bool bch2_inconsistent_error(struct bch_fs *);
__printf(2, 3)
bool bch2_fs_inconsistent(struct bch_fs *, const char *, ...);

#define bch2_fs_inconsistent_on(cond, ...)			\
({								\
	bool _ret = unlikely(!!(cond));				\
	if (_ret)						\
		bch2_fs_inconsistent(__VA_ARGS__);		\
	_ret;							\
})

__printf(2, 3)
bool bch2_trans_inconsistent(struct btree_trans *, const char *, ...);

#define bch2_trans_inconsistent_on(cond, ...)			\
({								\
	bool _ret = unlikely(!!(cond));				\
	if (_ret)						\
		bch2_trans_inconsistent(__VA_ARGS__);		\
	_ret;							\
})

int __bch2_topology_error(struct bch_fs *, struct printbuf *);
__printf(2, 3)
int bch2_fs_topology_error(struct bch_fs *, const char *, ...);

/*
 * Fsck errors: inconsistency errors we detect at mount time, and should ideally
 * be able to repair:
 */

struct fsck_err_state {
	struct list_head	list;
	enum bch_sb_error_id	id;
	u64			nr;
	bool			ratelimited;
	int			ret;
	int			fix;
	char			*last_msg;
};

#define fsck_err_count(_c, _err)	bch2_sb_err_count(_c, BCH_FSCK_ERR_##_err)

bool __bch2_count_fsck_err(struct bch_fs *, enum bch_sb_error_id, struct printbuf *);
#define bch2_count_fsck_err(_c, _err, ...)			\
	__bch2_count_fsck_err(_c, BCH_FSCK_ERR_##_err, __VA_ARGS__)

int bch2_fsck_err_opt(struct bch_fs *, enum bch_fsck_flags, enum bch_sb_error_id);

__printf(5, 6) __cold
int __bch2_fsck_err(struct bch_fs *, struct btree_trans *, enum bch_fsck_flags,
		    enum bch_sb_error_id, const char *, ...);
#define bch2_fsck_err(c, _flags, _err_type, ...)		\
	__bch2_fsck_err(type_is(c, struct bch_fs *) ? (struct bch_fs *) c : NULL,\
			type_is(c, struct btree_trans *) ?
(struct btree_trans *) c : NULL,\ _flags, BCH_FSCK_ERR_##_err_type, __VA_ARGS__) void bch2_flush_fsck_errs(struct bch_fs *); void bch2_free_fsck_errs(struct bch_fs *); #define fsck_err_wrap(_do) \ ({ \ int _ret = _do; \ if (!bch2_err_matches(_ret, BCH_ERR_fsck_fix) && \ !bch2_err_matches(_ret, BCH_ERR_fsck_ignore)) { \ ret = _ret; \ goto fsck_err; \ } \ \ bch2_err_matches(_ret, BCH_ERR_fsck_fix); \ }) #define __fsck_err(...) fsck_err_wrap(bch2_fsck_err(__VA_ARGS__)) /* These macros return true if error should be fixed: */ /* XXX: mark in superblock that filesystem contains errors, if we ignore: */ #define __fsck_err_on(cond, c, _flags, _err_type, ...) \ ({ \ might_sleep(); \ \ if (type_is(c, struct bch_fs *)) \ WARN_ON(bch2_current_has_btree_trans((struct bch_fs *) c));\ \ (unlikely(cond) ? __fsck_err(c, _flags, _err_type, __VA_ARGS__) : false);\ }) #define mustfix_fsck_err(c, _err_type, ...) \ __fsck_err(c, FSCK_CAN_FIX, _err_type, __VA_ARGS__) #define mustfix_fsck_err_on(cond, c, _err_type, ...) \ __fsck_err_on(cond, c, FSCK_CAN_FIX, _err_type, __VA_ARGS__) #define fsck_err(c, _err_type, ...) \ __fsck_err(c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, _err_type, __VA_ARGS__) #define fsck_err_on(cond, c, _err_type, ...) \ __fsck_err_on(cond, c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, _err_type, __VA_ARGS__) #define log_fsck_err(c, _err_type, ...) \ __fsck_err(c, FSCK_CAN_IGNORE, _err_type, __VA_ARGS__) #define log_fsck_err_on(cond, ...) \ ({ \ bool _ret = unlikely(!!(cond)); \ if (_ret) \ log_fsck_err(__VA_ARGS__); \ _ret; \ }) enum bch_validate_flags; __printf(5, 6) int __bch2_bkey_fsck_err(struct bch_fs *, struct bkey_s_c, struct bkey_validate_context from, enum bch_sb_error_id, const char *, ...); /* * for now, bkey fsck errors are always handled by deleting the entire key - * this will change at some point */ #define bkey_fsck_err(c, _err_type, _err_msg, ...) \ do { \ int _ret = __bch2_bkey_fsck_err(c, k, from, \ BCH_FSCK_ERR_##_err_type, \ _err_msg, ##__VA_ARGS__); \ if (!bch2_err_matches(_ret, BCH_ERR_fsck_fix) && \ !bch2_err_matches(_ret, BCH_ERR_fsck_ignore)) \ ret = _ret; \ ret = bch_err_throw(c, fsck_delete_bkey); \ goto fsck_err; \ } while (0) #define bkey_fsck_err_on(cond, ...) \ do { \ if (unlikely(cond)) \ bkey_fsck_err(__VA_ARGS__); \ } while (0) /* * Fatal errors: these don't indicate a bug, but we can't continue running in RW * mode - pretty much just due to metadata IO errors: */ void bch2_fatal_error(struct bch_fs *); #define bch2_fs_fatal_error(c, _msg, ...) \ do { \ bch_err(c, "%s(): fatal error " _msg, __func__, ##__VA_ARGS__); \ bch2_fatal_error(c); \ } while (0) #define bch2_fs_fatal_err_on(cond, c, ...) 
\ ({ \ bool _ret = unlikely(!!(cond)); \ \ if (_ret) \ bch2_fs_fatal_error(c, __VA_ARGS__); \ _ret; \ }) /* * IO errors: either recoverable metadata IO (because we have replicas), or data * IO - we need to log it and print out a message, but we don't (necessarily) * want to shut down the fs: */ void bch2_io_error_work(struct work_struct *); /* Does the error handling without logging a message */ void bch2_io_error(struct bch_dev *, enum bch_member_error_type); #ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT void bch2_latency_acct(struct bch_dev *, u64, int); #else static inline void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw) {} #endif static inline void bch2_account_io_success_fail(struct bch_dev *ca, enum bch_member_error_type type, bool success) { if (likely(success)) { if (type == BCH_MEMBER_ERROR_write && ca->write_errors_start) ca->write_errors_start = 0; } else { bch2_io_error(ca, type); } } static inline void bch2_account_io_completion(struct bch_dev *ca, enum bch_member_error_type type, u64 submit_time, bool success) { if (unlikely(!ca)) return; if (type != BCH_MEMBER_ERROR_checksum) bch2_latency_acct(ca, submit_time, type); bch2_account_io_success_fail(ca, type, success); } int bch2_inum_offset_err_msg_trans(struct btree_trans *, struct printbuf *, subvol_inum, u64); void bch2_inum_offset_err_msg(struct bch_fs *, struct printbuf *, subvol_inum, u64); int bch2_inum_snap_offset_err_msg_trans(struct btree_trans *, struct printbuf *, struct bpos); void bch2_inum_snap_offset_err_msg(struct bch_fs *, struct printbuf *, struct bpos); #endif /* _BCACHEFS_ERROR_H */
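/*
 * Editor's note: illustrative sketch, not part of the original header.
 * It shows the calling convention the fsck_err_on() macros above expect:
 * a local "ret" and an "fsck_err" label to jump to on unrecoverable
 * errors. "example_err" stands in for a real BCH_FSCK_ERR_* id and the
 * "looks_wrong" condition is invented.
 */
static int example_check(struct bch_fs *c, bool looks_wrong)
{
	int ret = 0;

	if (fsck_err_on(looks_wrong, c, example_err,
			"example inconsistency detected")) {
		/* Macro returned true: the caller is expected to repair. */
	}
fsck_err:
	return ret;
}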
// SPDX-License-Identifier: GPL-2.0+ /* MDIO Bus interface * * Author: Andy Fleming * * Copyright (c) 2004 Freescale Semiconductor, Inc. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/device.h> #include <linux/errno.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/mii.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of_device.h> #include <linux/of_mdio.h> #include <linux/phy.h> #include <linux/reset.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/uaccess.h> #include <linux/unistd.h> #define CREATE_TRACE_POINTS #include <trace/events/mdio.h> static int mdiobus_register_gpiod(struct mdio_device *mdiodev) { /* Deassert the optional reset signal */ mdiodev->reset_gpio = gpiod_get_optional(&mdiodev->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(mdiodev->reset_gpio)) return PTR_ERR(mdiodev->reset_gpio); if (mdiodev->reset_gpio) gpiod_set_consumer_name(mdiodev->reset_gpio, "PHY reset"); return 0; } static int mdiobus_register_reset(struct mdio_device *mdiodev) { struct reset_control *reset; reset = reset_control_get_optional_exclusive(&mdiodev->dev, "phy"); if (IS_ERR(reset)) return PTR_ERR(reset); mdiodev->reset_ctrl = reset; return 0; } int mdiobus_register_device(struct mdio_device *mdiodev) { int err; if (mdiodev->bus->mdio_map[mdiodev->addr]) return -EBUSY; if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) { err = mdiobus_register_gpiod(mdiodev); if (err) return err; err = mdiobus_register_reset(mdiodev); if (err) return err; /* Assert the reset signal */ mdio_device_reset(mdiodev, 1); } mdiodev->bus->mdio_map[mdiodev->addr] = mdiodev; return 0; } EXPORT_SYMBOL(mdiobus_register_device); int mdiobus_unregister_device(struct mdio_device *mdiodev) { if (mdiodev->bus->mdio_map[mdiodev->addr] != mdiodev) return -EINVAL; reset_control_put(mdiodev->reset_ctrl); mdiodev->bus->mdio_map[mdiodev->addr] = NULL; return 0; } EXPORT_SYMBOL(mdiobus_unregister_device); static struct mdio_device *mdiobus_find_device(struct mii_bus *bus, int addr) { bool addr_valid = addr >= 0 && addr < ARRAY_SIZE(bus->mdio_map); if (WARN_ONCE(!addr_valid, "addr %d out of range\n", addr)) return NULL; return bus->mdio_map[addr]; } struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr) { struct mdio_device *mdiodev; mdiodev = mdiobus_find_device(bus, addr); if (!mdiodev) return NULL; if (!(mdiodev->flags & MDIO_DEVICE_FLAG_PHY)) return NULL; return container_of(mdiodev, struct phy_device, mdio); } EXPORT_SYMBOL(mdiobus_get_phy); bool mdiobus_is_registered_device(struct mii_bus *bus, int addr) { return mdiobus_find_device(bus, addr) != NULL; } EXPORT_SYMBOL(mdiobus_is_registered_device); /** * mdiobus_release - mii_bus device release callback * @d: the target struct device that contains the mii_bus * * Description: called when the last reference to an mii_bus is * dropped, to free the underlying memory.
*/ static void mdiobus_release(struct device *d) { struct mii_bus *bus = to_mii_bus(d); WARN(bus->state != MDIOBUS_RELEASED && /* for compatibility with error handling in drivers */ bus->state != MDIOBUS_ALLOCATED, "%s: not in RELEASED or ALLOCATED state\n", bus->id); if (bus->state == MDIOBUS_RELEASED) fwnode_handle_put(dev_fwnode(d)); kfree(bus); } struct mdio_bus_stat_attr { int addr; unsigned int field_offset; }; static u64 mdio_bus_get_stat(struct mdio_bus_stats *s, unsigned int offset) { const char *p = (const char *)s + offset; unsigned int start; u64 val = 0; do { start = u64_stats_fetch_begin(&s->syncp); val = u64_stats_read((const u64_stats_t *)p); } while (u64_stats_fetch_retry(&s->syncp, start)); return val; } static u64 mdio_bus_get_global_stat(struct mii_bus *bus, unsigned int offset) { unsigned int i; u64 val = 0; for (i = 0; i < PHY_MAX_ADDR; i++) val += mdio_bus_get_stat(&bus->stats[i], offset); return val; } static ssize_t mdio_bus_stat_field_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mii_bus *bus = to_mii_bus(dev); struct mdio_bus_stat_attr *sattr; struct dev_ext_attribute *eattr; u64 val; eattr = container_of(attr, struct dev_ext_attribute, attr); sattr = eattr->var; if (sattr->addr < 0) val = mdio_bus_get_global_stat(bus, sattr->field_offset); else val = mdio_bus_get_stat(&bus->stats[sattr->addr], sattr->field_offset); return sysfs_emit(buf, "%llu\n", val); } static ssize_t mdio_bus_device_stat_field_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mdio_device *mdiodev = to_mdio_device(dev); struct mii_bus *bus = mdiodev->bus; struct mdio_bus_stat_attr *sattr; struct dev_ext_attribute *eattr; int addr = mdiodev->addr; u64 val; eattr = container_of(attr, struct dev_ext_attribute, attr); sattr = eattr->var; val = mdio_bus_get_stat(&bus->stats[addr], sattr->field_offset); return sysfs_emit(buf, "%llu\n", val); } #define MDIO_BUS_STATS_ATTR_DECL(field, file) \ static struct dev_ext_attribute dev_attr_mdio_bus_##field = { \ .attr = { .attr = { .name = file, .mode = 0444 }, \ .show = mdio_bus_stat_field_show, \ }, \ .var = &((struct mdio_bus_stat_attr) { \ -1, offsetof(struct mdio_bus_stats, field) \ }), \ }; \ static struct dev_ext_attribute dev_attr_mdio_bus_device_##field = { \ .attr = { .attr = { .name = file, .mode = 0444 }, \ .show = mdio_bus_device_stat_field_show, \ }, \ .var = &((struct mdio_bus_stat_attr) { \ -1, offsetof(struct mdio_bus_stats, field) \ }), \ }; #define MDIO_BUS_STATS_ATTR(field) \ MDIO_BUS_STATS_ATTR_DECL(field, __stringify(field)) MDIO_BUS_STATS_ATTR(transfers); MDIO_BUS_STATS_ATTR(errors); MDIO_BUS_STATS_ATTR(writes); MDIO_BUS_STATS_ATTR(reads); #define MDIO_BUS_STATS_ADDR_ATTR_DECL(field, addr, file) \ static struct dev_ext_attribute dev_attr_mdio_bus_addr_##field##_##addr = { \ .attr = { .attr = { .name = file, .mode = 0444 }, \ .show = mdio_bus_stat_field_show, \ }, \ .var = &((struct mdio_bus_stat_attr) { \ addr, offsetof(struct mdio_bus_stats, field) \ }), \ } #define MDIO_BUS_STATS_ADDR_ATTR(field, addr) \ MDIO_BUS_STATS_ADDR_ATTR_DECL(field, addr, \ __stringify(field) "_" __stringify(addr)) #define MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(addr) \ MDIO_BUS_STATS_ADDR_ATTR(transfers, addr); \ MDIO_BUS_STATS_ADDR_ATTR(errors, addr); \ MDIO_BUS_STATS_ADDR_ATTR(writes, addr); \ MDIO_BUS_STATS_ADDR_ATTR(reads, addr) \ MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(0); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(1); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(2); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(3); 
MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(4); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(5); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(6); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(7); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(8); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(9); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(10); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(11); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(12); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(13); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(14); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(15); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(16); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(17); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(18); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(19); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(20); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(21); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(22); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(23); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(24); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(25); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(26); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(27); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(28); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(29); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(30); MDIO_BUS_STATS_ADDR_ATTR_GROUP_DECL(31); #define MDIO_BUS_STATS_ADDR_ATTR_GROUP(addr) \ &dev_attr_mdio_bus_addr_transfers_##addr.attr.attr, \ &dev_attr_mdio_bus_addr_errors_##addr.attr.attr, \ &dev_attr_mdio_bus_addr_writes_##addr.attr.attr, \ &dev_attr_mdio_bus_addr_reads_##addr.attr.attr \ static struct attribute *mdio_bus_statistics_attrs[] = { &dev_attr_mdio_bus_transfers.attr.attr, &dev_attr_mdio_bus_errors.attr.attr, &dev_attr_mdio_bus_writes.attr.attr, &dev_attr_mdio_bus_reads.attr.attr, MDIO_BUS_STATS_ADDR_ATTR_GROUP(0), MDIO_BUS_STATS_ADDR_ATTR_GROUP(1), MDIO_BUS_STATS_ADDR_ATTR_GROUP(2), MDIO_BUS_STATS_ADDR_ATTR_GROUP(3), MDIO_BUS_STATS_ADDR_ATTR_GROUP(4), MDIO_BUS_STATS_ADDR_ATTR_GROUP(5), MDIO_BUS_STATS_ADDR_ATTR_GROUP(6), MDIO_BUS_STATS_ADDR_ATTR_GROUP(7), MDIO_BUS_STATS_ADDR_ATTR_GROUP(8), MDIO_BUS_STATS_ADDR_ATTR_GROUP(9), MDIO_BUS_STATS_ADDR_ATTR_GROUP(10), MDIO_BUS_STATS_ADDR_ATTR_GROUP(11), MDIO_BUS_STATS_ADDR_ATTR_GROUP(12), MDIO_BUS_STATS_ADDR_ATTR_GROUP(13), MDIO_BUS_STATS_ADDR_ATTR_GROUP(14), MDIO_BUS_STATS_ADDR_ATTR_GROUP(15), MDIO_BUS_STATS_ADDR_ATTR_GROUP(16), MDIO_BUS_STATS_ADDR_ATTR_GROUP(17), MDIO_BUS_STATS_ADDR_ATTR_GROUP(18), MDIO_BUS_STATS_ADDR_ATTR_GROUP(19), MDIO_BUS_STATS_ADDR_ATTR_GROUP(20), MDIO_BUS_STATS_ADDR_ATTR_GROUP(21), MDIO_BUS_STATS_ADDR_ATTR_GROUP(22), MDIO_BUS_STATS_ADDR_ATTR_GROUP(23), MDIO_BUS_STATS_ADDR_ATTR_GROUP(24), MDIO_BUS_STATS_ADDR_ATTR_GROUP(25), MDIO_BUS_STATS_ADDR_ATTR_GROUP(26), MDIO_BUS_STATS_ADDR_ATTR_GROUP(27), MDIO_BUS_STATS_ADDR_ATTR_GROUP(28), MDIO_BUS_STATS_ADDR_ATTR_GROUP(29), MDIO_BUS_STATS_ADDR_ATTR_GROUP(30), MDIO_BUS_STATS_ADDR_ATTR_GROUP(31), NULL, }; static const struct attribute_group mdio_bus_statistics_group = { .name = "statistics", .attrs = mdio_bus_statistics_attrs, }; static const struct attribute_group *mdio_bus_groups[] = { &mdio_bus_statistics_group, NULL, }; const struct class mdio_bus_class = { .name = "mdio_bus", .dev_release = mdiobus_release, .dev_groups = mdio_bus_groups, }; EXPORT_SYMBOL_GPL(mdio_bus_class); /** * mdio_find_bus - Given the name of a mdiobus, find the mii_bus. * @mdio_name: The name of a mdiobus. * * Returns a reference to the mii_bus, or NULL if none found. The * embedded struct device will have its reference count incremented, * and this must be put_deviced'ed once the bus is finished with. 
*/ struct mii_bus *mdio_find_bus(const char *mdio_name) { struct device *d; d = class_find_device_by_name(&mdio_bus_class, mdio_name); return d ? to_mii_bus(d) : NULL; } EXPORT_SYMBOL(mdio_find_bus); #if IS_ENABLED(CONFIG_OF_MDIO) /** * of_mdio_find_bus - Given an mii_bus node, find the mii_bus. * @mdio_bus_np: Pointer to the mii_bus. * * Returns a reference to the mii_bus, or NULL if none found. The * embedded struct device will have its reference count incremented, * and this must be put once the bus is finished with. * * Because the association of a device_node and mii_bus is made via * of_mdiobus_register(), the mii_bus cannot be found before it is * registered with of_mdiobus_register(). * */ struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np) { struct device *d; if (!mdio_bus_np) return NULL; d = class_find_device_by_of_node(&mdio_bus_class, mdio_bus_np); return d ? to_mii_bus(d) : NULL; } EXPORT_SYMBOL(of_mdio_find_bus); #endif static void mdiobus_stats_acct(struct mdio_bus_stats *stats, bool op, int ret) { preempt_disable(); u64_stats_update_begin(&stats->syncp); u64_stats_inc(&stats->transfers); if (ret < 0) { u64_stats_inc(&stats->errors); goto out; } if (op) u64_stats_inc(&stats->reads); else u64_stats_inc(&stats->writes); out: u64_stats_update_end(&stats->syncp); preempt_enable(); } /** * __mdiobus_read - Unlocked version of the mdiobus_read function * @bus: the mii_bus struct * @addr: the phy address * @regnum: register number to read * * Read a MDIO bus register. Caller must hold the mdio bus lock. * * NOTE: MUST NOT be called from interrupt context. */ int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum) { int retval; lockdep_assert_held_once(&bus->mdio_lock); if (addr >= PHY_MAX_ADDR) return -ENXIO; if (bus->read) retval = bus->read(bus, addr, regnum); else retval = -EOPNOTSUPP; trace_mdio_access(bus, 1, addr, regnum, retval, retval); mdiobus_stats_acct(&bus->stats[addr], true, retval); return retval; } EXPORT_SYMBOL(__mdiobus_read); /** * __mdiobus_write - Unlocked version of the mdiobus_write function * @bus: the mii_bus struct * @addr: the phy address * @regnum: register number to write * @val: value to write to @regnum * * Write a MDIO bus register. Caller must hold the mdio bus lock. * * NOTE: MUST NOT be called from interrupt context. */ int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val) { int err; lockdep_assert_held_once(&bus->mdio_lock); if (addr >= PHY_MAX_ADDR) return -ENXIO; if (bus->write) err = bus->write(bus, addr, regnum, val); else err = -EOPNOTSUPP; trace_mdio_access(bus, 0, addr, regnum, val, err); mdiobus_stats_acct(&bus->stats[addr], false, err); return err; } EXPORT_SYMBOL(__mdiobus_write); /** * __mdiobus_modify_changed - Unlocked version of the mdiobus_modify function * @bus: the mii_bus struct * @addr: the phy address * @regnum: register number to modify * @mask: bit mask of bits to clear * @set: bit mask of bits to set * * Read, modify, and if any change, write the register value back to the * device. Any error returns a negative number. * * NOTE: MUST NOT be called from interrupt context. */ int __mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum, u16 mask, u16 set) { int new, ret; ret = __mdiobus_read(bus, addr, regnum); if (ret < 0) return ret; new = (ret & ~mask) | set; if (new == ret) return 0; ret = __mdiobus_write(bus, addr, regnum, new); return ret < 0 ? 
ret : 1; } EXPORT_SYMBOL_GPL(__mdiobus_modify_changed); /** * __mdiobus_c45_read - Unlocked version of the mdiobus_c45_read function * @bus: the mii_bus struct * @addr: the phy address * @devad: device address to read * @regnum: register number to read * * Read a MDIO bus register. Caller must hold the mdio bus lock. * * NOTE: MUST NOT be called from interrupt context. */ int __mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum) { int retval; lockdep_assert_held_once(&bus->mdio_lock); if (addr >= PHY_MAX_ADDR) return -ENXIO; if (bus->read_c45) retval = bus->read_c45(bus, addr, devad, regnum); else retval = -EOPNOTSUPP; trace_mdio_access(bus, 1, addr, regnum, retval, retval); mdiobus_stats_acct(&bus->stats[addr], true, retval); return retval; } EXPORT_SYMBOL(__mdiobus_c45_read); /** * __mdiobus_c45_write - Unlocked version of the mdiobus_write function * @bus: the mii_bus struct * @addr: the phy address * @devad: device address to read * @regnum: register number to write * @val: value to write to @regnum * * Write a MDIO bus register. Caller must hold the mdio bus lock. * * NOTE: MUST NOT be called from interrupt context. */ int __mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum, u16 val) { int err; lockdep_assert_held_once(&bus->mdio_lock); if (addr >= PHY_MAX_ADDR) return -ENXIO; if (bus->write_c45) err = bus->write_c45(bus, addr, devad, regnum, val); else err = -EOPNOTSUPP; trace_mdio_access(bus, 0, addr, regnum, val, err); mdiobus_stats_acct(&bus->stats[addr], false, err); return err; } EXPORT_SYMBOL(__mdiobus_c45_write); /** * __mdiobus_c45_modify_changed - Unlocked version of the mdiobus_modify function * @bus: the mii_bus struct * @addr: the phy address * @devad: device address to read * @regnum: register number to modify * @mask: bit mask of bits to clear * @set: bit mask of bits to set * * Read, modify, and if any change, write the register value back to the * device. Any error returns a negative number. * * NOTE: MUST NOT be called from interrupt context. */ static int __mdiobus_c45_modify_changed(struct mii_bus *bus, int addr, int devad, u32 regnum, u16 mask, u16 set) { int new, ret; ret = __mdiobus_c45_read(bus, addr, devad, regnum); if (ret < 0) return ret; new = (ret & ~mask) | set; if (new == ret) return 0; ret = __mdiobus_c45_write(bus, addr, devad, regnum, new); return ret < 0 ? ret : 1; } /** * mdiobus_read_nested - Nested version of the mdiobus_read function * @bus: the mii_bus struct * @addr: the phy address * @regnum: register number to read * * In case of nested MDIO bus access avoid lockdep false positives by * using mutex_lock_nested(). * * NOTE: MUST NOT be called from interrupt context, * because the bus read/write functions may wait for an interrupt * to conclude the operation. */ int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum) { int retval; mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); retval = __mdiobus_read(bus, addr, regnum); mutex_unlock(&bus->mdio_lock); return retval; } EXPORT_SYMBOL(mdiobus_read_nested); /** * mdiobus_read - Convenience function for reading a given MII mgmt register * @bus: the mii_bus struct * @addr: the phy address * @regnum: register number to read * * NOTE: MUST NOT be called from interrupt context, * because the bus read/write functions may wait for an interrupt * to conclude the operation. 
*/ int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum) { int retval; mutex_lock(&bus->mdio_lock); retval = __mdiobus_read(bus, addr, regnum); mutex_unlock(&bus->mdio_lock); return retval; } EXPORT_SYMBOL(mdiobus_read); /** * mdiobus_c45_read - Convenience function for reading a given MII mgmt register * @bus: the mii_bus struct * @addr: the phy address * @devad: device address to read * @regnum: register number to read * * NOTE: MUST NOT be called from interrupt context, * because the bus read/write functions may wait for an interrupt * to conclude the operation. */ int mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum) { int retval; mutex_lock(&bus->mdio_lock); retval = __mdiobus_c45_read(bus, addr, devad, regnum); mutex_unlock(&bus->mdio_lock); return retval; } EXPORT_SYMBOL(mdiobus_c45_read); /** * mdiobus_c45_read_nested - Nested version of the mdiobus_c45_read function * @bus: the mii_bus struct * @addr: the phy address * @devad: device address to read * @regnum: register number to read * * In case of nested MDIO bus access avoid lockdep false positives by * using mutex_lock_nested(). * * NOTE: MUST NOT be called from interrupt context, * because the bus read/write functions may wait for an interrupt * to conclude the operation. */ int mdiobus_c45_read_nested(struct mii_bus *bus, int addr, int devad, u32 regnum) { int retval; mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); retval = __mdiobus_c45_read(bus, addr, devad, regnum); mutex_unlock(&bus->mdio_lock); return retval; } EXPORT_SYMBOL(mdiobus_c45_read_nested); /** * mdiobus_write_nested - Nested version of the mdiobus_write function * @bus: the mii_bus struct * @addr: the phy address * @regnum: register number to write * @val: value to write to @regnum * * In case of nested MDIO bus access avoid lockdep false positives by * using mutex_lock_nested(). * * NOTE: MUST NOT be called from interrupt context, * because the bus read/write functions may wait for an interrupt * to conclude the operation. */ int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val) { int err; mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); err = __mdiobus_write(bus, addr, regnum, val); mutex_unlock(&bus->mdio_lock); return err; } EXPORT_SYMBOL(mdiobus_write_nested); /** * mdiobus_write - Convenience function for writing a given MII mgmt register * @bus: the mii_bus struct * @addr: the phy address * @regnum: register number to write * @val: value to write to @regnum * * NOTE: MUST NOT be called from interrupt context, * because the bus read/write functions may wait for an interrupt * to conclude the operation. */ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val) { int err; mutex_lock(&bus->mdio_lock); err = __mdiobus_write(bus, addr, regnum, val); mutex_unlock(&bus->mdio_lock); return err; } EXPORT_SYMBOL(mdiobus_write); /** * mdiobus_c45_write - Convenience function for writing a given MII mgmt register * @bus: the mii_bus struct * @addr: the phy address * @devad: device address to read * @regnum: register number to write * @val: value to write to @regnum * * NOTE: MUST NOT be called from interrupt context, * because the bus read/write functions may wait for an interrupt * to conclude the operation. 
*/ int mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum, u16 val) { int err; mutex_lock(&bus->mdio_lock); err = __mdiobus_c45_write(bus, addr, devad, regnum, val); mutex_unlock(&bus->mdio_lock); return err; } EXPORT_SYMBOL(mdiobus_c45_write); /** * mdiobus_c45_write_nested - Nested version of the mdiobus_c45_write function * @bus: the mii_bus struct * @addr: the phy address * @devad: device address to read * @regnum: register number to write * @val: value to write to @regnum * * In case of nested MDIO bus access avoid lockdep false positives by * using mutex_lock_nested(). * * NOTE: MUST NOT be called from interrupt context, * because the bus read/write functions may wait for an interrupt * to conclude the operation. */ int mdiobus_c45_write_nested(struct mii_bus *bus, int addr, int devad, u32 regnum, u16 val) { int err; mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED); err = __mdiobus_c45_write(bus, addr, devad, regnum, val); mutex_unlock(&bus->mdio_lock); return err; } EXPORT_SYMBOL(mdiobus_c45_write_nested); /* * __mdiobus_modify - Convenience function for modifying a given mdio device * register * @bus: the mii_bus struct * @addr: the phy address * @regnum: register number to write * @mask: bit mask of bits to clear * @set: bit mask of bits to set */ int __mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, u16 set) { int err; err = __mdiobus_modify_changed(bus, addr, regnum, mask, set); return err < 0 ? err : 0; } EXPORT_SYMBOL_GPL(__mdiobus_modify); /** * mdiobus_modify - Convenience function for modifying a given mdio device * register * @bus: the mii_bus struct * @addr: the phy address * @regnum: register number to write * @mask: bit mask of bits to clear * @set: bit mask of bits to set */ int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, u16 set) { int err; mutex_lock(&bus->mdio_lock); err = __mdiobus_modify(bus, addr, regnum, mask, set); mutex_unlock(&bus->mdio_lock); return err; } EXPORT_SYMBOL_GPL(mdiobus_modify); /** * mdiobus_c45_modify - Convenience function for modifying a given mdio device * register * @bus: the mii_bus struct * @addr: the phy address * @devad: device address to read * @regnum: register number to write * @mask: bit mask of bits to clear * @set: bit mask of bits to set */ int mdiobus_c45_modify(struct mii_bus *bus, int addr, int devad, u32 regnum, u16 mask, u16 set) { int err; mutex_lock(&bus->mdio_lock); err = __mdiobus_c45_modify_changed(bus, addr, devad, regnum, mask, set); mutex_unlock(&bus->mdio_lock); return err < 0 ? 
err : 0; } EXPORT_SYMBOL_GPL(mdiobus_c45_modify); /** * mdiobus_modify_changed - Convenience function for modifying a given mdio * device register and returning if it changed * @bus: the mii_bus struct * @addr: the phy address * @regnum: register number to write * @mask: bit mask of bits to clear * @set: bit mask of bits to set */ int mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum, u16 mask, u16 set) { int err; mutex_lock(&bus->mdio_lock); err = __mdiobus_modify_changed(bus, addr, regnum, mask, set); mutex_unlock(&bus->mdio_lock); return err; } EXPORT_SYMBOL_GPL(mdiobus_modify_changed); /** * mdiobus_c45_modify_changed - Convenience function for modifying a given mdio * device register and returning if it changed * @bus: the mii_bus struct * @addr: the phy address * @devad: device address to read * @regnum: register number to write * @mask: bit mask of bits to clear * @set: bit mask of bits to set */ int mdiobus_c45_modify_changed(struct mii_bus *bus, int addr, int devad, u32 regnum, u16 mask, u16 set) { int err; mutex_lock(&bus->mdio_lock); err = __mdiobus_c45_modify_changed(bus, addr, devad, regnum, mask, set); mutex_unlock(&bus->mdio_lock); return err; } EXPORT_SYMBOL_GPL(mdiobus_c45_modify_changed); /** * mdio_bus_match - determine if given MDIO driver supports the given * MDIO device * @dev: target MDIO device * @drv: given MDIO driver * * Description: Given a MDIO device, and a MDIO driver, return 1 if * the driver supports the device. Otherwise, return 0. This may * require calling the devices own match function, since different classes * of MDIO devices have different match criteria. */ static int mdio_bus_match(struct device *dev, const struct device_driver *drv) { const struct mdio_driver *mdiodrv = to_mdio_driver(drv); struct mdio_device *mdio = to_mdio_device(dev); /* Both the driver and device must type-match */ if (!(mdiodrv->mdiodrv.flags & MDIO_DEVICE_IS_PHY) != !(mdio->flags & MDIO_DEVICE_FLAG_PHY)) return 0; if (of_driver_match_device(dev, drv)) return 1; if (mdio->bus_match) return mdio->bus_match(dev, drv); return 0; } static int mdio_uevent(const struct device *dev, struct kobj_uevent_env *env) { int rc; /* Some devices have extra OF data and an OF-style MODALIAS */ rc = of_device_uevent_modalias(dev, env); if (rc != -ENODEV) return rc; return 0; } static struct attribute *mdio_bus_device_statistics_attrs[] = { &dev_attr_mdio_bus_device_transfers.attr.attr, &dev_attr_mdio_bus_device_errors.attr.attr, &dev_attr_mdio_bus_device_writes.attr.attr, &dev_attr_mdio_bus_device_reads.attr.attr, NULL, }; static const struct attribute_group mdio_bus_device_statistics_group = { .name = "statistics", .attrs = mdio_bus_device_statistics_attrs, }; static const struct attribute_group *mdio_bus_dev_groups[] = { &mdio_bus_device_statistics_group, NULL, }; const struct bus_type mdio_bus_type = { .name = "mdio_bus", .dev_groups = mdio_bus_dev_groups, .match = mdio_bus_match, .uevent = mdio_uevent, }; EXPORT_SYMBOL(mdio_bus_type); static int __init mdio_bus_init(void) { int ret; ret = class_register(&mdio_bus_class); if (!ret) { ret = bus_register(&mdio_bus_type); if (ret) class_unregister(&mdio_bus_class); } return ret; } static void __exit mdio_bus_exit(void) { class_unregister(&mdio_bus_class); bus_unregister(&mdio_bus_type); } subsys_initcall(mdio_bus_init); module_exit(mdio_bus_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MDIO bus/device layer");
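/*
 * Editor's note: illustrative sketch, not part of the original file.
 * It shows how a consumer might combine the locked accessors above;
 * the bus/addr values are caller-supplied placeholders and the MII_BMCR
 * and BMCR_* constants come from <linux/mii.h>, already included here.
 */
static int example_restart_aneg(struct mii_bus *bus, int addr)
{
	int bmcr;

	/* Locked read: returns the register value or a negative errno. */
	bmcr = mdiobus_read(bus, addr, MII_BMCR);
	if (bmcr < 0)
		return bmcr;

	/* Set the autoneg enable/restart bits; mask=0 clears nothing. */
	return mdiobus_modify(bus, addr, MII_BMCR, 0,
			      BMCR_ANENABLE | BMCR_ANRESTART);
}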
// SPDX-License-Identifier: GPL-2.0-only /* * ksyms_common.c: A split of kernel/kallsyms.c * Contains a few generic function definitions independent of config KALLSYMS. */ #include <linux/kallsyms.h> #include <linux/security.h> static inline int kallsyms_for_perf(void) { #ifdef CONFIG_PERF_EVENTS extern int sysctl_perf_event_paranoid; if (sysctl_perf_event_paranoid <= 1) return 1; #endif return 0; } /* * We show kallsyms information even to normal users if we've enabled * kernel profiling and are explicitly not paranoid (so kptr_restrict * is clear, and sysctl_perf_event_paranoid isn't set). * * Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to * block even that). */ bool kallsyms_show_value(const struct cred *cred) { switch (kptr_restrict) { case 0: if (kallsyms_for_perf()) return true; fallthrough; case 1: if (security_capable(cred, &init_user_ns, CAP_SYSLOG, CAP_OPT_NOAUDIT) == 0) return true; fallthrough; default: return false; } }
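/*
 * Editor's note: illustrative sketch, not part of the original file.
 * It shows the usual caller pattern for kallsyms_show_value(): expose a
 * real kernel address only when the supplied credentials allow it,
 * otherwise report zero. The seq_file plumbing is assumed here, as in
 * kernel/kallsyms.c.
 */
static void example_show_symbol_addr(struct seq_file *m, unsigned long addr,
				     const struct cred *cred)
{
	unsigned long shown = kallsyms_show_value(cred) ? addr : 0;

	seq_printf(m, "%px\n", (void *)shown);
}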
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _MM_SWAP_H #define _MM_SWAP_H struct mempolicy; extern int page_cluster; #ifdef CONFIG_SWAP #include <linux/swapops.h> /* for swp_offset */ #include <linux/blk_types.h> /* for bio_end_io_t */ /* linux/mm/page_io.c */ int sio_pool_init(void); struct swap_iocb; void swap_read_folio(struct folio *folio, struct swap_iocb **plug); void __swap_read_unplug(struct swap_iocb *plug); static inline void swap_read_unplug(struct swap_iocb *plug) { if (unlikely(plug)) __swap_read_unplug(plug); } void swap_write_unplug(struct swap_iocb *sio); int swap_writeout(struct folio *folio, struct writeback_control *wbc); void __swap_writepage(struct folio *folio, struct writeback_control *wbc); /* linux/mm/swap_state.c */ /* One swap address space for each 64M swap space */ #define SWAP_ADDRESS_SPACE_SHIFT 14 #define SWAP_ADDRESS_SPACE_PAGES (1 << SWAP_ADDRESS_SPACE_SHIFT) #define SWAP_ADDRESS_SPACE_MASK (SWAP_ADDRESS_SPACE_PAGES - 1) extern struct address_space *swapper_spaces[]; #define swap_address_space(entry) \ (&swapper_spaces[swp_type(entry)][swp_offset(entry) \ >> SWAP_ADDRESS_SPACE_SHIFT]) /* * Return the swap device position of the swap entry. */ static inline loff_t swap_dev_pos(swp_entry_t entry) { return ((loff_t)swp_offset(entry)) << PAGE_SHIFT; } /* * Return the swap cache index of the swap entry.
*/ static inline pgoff_t swap_cache_index(swp_entry_t entry) { BUILD_BUG_ON((SWP_OFFSET_MASK | SWAP_ADDRESS_SPACE_MASK) != SWP_OFFSET_MASK); return swp_offset(entry) & SWAP_ADDRESS_SPACE_MASK; } void show_swap_cache_info(void); void *get_shadow_from_swap_cache(swp_entry_t entry); int add_to_swap_cache(struct folio *folio, swp_entry_t entry, gfp_t gfp, void **shadowp); void __delete_from_swap_cache(struct folio *folio, swp_entry_t entry, void *shadow); void delete_from_swap_cache(struct folio *folio); void clear_shadow_from_swap_cache(int type, unsigned long begin, unsigned long end); void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr); struct folio *swap_cache_get_folio(swp_entry_t entry, struct vm_area_struct *vma, unsigned long addr); struct folio *filemap_get_incore_folio(struct address_space *mapping, pgoff_t index); struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, struct vm_area_struct *vma, unsigned long addr, struct swap_iocb **plug); struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags, struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated, bool skip_if_exists); struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag, struct mempolicy *mpol, pgoff_t ilx); struct folio *swapin_readahead(swp_entry_t entry, gfp_t flag, struct vm_fault *vmf); static inline unsigned int folio_swap_flags(struct folio *folio) { return swp_swap_info(folio->swap)->flags; } /* * Return the count of contiguous swap entries that share the same * zeromap status as the starting entry. If is_zeromap is not NULL, * it will return the zeromap status of the starting entry. */ static inline int swap_zeromap_batch(swp_entry_t entry, int max_nr, bool *is_zeromap) { struct swap_info_struct *sis = swp_swap_info(entry); unsigned long start = swp_offset(entry); unsigned long end = start + max_nr; bool first_bit; first_bit = test_bit(start, sis->zeromap); if (is_zeromap) *is_zeromap = first_bit; if (max_nr <= 1) return max_nr; if (first_bit) return find_next_zero_bit(sis->zeromap, end, start) - start; else return find_next_bit(sis->zeromap, end, start) - start; } #else /* CONFIG_SWAP */ struct swap_iocb; static inline void swap_read_folio(struct folio *folio, struct swap_iocb **plug) { } static inline void swap_write_unplug(struct swap_iocb *sio) { } static inline struct address_space *swap_address_space(swp_entry_t entry) { return NULL; } static inline pgoff_t swap_cache_index(swp_entry_t entry) { return 0; } static inline void show_swap_cache_info(void) { } static inline struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx) { return NULL; } static inline struct folio *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, struct vm_fault *vmf) { return NULL; } static inline int swap_writeout(struct folio *f, struct writeback_control *wbc) { return 0; } static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr) { } static inline struct folio *swap_cache_get_folio(swp_entry_t entry, struct vm_area_struct *vma, unsigned long addr) { return NULL; } static inline struct folio *filemap_get_incore_folio(struct address_space *mapping, pgoff_t index) { return filemap_get_folio(mapping, index); } static inline void *get_shadow_from_swap_cache(swp_entry_t entry) { return NULL; } static inline int add_to_swap_cache(struct folio *folio, swp_entry_t entry, gfp_t gfp_mask, void **shadowp) { return -1; } static inline void __delete_from_swap_cache(struct folio *folio, 
swp_entry_t entry, void *shadow) { } static inline void delete_from_swap_cache(struct folio *folio) { } static inline void clear_shadow_from_swap_cache(int type, unsigned long begin, unsigned long end) { } static inline unsigned int folio_swap_flags(struct folio *folio) { return 0; } static inline int swap_zeromap_batch(swp_entry_t entry, int max_nr, bool *has_zeromap) { return 0; } #endif /* CONFIG_SWAP */ /** * folio_index - File index of a folio. * @folio: The folio. * * For a folio which is either in the page cache or the swap cache, * return its index within the address_space it belongs to. If you know * the folio is definitely in the page cache, you can look at the folio's * index directly. * * Return: The index (offset in units of pages) of a folio in its file. */ static inline pgoff_t folio_index(struct folio *folio) { if (unlikely(folio_test_swapcache(folio))) return swap_cache_index(folio->swap); return folio->index; } #endif /* _MM_SWAP_H */
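/*
 * Editor's note: illustrative sketch, not part of the original header
 * (assumes CONFIG_SWAP). It spells out the arithmetic behind
 * swap_address_space() and swap_cache_index() above: each swap type is
 * carved into per-64M address spaces, and the offset within one of them
 * is the folio's page-cache index.
 */
#ifdef CONFIG_SWAP
static inline void example_swap_cache_coords(swp_entry_t entry,
					     unsigned long *space_nr,
					     pgoff_t *index)
{
	/* Which per-64M space of swapper_spaces[swp_type(entry)]... */
	*space_nr = swp_offset(entry) >> SWAP_ADDRESS_SPACE_SHIFT;
	/* ...and the folio's index within that address space. */
	*index = swp_offset(entry) & SWAP_ADDRESS_SPACE_MASK;
}
#endif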
/* SPDX-License-Identifier: GPL-2.0 */ /* * Linux Security Module Hook declarations. * * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com> * Copyright (C) 2001 Greg Kroah-Hartman <greg@kroah.com> * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com> * Copyright (C) 2001 James Morris <jmorris@intercode.com.au> * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group) * Copyright (C) 2015 Intel Corporation. * Copyright (C) 2015 Casey Schaufler <casey@schaufler-ca.com> * Copyright (C) 2016 Mellanox Technologies * Copyright (C) 2020 Google LLC. */ /* * The macro LSM_HOOK is used to define the data structures required by * the LSM framework using the pattern: * * LSM_HOOK(<return_type>, <default_value>, <hook_name>, args...) * * struct security_hook_heads { * #define LSM_HOOK(RET, DEFAULT, NAME, ...)
struct hlist_head NAME; * #include <linux/lsm_hook_defs.h> * #undef LSM_HOOK * }; */ LSM_HOOK(int, 0, binder_set_context_mgr, const struct cred *mgr) LSM_HOOK(int, 0, binder_transaction, const struct cred *from, const struct cred *to) LSM_HOOK(int, 0, binder_transfer_binder, const struct cred *from, const struct cred *to) LSM_HOOK(int, 0, binder_transfer_file, const struct cred *from, const struct cred *to, const struct file *file) LSM_HOOK(int, 0, ptrace_access_check, struct task_struct *child, unsigned int mode) LSM_HOOK(int, 0, ptrace_traceme, struct task_struct *parent) LSM_HOOK(int, 0, capget, const struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted) LSM_HOOK(int, 0, capset, struct cred *new, const struct cred *old, const kernel_cap_t *effective, const kernel_cap_t *inheritable, const kernel_cap_t *permitted) LSM_HOOK(int, 0, capable, const struct cred *cred, struct user_namespace *ns, int cap, unsigned int opts) LSM_HOOK(int, 0, quotactl, int cmds, int type, int id, const struct super_block *sb) LSM_HOOK(int, 0, quota_on, struct dentry *dentry) LSM_HOOK(int, 0, syslog, int type) LSM_HOOK(int, 0, settime, const struct timespec64 *ts, const struct timezone *tz) LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages) LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm) LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, const struct file *file) LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm) LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, const struct linux_binprm *bprm) LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, const struct linux_binprm *bprm) LSM_HOOK(int, 0, fs_context_submount, struct fs_context *fc, struct super_block *reference) LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc, struct fs_context *src_sc) LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc, struct fs_parameter *param) LSM_HOOK(int, 0, sb_alloc_security, struct super_block *sb) LSM_HOOK(void, LSM_RET_VOID, sb_delete, struct super_block *sb) LSM_HOOK(void, LSM_RET_VOID, sb_free_security, struct super_block *sb) LSM_HOOK(void, LSM_RET_VOID, sb_free_mnt_opts, void *mnt_opts) LSM_HOOK(int, 0, sb_eat_lsm_opts, char *orig, void **mnt_opts) LSM_HOOK(int, 0, sb_mnt_opts_compat, struct super_block *sb, void *mnt_opts) LSM_HOOK(int, 0, sb_remount, struct super_block *sb, void *mnt_opts) LSM_HOOK(int, 0, sb_kern_mount, const struct super_block *sb) LSM_HOOK(int, 0, sb_show_options, struct seq_file *m, struct super_block *sb) LSM_HOOK(int, 0, sb_statfs, struct dentry *dentry) LSM_HOOK(int, 0, sb_mount, const char *dev_name, const struct path *path, const char *type, unsigned long flags, void *data) LSM_HOOK(int, 0, sb_umount, struct vfsmount *mnt, int flags) LSM_HOOK(int, 0, sb_pivotroot, const struct path *old_path, const struct path *new_path) LSM_HOOK(int, 0, sb_set_mnt_opts, struct super_block *sb, void *mnt_opts, unsigned long kern_flags, unsigned long *set_kern_flags) LSM_HOOK(int, 0, sb_clone_mnt_opts, const struct super_block *oldsb, struct super_block *newsb, unsigned long kern_flags, unsigned long *set_kern_flags) LSM_HOOK(int, 0, move_mount, const struct path *from_path, const struct path *to_path) LSM_HOOK(int, -EOPNOTSUPP, dentry_init_security, struct dentry *dentry, int mode, const struct qstr *name, const char **xattr_name, struct lsm_context *cp) LSM_HOOK(int, 0, dentry_create_files_as, struct dentry *dentry, int mode, struct qstr *name, const struct cred *old, struct 
cred *new) #ifdef CONFIG_SECURITY_PATH LSM_HOOK(int, 0, path_unlink, const struct path *dir, struct dentry *dentry) LSM_HOOK(int, 0, path_mkdir, const struct path *dir, struct dentry *dentry, umode_t mode) LSM_HOOK(int, 0, path_rmdir, const struct path *dir, struct dentry *dentry) LSM_HOOK(int, 0, path_mknod, const struct path *dir, struct dentry *dentry, umode_t mode, unsigned int dev) LSM_HOOK(void, LSM_RET_VOID, path_post_mknod, struct mnt_idmap *idmap, struct dentry *dentry) LSM_HOOK(int, 0, path_truncate, const struct path *path) LSM_HOOK(int, 0, path_symlink, const struct path *dir, struct dentry *dentry, const char *old_name) LSM_HOOK(int, 0, path_link, struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry) LSM_HOOK(int, 0, path_rename, const struct path *old_dir, struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry, unsigned int flags) LSM_HOOK(int, 0, path_chmod, const struct path *path, umode_t mode) LSM_HOOK(int, 0, path_chown, const struct path *path, kuid_t uid, kgid_t gid) LSM_HOOK(int, 0, path_chroot, const struct path *path) #endif /* CONFIG_SECURITY_PATH */ /* Needed for inode based security check */ LSM_HOOK(int, 0, path_notify, const struct path *path, u64 mask, unsigned int obj_type) LSM_HOOK(int, 0, inode_alloc_security, struct inode *inode) LSM_HOOK(void, LSM_RET_VOID, inode_free_security, struct inode *inode) LSM_HOOK(void, LSM_RET_VOID, inode_free_security_rcu, void *inode_security) LSM_HOOK(int, -EOPNOTSUPP, inode_init_security, struct inode *inode, struct inode *dir, const struct qstr *qstr, struct xattr *xattrs, int *xattr_count) LSM_HOOK(int, 0, inode_init_security_anon, struct inode *inode, const struct qstr *name, const struct inode *context_inode) LSM_HOOK(int, 0, inode_create, struct inode *dir, struct dentry *dentry, umode_t mode) LSM_HOOK(void, LSM_RET_VOID, inode_post_create_tmpfile, struct mnt_idmap *idmap, struct inode *inode) LSM_HOOK(int, 0, inode_link, struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry) LSM_HOOK(int, 0, inode_unlink, struct inode *dir, struct dentry *dentry) LSM_HOOK(int, 0, inode_symlink, struct inode *dir, struct dentry *dentry, const char *old_name) LSM_HOOK(int, 0, inode_mkdir, struct inode *dir, struct dentry *dentry, umode_t mode) LSM_HOOK(int, 0, inode_rmdir, struct inode *dir, struct dentry *dentry) LSM_HOOK(int, 0, inode_mknod, struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) LSM_HOOK(int, 0, inode_rename, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) LSM_HOOK(int, 0, inode_readlink, struct dentry *dentry) LSM_HOOK(int, 0, inode_follow_link, struct dentry *dentry, struct inode *inode, bool rcu) LSM_HOOK(int, 0, inode_permission, struct inode *inode, int mask) LSM_HOOK(int, 0, inode_setattr, struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) LSM_HOOK(void, LSM_RET_VOID, inode_post_setattr, struct mnt_idmap *idmap, struct dentry *dentry, int ia_valid) LSM_HOOK(int, 0, inode_getattr, const struct path *path) LSM_HOOK(int, 0, inode_xattr_skipcap, const char *name) LSM_HOOK(int, 0, inode_setxattr, struct mnt_idmap *idmap, struct dentry *dentry, const char *name, const void *value, size_t size, int flags) LSM_HOOK(void, LSM_RET_VOID, inode_post_setxattr, struct dentry *dentry, const char *name, const void *value, size_t size, int flags) LSM_HOOK(int, 0, inode_getxattr, struct dentry *dentry, const char *name) LSM_HOOK(int, 0, inode_listxattr, struct dentry *dentry) 
LSM_HOOK(int, 0, inode_removexattr, struct mnt_idmap *idmap, struct dentry *dentry, const char *name) LSM_HOOK(void, LSM_RET_VOID, inode_post_removexattr, struct dentry *dentry, const char *name) LSM_HOOK(int, 0, inode_set_acl, struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name, struct posix_acl *kacl) LSM_HOOK(void, LSM_RET_VOID, inode_post_set_acl, struct dentry *dentry, const char *acl_name, struct posix_acl *kacl) LSM_HOOK(int, 0, inode_get_acl, struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name) LSM_HOOK(int, 0, inode_remove_acl, struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name) LSM_HOOK(void, LSM_RET_VOID, inode_post_remove_acl, struct mnt_idmap *idmap, struct dentry *dentry, const char *acl_name) LSM_HOOK(int, 0, inode_need_killpriv, struct dentry *dentry) LSM_HOOK(int, 0, inode_killpriv, struct mnt_idmap *idmap, struct dentry *dentry) LSM_HOOK(int, -EOPNOTSUPP, inode_getsecurity, struct mnt_idmap *idmap, struct inode *inode, const char *name, void **buffer, bool alloc) LSM_HOOK(int, -EOPNOTSUPP, inode_setsecurity, struct inode *inode, const char *name, const void *value, size_t size, int flags) LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer, size_t buffer_size) LSM_HOOK(void, LSM_RET_VOID, inode_getlsmprop, struct inode *inode, struct lsm_prop *prop) LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new) LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, struct dentry *src, const char *name) LSM_HOOK(int, 0, inode_setintegrity, const struct inode *inode, enum lsm_integrity_type type, const void *value, size_t size) LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir, struct kernfs_node *kn) LSM_HOOK(int, 0, file_permission, struct file *file, int mask) LSM_HOOK(int, 0, file_alloc_security, struct file *file) LSM_HOOK(void, LSM_RET_VOID, file_release, struct file *file) LSM_HOOK(void, LSM_RET_VOID, file_free_security, struct file *file) LSM_HOOK(int, 0, file_ioctl, struct file *file, unsigned int cmd, unsigned long arg) LSM_HOOK(int, 0, file_ioctl_compat, struct file *file, unsigned int cmd, unsigned long arg) LSM_HOOK(int, 0, mmap_addr, unsigned long addr) LSM_HOOK(int, 0, mmap_file, struct file *file, unsigned long reqprot, unsigned long prot, unsigned long flags) LSM_HOOK(int, 0, file_mprotect, struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot) LSM_HOOK(int, 0, file_lock, struct file *file, unsigned int cmd) LSM_HOOK(int, 0, file_fcntl, struct file *file, unsigned int cmd, unsigned long arg) LSM_HOOK(void, LSM_RET_VOID, file_set_fowner, struct file *file) LSM_HOOK(int, 0, file_send_sigiotask, struct task_struct *tsk, struct fown_struct *fown, int sig) LSM_HOOK(int, 0, file_receive, struct file *file) LSM_HOOK(int, 0, file_open, struct file *file) LSM_HOOK(int, 0, file_post_open, struct file *file, int mask) LSM_HOOK(int, 0, file_truncate, struct file *file) LSM_HOOK(int, 0, task_alloc, struct task_struct *task, unsigned long clone_flags) LSM_HOOK(void, LSM_RET_VOID, task_free, struct task_struct *task) LSM_HOOK(int, 0, cred_alloc_blank, struct cred *cred, gfp_t gfp) LSM_HOOK(void, LSM_RET_VOID, cred_free, struct cred *cred) LSM_HOOK(int, 0, cred_prepare, struct cred *new, const struct cred *old, gfp_t gfp) LSM_HOOK(void, LSM_RET_VOID, cred_transfer, struct cred *new, const struct cred *old) LSM_HOOK(void, LSM_RET_VOID, cred_getsecid, const struct cred *c, u32 *secid) LSM_HOOK(void, LSM_RET_VOID, cred_getlsmprop, const struct cred *c, struct lsm_prop 
*prop) LSM_HOOK(int, 0, kernel_act_as, struct cred *new, u32 secid) LSM_HOOK(int, 0, kernel_create_files_as, struct cred *new, struct inode *inode) LSM_HOOK(int, 0, kernel_module_request, char *kmod_name) LSM_HOOK(int, 0, kernel_load_data, enum kernel_load_data_id id, bool contents) LSM_HOOK(int, 0, kernel_post_load_data, char *buf, loff_t size, enum kernel_load_data_id id, char *description) LSM_HOOK(int, 0, kernel_read_file, struct file *file, enum kernel_read_file_id id, bool contents) LSM_HOOK(int, 0, kernel_post_read_file, struct file *file, char *buf, loff_t size, enum kernel_read_file_id id) LSM_HOOK(int, 0, task_fix_setuid, struct cred *new, const struct cred *old, int flags) LSM_HOOK(int, 0, task_fix_setgid, struct cred *new, const struct cred * old, int flags) LSM_HOOK(int, 0, task_fix_setgroups, struct cred *new, const struct cred * old) LSM_HOOK(int, 0, task_setpgid, struct task_struct *p, pid_t pgid) LSM_HOOK(int, 0, task_getpgid, struct task_struct *p) LSM_HOOK(int, 0, task_getsid, struct task_struct *p) LSM_HOOK(void, LSM_RET_VOID, current_getlsmprop_subj, struct lsm_prop *prop) LSM_HOOK(void, LSM_RET_VOID, task_getlsmprop_obj, struct task_struct *p, struct lsm_prop *prop) LSM_HOOK(int, 0, task_setnice, struct task_struct *p, int nice) LSM_HOOK(int, 0, task_setioprio, struct task_struct *p, int ioprio) LSM_HOOK(int, 0, task_getioprio, struct task_struct *p) LSM_HOOK(int, 0, task_prlimit, const struct cred *cred, const struct cred *tcred, unsigned int flags) LSM_HOOK(int, 0, task_setrlimit, struct task_struct *p, unsigned int resource, struct rlimit *new_rlim) LSM_HOOK(int, 0, task_setscheduler, struct task_struct *p) LSM_HOOK(int, 0, task_getscheduler, struct task_struct *p) LSM_HOOK(int, 0, task_movememory, struct task_struct *p) LSM_HOOK(int, 0, task_kill, struct task_struct *p, struct kernel_siginfo *info, int sig, const struct cred *cred) LSM_HOOK(int, -ENOSYS, task_prctl, int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5) LSM_HOOK(void, LSM_RET_VOID, task_to_inode, struct task_struct *p, struct inode *inode) LSM_HOOK(int, 0, userns_create, const struct cred *cred) LSM_HOOK(int, 0, ipc_permission, struct kern_ipc_perm *ipcp, short flag) LSM_HOOK(void, LSM_RET_VOID, ipc_getlsmprop, struct kern_ipc_perm *ipcp, struct lsm_prop *prop) LSM_HOOK(int, 0, msg_msg_alloc_security, struct msg_msg *msg) LSM_HOOK(void, LSM_RET_VOID, msg_msg_free_security, struct msg_msg *msg) LSM_HOOK(int, 0, msg_queue_alloc_security, struct kern_ipc_perm *perm) LSM_HOOK(void, LSM_RET_VOID, msg_queue_free_security, struct kern_ipc_perm *perm) LSM_HOOK(int, 0, msg_queue_associate, struct kern_ipc_perm *perm, int msqflg) LSM_HOOK(int, 0, msg_queue_msgctl, struct kern_ipc_perm *perm, int cmd) LSM_HOOK(int, 0, msg_queue_msgsnd, struct kern_ipc_perm *perm, struct msg_msg *msg, int msqflg) LSM_HOOK(int, 0, msg_queue_msgrcv, struct kern_ipc_perm *perm, struct msg_msg *msg, struct task_struct *target, long type, int mode) LSM_HOOK(int, 0, shm_alloc_security, struct kern_ipc_perm *perm) LSM_HOOK(void, LSM_RET_VOID, shm_free_security, struct kern_ipc_perm *perm) LSM_HOOK(int, 0, shm_associate, struct kern_ipc_perm *perm, int shmflg) LSM_HOOK(int, 0, shm_shmctl, struct kern_ipc_perm *perm, int cmd) LSM_HOOK(int, 0, shm_shmat, struct kern_ipc_perm *perm, char __user *shmaddr, int shmflg) LSM_HOOK(int, 0, sem_alloc_security, struct kern_ipc_perm *perm) LSM_HOOK(void, LSM_RET_VOID, sem_free_security, struct kern_ipc_perm *perm) LSM_HOOK(int, 0, sem_associate, struct 
kern_ipc_perm *perm, int semflg) LSM_HOOK(int, 0, sem_semctl, struct kern_ipc_perm *perm, int cmd) LSM_HOOK(int, 0, sem_semop, struct kern_ipc_perm *perm, struct sembuf *sops, unsigned nsops, int alter) LSM_HOOK(int, 0, netlink_send, struct sock *sk, struct sk_buff *skb) LSM_HOOK(void, LSM_RET_VOID, d_instantiate, struct dentry *dentry, struct inode *inode) LSM_HOOK(int, -EOPNOTSUPP, getselfattr, unsigned int attr, struct lsm_ctx __user *ctx, u32 *size, u32 flags) LSM_HOOK(int, -EOPNOTSUPP, setselfattr, unsigned int attr, struct lsm_ctx *ctx, u32 size, u32 flags) LSM_HOOK(int, -EINVAL, getprocattr, struct task_struct *p, const char *name, char **value) LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size) LSM_HOOK(int, 0, ismaclabel, const char *name) LSM_HOOK(int, -EOPNOTSUPP, secid_to_secctx, u32 secid, struct lsm_context *cp) LSM_HOOK(int, -EOPNOTSUPP, lsmprop_to_secctx, struct lsm_prop *prop, struct lsm_context *cp) LSM_HOOK(int, 0, secctx_to_secid, const char *secdata, u32 seclen, u32 *secid) LSM_HOOK(void, LSM_RET_VOID, release_secctx, struct lsm_context *cp) LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode) LSM_HOOK(int, 0, inode_notifysecctx, struct inode *inode, void *ctx, u32 ctxlen) LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen) LSM_HOOK(int, -EOPNOTSUPP, inode_getsecctx, struct inode *inode, struct lsm_context *cp) #if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE) LSM_HOOK(int, 0, post_notification, const struct cred *w_cred, const struct cred *cred, struct watch_notification *n) #endif /* CONFIG_SECURITY && CONFIG_WATCH_QUEUE */ #if defined(CONFIG_SECURITY) && defined(CONFIG_KEY_NOTIFICATIONS) LSM_HOOK(int, 0, watch_key, struct key *key) #endif /* CONFIG_SECURITY && CONFIG_KEY_NOTIFICATIONS */ #ifdef CONFIG_SECURITY_NETWORK LSM_HOOK(int, 0, unix_stream_connect, struct sock *sock, struct sock *other, struct sock *newsk) LSM_HOOK(int, 0, unix_may_send, struct socket *sock, struct socket *other) LSM_HOOK(int, 0, socket_create, int family, int type, int protocol, int kern) LSM_HOOK(int, 0, socket_post_create, struct socket *sock, int family, int type, int protocol, int kern) LSM_HOOK(int, 0, socket_socketpair, struct socket *socka, struct socket *sockb) LSM_HOOK(int, 0, socket_bind, struct socket *sock, struct sockaddr *address, int addrlen) LSM_HOOK(int, 0, socket_connect, struct socket *sock, struct sockaddr *address, int addrlen) LSM_HOOK(int, 0, socket_listen, struct socket *sock, int backlog) LSM_HOOK(int, 0, socket_accept, struct socket *sock, struct socket *newsock) LSM_HOOK(int, 0, socket_sendmsg, struct socket *sock, struct msghdr *msg, int size) LSM_HOOK(int, 0, socket_recvmsg, struct socket *sock, struct msghdr *msg, int size, int flags) LSM_HOOK(int, 0, socket_getsockname, struct socket *sock) LSM_HOOK(int, 0, socket_getpeername, struct socket *sock) LSM_HOOK(int, 0, socket_getsockopt, struct socket *sock, int level, int optname) LSM_HOOK(int, 0, socket_setsockopt, struct socket *sock, int level, int optname) LSM_HOOK(int, 0, socket_shutdown, struct socket *sock, int how) LSM_HOOK(int, 0, socket_sock_rcv_skb, struct sock *sk, struct sk_buff *skb) LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_stream, struct socket *sock, sockptr_t optval, sockptr_t optlen, unsigned int len) LSM_HOOK(int, -ENOPROTOOPT, socket_getpeersec_dgram, struct socket *sock, struct sk_buff *skb, u32 *secid) LSM_HOOK(int, 0, sk_alloc_security, struct sock *sk, int family, gfp_t priority) LSM_HOOK(void, 
LSM_RET_VOID, sk_free_security, struct sock *sk) LSM_HOOK(void, LSM_RET_VOID, sk_clone_security, const struct sock *sk, struct sock *newsk) LSM_HOOK(void, LSM_RET_VOID, sk_getsecid, const struct sock *sk, u32 *secid) LSM_HOOK(void, LSM_RET_VOID, sock_graft, struct sock *sk, struct socket *parent) LSM_HOOK(int, 0, inet_conn_request, const struct sock *sk, struct sk_buff *skb, struct request_sock *req) LSM_HOOK(void, LSM_RET_VOID, inet_csk_clone, struct sock *newsk, const struct request_sock *req) LSM_HOOK(void, LSM_RET_VOID, inet_conn_established, struct sock *sk, struct sk_buff *skb) LSM_HOOK(int, 0, secmark_relabel_packet, u32 secid) LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_inc, void) LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_dec, void) LSM_HOOK(void, LSM_RET_VOID, req_classify_flow, const struct request_sock *req, struct flowi_common *flic) LSM_HOOK(int, 0, tun_dev_alloc_security, void *security) LSM_HOOK(int, 0, tun_dev_create, void) LSM_HOOK(int, 0, tun_dev_attach_queue, void *security) LSM_HOOK(int, 0, tun_dev_attach, struct sock *sk, void *security) LSM_HOOK(int, 0, tun_dev_open, void *security) LSM_HOOK(int, 0, sctp_assoc_request, struct sctp_association *asoc, struct sk_buff *skb) LSM_HOOK(int, 0, sctp_bind_connect, struct sock *sk, int optname, struct sockaddr *address, int addrlen) LSM_HOOK(void, LSM_RET_VOID, sctp_sk_clone, struct sctp_association *asoc, struct sock *sk, struct sock *newsk) LSM_HOOK(int, 0, sctp_assoc_established, struct sctp_association *asoc, struct sk_buff *skb) LSM_HOOK(int, 0, mptcp_add_subflow, struct sock *sk, struct sock *ssk) #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_INFINIBAND LSM_HOOK(int, 0, ib_pkey_access, void *sec, u64 subnet_prefix, u16 pkey) LSM_HOOK(int, 0, ib_endport_manage_subnet, void *sec, const char *dev_name, u8 port_num) LSM_HOOK(int, 0, ib_alloc_security, void *sec) #endif /* CONFIG_SECURITY_INFINIBAND */ #ifdef CONFIG_SECURITY_NETWORK_XFRM LSM_HOOK(int, 0, xfrm_policy_alloc_security, struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp) LSM_HOOK(int, 0, xfrm_policy_clone_security, struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx) LSM_HOOK(void, LSM_RET_VOID, xfrm_policy_free_security, struct xfrm_sec_ctx *ctx) LSM_HOOK(int, 0, xfrm_policy_delete_security, struct xfrm_sec_ctx *ctx) LSM_HOOK(int, 0, xfrm_state_alloc, struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx) LSM_HOOK(int, 0, xfrm_state_alloc_acquire, struct xfrm_state *x, struct xfrm_sec_ctx *polsec, u32 secid) LSM_HOOK(void, LSM_RET_VOID, xfrm_state_free_security, struct xfrm_state *x) LSM_HOOK(int, 0, xfrm_state_delete_security, struct xfrm_state *x) LSM_HOOK(int, 0, xfrm_policy_lookup, struct xfrm_sec_ctx *ctx, u32 fl_secid) LSM_HOOK(int, 1, xfrm_state_pol_flow_match, struct xfrm_state *x, struct xfrm_policy *xp, const struct flowi_common *flic) LSM_HOOK(int, 0, xfrm_decode_session, struct sk_buff *skb, u32 *secid, int ckall) #endif /* CONFIG_SECURITY_NETWORK_XFRM */ /* key management security hooks */ #ifdef CONFIG_KEYS LSM_HOOK(int, 0, key_alloc, struct key *key, const struct cred *cred, unsigned long flags) LSM_HOOK(int, 0, key_permission, key_ref_t key_ref, const struct cred *cred, enum key_need_perm need_perm) LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **buffer) LSM_HOOK(void, LSM_RET_VOID, key_post_create_or_update, struct key *keyring, struct key *key, const void *payload, size_t payload_len, unsigned long flags, bool create) #endif /* CONFIG_KEYS */ #ifdef CONFIG_AUDIT LSM_HOOK(int, 0, 
audit_rule_init, u32 field, u32 op, char *rulestr, void **lsmrule, gfp_t gfp) LSM_HOOK(int, 0, audit_rule_known, struct audit_krule *krule) LSM_HOOK(int, 0, audit_rule_match, struct lsm_prop *prop, u32 field, u32 op, void *lsmrule) LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule) #endif /* CONFIG_AUDIT */ #ifdef CONFIG_BPF_SYSCALL LSM_HOOK(int, 0, bpf, int cmd, union bpf_attr *attr, unsigned int size, bool kernel) LSM_HOOK(int, 0, bpf_map, struct bpf_map *map, fmode_t fmode) LSM_HOOK(int, 0, bpf_prog, struct bpf_prog *prog) LSM_HOOK(int, 0, bpf_map_create, struct bpf_map *map, union bpf_attr *attr, struct bpf_token *token, bool kernel) LSM_HOOK(void, LSM_RET_VOID, bpf_map_free, struct bpf_map *map) LSM_HOOK(int, 0, bpf_prog_load, struct bpf_prog *prog, union bpf_attr *attr, struct bpf_token *token, bool kernel) LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free, struct bpf_prog *prog) LSM_HOOK(int, 0, bpf_token_create, struct bpf_token *token, union bpf_attr *attr, const struct path *path) LSM_HOOK(void, LSM_RET_VOID, bpf_token_free, struct bpf_token *token) LSM_HOOK(int, 0, bpf_token_cmd, const struct bpf_token *token, enum bpf_cmd cmd) LSM_HOOK(int, 0, bpf_token_capable, const struct bpf_token *token, int cap) #endif /* CONFIG_BPF_SYSCALL */ LSM_HOOK(int, 0, locked_down, enum lockdown_reason what) #ifdef CONFIG_PERF_EVENTS LSM_HOOK(int, 0, perf_event_open, int type) LSM_HOOK(int, 0, perf_event_alloc, struct perf_event *event) LSM_HOOK(int, 0, perf_event_read, struct perf_event *event) LSM_HOOK(int, 0, perf_event_write, struct perf_event *event) #endif /* CONFIG_PERF_EVENTS */ #ifdef CONFIG_IO_URING LSM_HOOK(int, 0, uring_override_creds, const struct cred *new) LSM_HOOK(int, 0, uring_sqpoll, void) LSM_HOOK(int, 0, uring_cmd, struct io_uring_cmd *ioucmd) LSM_HOOK(int, 0, uring_allowed, void) #endif /* CONFIG_IO_URING */ LSM_HOOK(void, LSM_RET_VOID, initramfs_populated, void) LSM_HOOK(int, 0, bdev_alloc_security, struct block_device *bdev) LSM_HOOK(void, LSM_RET_VOID, bdev_free_security, struct block_device *bdev) LSM_HOOK(int, 0, bdev_setintegrity, struct block_device *bdev, enum lsm_integrity_type type, const void *value, size_t size)
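The block above is the tail of the kernel's LSM hook table (lsm_hook_defs.h): an x-macro list in which each LSM_HOOK(return_type, default_value, name, args...) entry is expanded by whichever file defines LSM_HOOK before including the header. A minimal standalone sketch of that consumption pattern follows; the hook names and macros here are invented for illustration and are not the kernel's actual expansion in security/security.c.

#include <stdio.h>

/*
 * Illustrative x-macro table standing in for lsm_hook_defs.h: the includer
 * defines HOOK() to emit whatever it needs, expands the list, then undefines
 * HOOK() again.
 */
#define HOOK_LIST(HOOK)				\
	HOOK(hook_file_open)			\
	HOOK(hook_task_alloc)			\
	HOOK(hook_inode_permission)

/* Expansion 1: an enum with one identifier per hook. */
#define HOOK_TO_ENUM(name) name##_id,
enum hook_id { HOOK_LIST(HOOK_TO_ENUM) HOOK_ID_MAX };
#undef HOOK_TO_ENUM

/* Expansion 2: a table of printable hook names from the same list. */
#define HOOK_TO_NAME(name) #name,
static const char *const hook_names[] = { HOOK_LIST(HOOK_TO_NAME) };
#undef HOOK_TO_NAME

int main(void)
{
	for (int i = 0; i < HOOK_ID_MAX; i++)
		printf("%d: %s\n", i, hook_names[i]);
	return 0;
}

The kernel repeats this trick over the real table to generate, among other things, the per-hook function-pointer members and the default return values from a single list, so the declarations above never have to be written out twice.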
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OCB mode implementation
 *
 * Copyright: (c) 2014 Czech Technical University in Prague
 *            (c) 2014 Volkswagen Group Research
 * Copyright (C) 2022-2023 Intel Corporation
 * Author: Rostislav Lisovy <rostislav.lisovy@fel.cvut.cz>
 * Funded by: Volkswagen Group Research
 */
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
#include "nl80211.h"
#include "core.h"
#include "rdev-ops.h"

int cfg80211_join_ocb(struct cfg80211_registered_device *rdev,
		      struct net_device *dev, struct ocb_setup *setup)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	int err;

	lockdep_assert_wiphy(wdev->wiphy);

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_OCB)
		return -EOPNOTSUPP;

	if (!rdev->ops->join_ocb)
		return -EOPNOTSUPP;

	if (WARN_ON(!setup->chandef.chan))
		return -EINVAL;

	err = rdev_join_ocb(rdev, dev, setup);
	if (!err)
		wdev->u.ocb.chandef = setup->chandef;

	return err;
}

int cfg80211_leave_ocb(struct cfg80211_registered_device *rdev,
		       struct net_device *dev)
{
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	int err;

	lockdep_assert_wiphy(wdev->wiphy);

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_OCB)
		return -EOPNOTSUPP;

	if (!rdev->ops->leave_ocb)
		return -EOPNOTSUPP;

	if (!wdev->u.ocb.chandef.chan)
		return -ENOTCONN;

	err = rdev_leave_ocb(rdev, dev);
	if (!err)
		memset(&wdev->u.ocb.chandef, 0, sizeof(wdev->u.ocb.chandef));

	return err;
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This file provides wrappers with sanitizer instrumentation for non-atomic
 * bit operations.
 *
 * To use this functionality, an arch's bitops.h file needs to define each of
 * the below bit operations with an arch_ prefix (e.g. arch_set_bit(),
 * arch___set_bit(), etc.).
 */
#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
#define _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H

#include <linux/instrumented.h>

/**
 * ___set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic. If it is called on the same
 * region of memory concurrently, the effect may be that only one operation
 * succeeds.
 */
static __always_inline void
___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	instrument_write(addr + BIT_WORD(nr), sizeof(long));
	arch___set_bit(nr, addr);
}

/**
 * ___clear_bit - Clears a bit in memory
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic. If it is called on the same
 * region of memory concurrently, the effect may be that only one operation
 * succeeds.
 */
static __always_inline void
___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	instrument_write(addr + BIT_WORD(nr), sizeof(long));
	arch___clear_bit(nr, addr);
}

/**
 * ___change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic. If it is called on the same
 * region of memory concurrently, the effect may be that only one operation
 * succeeds.
 */
static __always_inline void
___change_bit(unsigned long nr, volatile unsigned long *addr)
{
	instrument_write(addr + BIT_WORD(nr), sizeof(long));
	arch___change_bit(nr, addr);
}

static __always_inline void
__instrument_read_write_bitop(long nr, volatile unsigned long *addr)
{
	if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC)) {
		/*
		 * We treat non-atomic read-write bitops a little more special.
		 * Given the operations here only modify a single bit, assuming
		 * non-atomicity of the writer is sufficient may be reasonable
		 * for certain usage (and follows the permissible nature of the
		 * assume-plain-writes-atomic rule):
		 * 1. report read-modify-write races -> check read;
		 * 2. do not report races with marked readers, but do report
		 *    races with unmarked readers -> check "atomic" write.
		 */
		kcsan_check_read(addr + BIT_WORD(nr), sizeof(long));
		/*
		 * Use generic write instrumentation, in case other sanitizers
		 * or tools are enabled alongside KCSAN.
		 */
		instrument_write(addr + BIT_WORD(nr), sizeof(long));
	} else {
		instrument_read_write(addr + BIT_WORD(nr), sizeof(long));
	}
}

/**
 * ___test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic. If two instances of this operation race, one
 * can appear to succeed but actually fail.
 */
static __always_inline bool
___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	__instrument_read_write_bitop(nr, addr);
	return arch___test_and_set_bit(nr, addr);
}

/**
 * ___test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic. If two instances of this operation race, one
 * can appear to succeed but actually fail.
 */
static __always_inline bool
___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	__instrument_read_write_bitop(nr, addr);
	return arch___test_and_clear_bit(nr, addr);
}

/**
 * ___test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic. If two instances of this operation race, one
 * can appear to succeed but actually fail.
 */
static __always_inline bool
___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	__instrument_read_write_bitop(nr, addr);
	return arch___test_and_change_bit(nr, addr);
}

/**
 * _test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __always_inline bool
_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
	return arch_test_bit(nr, addr);
}

/**
 * _test_bit_acquire - Determine, with acquire semantics, whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __always_inline bool
_test_bit_acquire(unsigned long nr, const volatile unsigned long *addr)
{
	instrument_atomic_read(addr + BIT_WORD(nr), sizeof(long));
	return arch_test_bit_acquire(nr, addr);
}

#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
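A small userspace sketch of the word/mask arithmetic these wrappers instrument. BITS_PER_LONG, BIT_WORD() and BIT_MASK() are redefined locally (mirroring the kernel definitions) so the example stands alone, and the plain read-modify-write is exactly the race window the comment in __instrument_read_write_bitop() describes.

#include <stdio.h>
#include <limits.h>

/* Local stand-ins for the kernel's BIT_WORD()/BIT_MASK() helpers. */
#define BITS_PER_LONG	(sizeof(long) * CHAR_BIT)
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))

/*
 * Non-atomic test-and-set over an array of longs, shaped like
 * ___test_and_set_bit(): the read, the OR and the store are separate plain
 * accesses, so a concurrent writer to the same word can be lost.
 */
static int test_and_set_bit_nonatomic(unsigned long nr, unsigned long *addr)
{
	unsigned long *word = addr + BIT_WORD(nr);
	unsigned long mask = BIT_MASK(nr);
	int old = (*word & mask) != 0;

	*word |= mask;		/* plain store: racy by design */
	return old;
}

int main(void)
{
	unsigned long bitmap[2] = { 0, 0 };

	/* Bit 70 lands in word 1 on a 64-bit long. */
	printf("first set returned %d\n", test_and_set_bit_nonatomic(70, bitmap));
	printf("second set returned %d\n", test_and_set_bit_nonatomic(70, bitmap));
	printf("word 1 = %#lx\n", bitmap[1]);
	return 0;
}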
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bug.h>

#include "vmci_datagram.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_route.h"

/*
 * struct datagram_entry describes the datagram entity. It is used for datagram
 * entities created only on the host.
 */
struct datagram_entry {
	struct vmci_resource resource;
	u32 flags;
	bool run_delayed;
	vmci_datagram_recv_cb recv_cb;
	void *client_data;
	u32 priv_flags;
};

struct delayed_datagram_info {
	struct datagram_entry *entry;
	struct work_struct work;
	bool in_dg_host_queue;
	/* msg and msg_payload must be together. */
	struct vmci_datagram msg;
	u8 msg_payload[];
};

/* Number of in-flight host->host datagrams */
static atomic_t delayed_dg_host_queue_size = ATOMIC_INIT(0);

/*
 * Create a datagram entry given a handle pointer.
*/ static int dg_create_handle(u32 resource_id, u32 flags, u32 priv_flags, vmci_datagram_recv_cb recv_cb, void *client_data, struct vmci_handle *out_handle) { int result; u32 context_id; struct vmci_handle handle; struct datagram_entry *entry; if ((flags & VMCI_FLAG_WELLKNOWN_DG_HND) != 0) return VMCI_ERROR_INVALID_ARGS; if ((flags & VMCI_FLAG_ANYCID_DG_HND) != 0) { context_id = VMCI_INVALID_ID; } else { context_id = vmci_get_context_id(); if (context_id == VMCI_INVALID_ID) return VMCI_ERROR_NO_RESOURCES; } handle = vmci_make_handle(context_id, resource_id); entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) { pr_warn("Failed allocating memory for datagram entry\n"); return VMCI_ERROR_NO_MEM; } entry->run_delayed = (flags & VMCI_FLAG_DG_DELAYED_CB) ? true : false; entry->flags = flags; entry->recv_cb = recv_cb; entry->client_data = client_data; entry->priv_flags = priv_flags; /* Make datagram resource live. */ result = vmci_resource_add(&entry->resource, VMCI_RESOURCE_TYPE_DATAGRAM, handle); if (result != VMCI_SUCCESS) { pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n", handle.context, handle.resource, result); kfree(entry); return result; } *out_handle = vmci_resource_handle(&entry->resource); return VMCI_SUCCESS; } /* * Internal utility function with the same purpose as * vmci_datagram_get_priv_flags that also takes a context_id. */ static int vmci_datagram_get_priv_flags(u32 context_id, struct vmci_handle handle, u32 *priv_flags) { if (context_id == VMCI_INVALID_ID) return VMCI_ERROR_INVALID_ARGS; if (context_id == VMCI_HOST_CONTEXT_ID) { struct datagram_entry *src_entry; struct vmci_resource *resource; resource = vmci_resource_by_handle(handle, VMCI_RESOURCE_TYPE_DATAGRAM); if (!resource) return VMCI_ERROR_INVALID_ARGS; src_entry = container_of(resource, struct datagram_entry, resource); *priv_flags = src_entry->priv_flags; vmci_resource_put(resource); } else if (context_id == VMCI_HYPERVISOR_CONTEXT_ID) *priv_flags = VMCI_MAX_PRIVILEGE_FLAGS; else *priv_flags = vmci_context_get_priv_flags(context_id); return VMCI_SUCCESS; } /* * Calls the specified callback in a delayed context. */ static void dg_delayed_dispatch(struct work_struct *work) { struct delayed_datagram_info *dg_info = container_of(work, struct delayed_datagram_info, work); dg_info->entry->recv_cb(dg_info->entry->client_data, &dg_info->msg); vmci_resource_put(&dg_info->entry->resource); if (dg_info->in_dg_host_queue) atomic_dec(&delayed_dg_host_queue_size); kfree(dg_info); } /* * Dispatch datagram as a host, to the host, or other vm context. This * function cannot dispatch to hypervisor context handlers. This should * have been handled before we get here by vmci_datagram_dispatch. * Returns number of bytes sent on success, error code otherwise. */ static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg) { int retval; size_t dg_size; u32 src_priv_flags; dg_size = VMCI_DG_SIZE(dg); /* Host cannot send to the hypervisor. */ if (dg->dst.context == VMCI_HYPERVISOR_CONTEXT_ID) return VMCI_ERROR_DST_UNREACHABLE; /* Check that source handle matches sending context. */ if (dg->src.context != context_id) { pr_devel("Sender context (ID=0x%x) is not owner of src datagram entry (handle=0x%x:0x%x)\n", context_id, dg->src.context, dg->src.resource); return VMCI_ERROR_NO_ACCESS; } /* Get hold of privileges of sending endpoint. 
*/ retval = vmci_datagram_get_priv_flags(context_id, dg->src, &src_priv_flags); if (retval != VMCI_SUCCESS) { pr_warn("Couldn't get privileges (handle=0x%x:0x%x)\n", dg->src.context, dg->src.resource); return retval; } /* Determine if we should route to host or guest destination. */ if (dg->dst.context == VMCI_HOST_CONTEXT_ID) { /* Route to host datagram entry. */ struct datagram_entry *dst_entry; struct vmci_resource *resource; if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID && dg->dst.resource == VMCI_EVENT_HANDLER) { return vmci_event_dispatch(dg); } resource = vmci_resource_by_handle(dg->dst, VMCI_RESOURCE_TYPE_DATAGRAM); if (!resource) { pr_devel("Sending to invalid destination (handle=0x%x:0x%x)\n", dg->dst.context, dg->dst.resource); return VMCI_ERROR_INVALID_RESOURCE; } dst_entry = container_of(resource, struct datagram_entry, resource); if (vmci_deny_interaction(src_priv_flags, dst_entry->priv_flags)) { vmci_resource_put(resource); return VMCI_ERROR_NO_ACCESS; } /* * If a VMCI datagram destined for the host is also sent by the * host, we always run it delayed. This ensures that no locks * are held when the datagram callback runs. */ if (dst_entry->run_delayed || dg->src.context == VMCI_HOST_CONTEXT_ID) { struct delayed_datagram_info *dg_info; if (atomic_add_return(1, &delayed_dg_host_queue_size) == VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE) { atomic_dec(&delayed_dg_host_queue_size); vmci_resource_put(resource); return VMCI_ERROR_NO_MEM; } dg_info = kmalloc(struct_size(dg_info, msg_payload, dg->payload_size), GFP_ATOMIC); if (!dg_info) { atomic_dec(&delayed_dg_host_queue_size); vmci_resource_put(resource); return VMCI_ERROR_NO_MEM; } dg_info->in_dg_host_queue = true; dg_info->entry = dst_entry; dg_info->msg = *dg; memcpy(&dg_info->msg_payload, dg + 1, dg->payload_size); INIT_WORK(&dg_info->work, dg_delayed_dispatch); schedule_work(&dg_info->work); retval = VMCI_SUCCESS; } else { retval = dst_entry->recv_cb(dst_entry->client_data, dg); vmci_resource_put(resource); if (retval < VMCI_SUCCESS) return retval; } } else { /* Route to destination VM context. */ struct vmci_datagram *new_dg; if (context_id != dg->dst.context) { if (vmci_deny_interaction(src_priv_flags, vmci_context_get_priv_flags (dg->dst.context))) { return VMCI_ERROR_NO_ACCESS; } else if (VMCI_CONTEXT_IS_VM(context_id)) { /* * If the sending context is a VM, it * cannot reach another VM. */ pr_devel("Datagram communication between VMs not supported (src=0x%x, dst=0x%x)\n", context_id, dg->dst.context); return VMCI_ERROR_DST_UNREACHABLE; } } /* We make a copy to enqueue. */ new_dg = kmemdup(dg, dg_size, GFP_KERNEL); if (new_dg == NULL) return VMCI_ERROR_NO_MEM; retval = vmci_ctx_enqueue_datagram(dg->dst.context, new_dg); if (retval < VMCI_SUCCESS) { kfree(new_dg); return retval; } } /* * We currently truncate the size to signed 32 bits. This doesn't * matter for this handler as it only support 4Kb messages. */ return (int)dg_size; } /* * Dispatch datagram as a guest, down through the VMX and potentially to * the host. * Returns number of bytes sent on success, error code otherwise. */ static int dg_dispatch_as_guest(struct vmci_datagram *dg) { int retval; struct vmci_resource *resource; resource = vmci_resource_by_handle(dg->src, VMCI_RESOURCE_TYPE_DATAGRAM); if (!resource) return VMCI_ERROR_NO_HANDLE; retval = vmci_send_datagram(dg); vmci_resource_put(resource); return retval; } /* * Dispatch datagram. This will determine the routing for the datagram * and dispatch it accordingly. 
* Returns number of bytes sent on success, error code otherwise. */ int vmci_datagram_dispatch(u32 context_id, struct vmci_datagram *dg, bool from_guest) { int retval; enum vmci_route route; BUILD_BUG_ON(sizeof(struct vmci_datagram) != 24); if (dg->payload_size > VMCI_MAX_DG_SIZE || VMCI_DG_SIZE(dg) > VMCI_MAX_DG_SIZE) { pr_devel("Payload (size=%llu bytes) too big to send\n", (unsigned long long)dg->payload_size); return VMCI_ERROR_INVALID_ARGS; } retval = vmci_route(&dg->src, &dg->dst, from_guest, &route); if (retval < VMCI_SUCCESS) { pr_devel("Failed to route datagram (src=0x%x, dst=0x%x, err=%d)\n", dg->src.context, dg->dst.context, retval); return retval; } if (VMCI_ROUTE_AS_HOST == route) { if (VMCI_INVALID_ID == context_id) context_id = VMCI_HOST_CONTEXT_ID; return dg_dispatch_as_host(context_id, dg); } if (VMCI_ROUTE_AS_GUEST == route) return dg_dispatch_as_guest(dg); pr_warn("Unknown route (%d) for datagram\n", route); return VMCI_ERROR_DST_UNREACHABLE; } /* * Invoke the handler for the given datagram. This is intended to be * called only when acting as a guest and receiving a datagram from the * virtual device. */ int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg) { struct vmci_resource *resource; struct datagram_entry *dst_entry; resource = vmci_resource_by_handle(dg->dst, VMCI_RESOURCE_TYPE_DATAGRAM); if (!resource) { pr_devel("destination (handle=0x%x:0x%x) doesn't exist\n", dg->dst.context, dg->dst.resource); return VMCI_ERROR_NO_HANDLE; } dst_entry = container_of(resource, struct datagram_entry, resource); if (dst_entry->run_delayed) { struct delayed_datagram_info *dg_info; dg_info = kmalloc(sizeof(*dg_info) + (size_t)dg->payload_size, GFP_ATOMIC); if (!dg_info) { vmci_resource_put(resource); return VMCI_ERROR_NO_MEM; } dg_info->in_dg_host_queue = false; dg_info->entry = dst_entry; dg_info->msg = *dg; memcpy(&dg_info->msg_payload, dg + 1, dg->payload_size); INIT_WORK(&dg_info->work, dg_delayed_dispatch); schedule_work(&dg_info->work); } else { dst_entry->recv_cb(dst_entry->client_data, dg); vmci_resource_put(resource); } return VMCI_SUCCESS; } /* * vmci_datagram_create_handle_priv() - Create host context datagram endpoint * @resource_id: The resource ID. * @flags: Datagram Flags. * @priv_flags: Privilege Flags. * @recv_cb: Callback when receiving datagrams. * @client_data: Pointer for a datagram_entry struct * @out_handle: vmci_handle that is populated as a result of this function. * * Creates a host context datagram endpoint and returns a handle to it. */ int vmci_datagram_create_handle_priv(u32 resource_id, u32 flags, u32 priv_flags, vmci_datagram_recv_cb recv_cb, void *client_data, struct vmci_handle *out_handle) { if (out_handle == NULL) return VMCI_ERROR_INVALID_ARGS; if (recv_cb == NULL) { pr_devel("Client callback needed when creating datagram\n"); return VMCI_ERROR_INVALID_ARGS; } if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS) return VMCI_ERROR_INVALID_ARGS; return dg_create_handle(resource_id, flags, priv_flags, recv_cb, client_data, out_handle); } EXPORT_SYMBOL_GPL(vmci_datagram_create_handle_priv); /* * vmci_datagram_create_handle() - Create host context datagram endpoint * @resource_id: Resource ID. * @flags: Datagram Flags. * @recv_cb: Callback when receiving datagrams. * @client_ata: Pointer for a datagram_entry struct * @out_handle: vmci_handle that is populated as a result of this function. * * Creates a host context datagram endpoint and returns a handle to * it. Same as vmci_datagram_create_handle_priv without the priviledge * flags argument. 
*/ int vmci_datagram_create_handle(u32 resource_id, u32 flags, vmci_datagram_recv_cb recv_cb, void *client_data, struct vmci_handle *out_handle) { return vmci_datagram_create_handle_priv( resource_id, flags, VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS, recv_cb, client_data, out_handle); } EXPORT_SYMBOL_GPL(vmci_datagram_create_handle); /* * vmci_datagram_destroy_handle() - Destroys datagram handle * @handle: vmci_handle to be destroyed and reaped. * * Use this function to destroy any datagram handles created by * vmci_datagram_create_handle{,Priv} functions. */ int vmci_datagram_destroy_handle(struct vmci_handle handle) { struct datagram_entry *entry; struct vmci_resource *resource; resource = vmci_resource_by_handle(handle, VMCI_RESOURCE_TYPE_DATAGRAM); if (!resource) { pr_devel("Failed to destroy datagram (handle=0x%x:0x%x)\n", handle.context, handle.resource); return VMCI_ERROR_NOT_FOUND; } entry = container_of(resource, struct datagram_entry, resource); vmci_resource_put(&entry->resource); vmci_resource_remove(&entry->resource); kfree(entry); return VMCI_SUCCESS; } EXPORT_SYMBOL_GPL(vmci_datagram_destroy_handle); /* * vmci_datagram_send() - Send a datagram * @msg: The datagram to send. * * Sends the provided datagram on its merry way. */ int vmci_datagram_send(struct vmci_datagram *msg) { if (msg == NULL) return VMCI_ERROR_INVALID_ARGS; return vmci_datagram_dispatch(VMCI_INVALID_ID, msg, false); } EXPORT_SYMBOL_GPL(vmci_datagram_send);
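A hedged sketch of how a host-side client might use the endpoints exported by this file (vmci_datagram_create_handle() and vmci_datagram_destroy_handle()). The resource ID is hypothetical and the vmci_datagram_recv_cb prototype is inferred from the calls above, so treat the details as assumptions rather than documented API.

/* Illustrative only: a minimal host-side datagram endpoint. */
#include <linux/module.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>

#define EXAMPLE_RESOURCE_ID	42	/* hypothetical resource ID */

static struct vmci_handle example_handle;

/* Assumed callback shape: called with client_data and the received datagram. */
static int example_recv_cb(void *client_data, struct vmci_datagram *dg)
{
	pr_info("vmci example: %llu payload bytes from 0x%x:0x%x\n",
		(unsigned long long)dg->payload_size,
		dg->src.context, dg->src.resource);
	return 0;
}

static int example_endpoint_create(void)
{
	int err;

	/* flags == 0: fixed context ID, no delayed callback. */
	err = vmci_datagram_create_handle(EXAMPLE_RESOURCE_ID, 0,
					  example_recv_cb, NULL,
					  &example_handle);
	return err < VMCI_SUCCESS ? err : 0;
}

static void example_endpoint_destroy(void)
{
	vmci_datagram_destroy_handle(example_handle);
}

Datagrams sent to this handle from a guest context would then be delivered through example_recv_cb(), either directly or via the delayed work path shown in dg_dispatch_as_host() above.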
// SPDX-License-Identifier: GPL-2.0-or-later
/* Module signature checker
 *
 * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/module_signature.h>
#include <linux/string.h>
#include <linux/verification.h>
#include <linux/security.h>
#include <crypto/public_key.h>
#include <uapi/linux/module.h>
#include "internal.h"

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "module."

static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
module_param(sig_enforce, bool_enable_only, 0644);

/*
 * Export the sig_enforce kernel cmdline parameter so that other subsystems
 * can rely on it instead of checking CONFIG_MODULE_SIG_FORCE directly.
 */
bool is_module_sig_enforced(void)
{
	return sig_enforce;
}
EXPORT_SYMBOL(is_module_sig_enforced);

void set_module_sig_enforced(void)
{
	sig_enforce = true;
}

/*
 * Verify the signature on a module.
 */
int mod_verify_sig(const void *mod, struct load_info *info)
{
	struct module_signature ms;
	size_t sig_len, modlen = info->len;
	int ret;

	pr_devel("==>%s(,%zu)\n", __func__, modlen);

	if (modlen <= sizeof(ms))
		return -EBADMSG;

	memcpy(&ms, mod + (modlen - sizeof(ms)), sizeof(ms));

	ret = mod_check_sig(&ms, modlen, "module");
	if (ret)
		return ret;

	sig_len = be32_to_cpu(ms.sig_len);
	modlen -= sig_len + sizeof(ms);
	info->len = modlen;

	return verify_pkcs7_signature(mod, modlen, mod + modlen, sig_len,
				      VERIFY_USE_SECONDARY_KEYRING,
				      VERIFYING_MODULE_SIGNATURE,
				      NULL, NULL);
}

int module_sig_check(struct load_info *info, int flags)
{
	int err = -ENODATA;
	const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
	const char *reason;
	const void *mod = info->hdr;
	bool mangled_module = flags & (MODULE_INIT_IGNORE_MODVERSIONS |
				       MODULE_INIT_IGNORE_VERMAGIC);

	/*
	 * Do not allow mangled modules as a module with version information
	 * removed is no longer the module that was signed.
	 */
	if (!mangled_module &&
	    info->len > markerlen &&
	    memcmp(mod + info->len - markerlen, MODULE_SIG_STRING,
		   markerlen) == 0) {
		/* We truncate the module to discard the signature */
		info->len -= markerlen;
		err = mod_verify_sig(mod, info);
		if (!err) {
			info->sig_ok = true;
			return 0;
		}
	}

	/*
	 * We don't permit modules to be loaded into the trusted kernels
	 * without a valid signature on them, but if we're not enforcing,
	 * certain errors are non-fatal.
	 */
	switch (err) {
	case -ENODATA:
		reason = "unsigned module";
		break;
	case -ENOPKG:
		reason = "module with unsupported crypto";
		break;
	case -ENOKEY:
		reason = "module with unavailable key";
		break;

	default:
		/*
		 * All other errors are fatal, including lack of memory,
		 * unparseable signatures, and signature check failures --
		 * even if signatures aren't required.
		 */
		return err;
	}

	if (is_module_sig_enforced()) {
		pr_notice("Loading of %s is rejected\n", reason);
		return -EKEYREJECTED;
	}

	return security_locked_down(LOCKDOWN_MODULE_SIGNATURE);
}
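mod_verify_sig() implies the layout of a signed module image: [ELF payload][PKCS#7 blob][struct module_signature][MODULE_SIG_STRING]. The userspace sketch below walks the same arithmetic in the same order (strip the marker, read the trailing descriptor, then peel off sig_len bytes); the struct here is a simplified stand-in for illustration, not the kernel's definition.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohl() as a stand-in for be32_to_cpu() */

/* Simplified stand-in for struct module_signature; only sig_len matters here. */
struct example_modsig {
	uint8_t  algo, hash, id_type, signer_len, key_id_len, pad[3];
	uint32_t sig_len;	/* big-endian length of the PKCS#7 blob */
};

#define SIG_MARKER "~Module signature appended~\n"

/*
 * Given a buffer laid out as [payload][pkcs7][example_modsig][marker],
 * recover the payload length and the signature length.
 */
static int split_signed_module(const uint8_t *buf, size_t len,
			       size_t *payload_len, size_t *sig_len)
{
	const size_t markerlen = sizeof(SIG_MARKER) - 1;
	struct example_modsig ms;

	if (len <= markerlen ||
	    memcmp(buf + len - markerlen, SIG_MARKER, markerlen) != 0)
		return -1;		/* no appended signature */
	len -= markerlen;

	if (len <= sizeof(ms))
		return -1;
	memcpy(&ms, buf + len - sizeof(ms), sizeof(ms));

	*sig_len = ntohl(ms.sig_len);
	if (*sig_len + sizeof(ms) > len)
		return -1;
	*payload_len = len - *sig_len - sizeof(ms);
	return 0;
}

As in the kernel code, every length check happens before the corresponding subtraction, so a short or corrupted trailer is rejected instead of producing a bogus payload length.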
9 9 6 6 7 7 7 7 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 // SPDX-License-Identifier: GPL-2.0-or-later /* * Squashfs - a compressed read only filesystem for Linux * * Copyright (c) 2016-present, Facebook, Inc. * All rights reserved. * * zstd_wrapper.c */ #include <linux/mutex.h> #include <linux/bio.h> #include <linux/slab.h> #include <linux/zstd.h> #include <linux/vmalloc.h> #include "squashfs_fs.h" #include "squashfs_fs_sb.h" #include "squashfs.h" #include "decompressor.h" #include "page_actor.h" struct workspace { void *mem; size_t mem_size; size_t window_size; }; static void *zstd_init(struct squashfs_sb_info *msblk, void *buff) { struct workspace *wksp = kmalloc(sizeof(*wksp), GFP_KERNEL); if (wksp == NULL) goto failed; wksp->window_size = max_t(size_t, msblk->block_size, SQUASHFS_METADATA_SIZE); wksp->mem_size = zstd_dstream_workspace_bound(wksp->window_size); wksp->mem = vmalloc(wksp->mem_size); if (wksp->mem == NULL) goto failed; return wksp; failed: ERROR("Failed to allocate zstd workspace\n"); kfree(wksp); return ERR_PTR(-ENOMEM); } static void zstd_free(void *strm) { struct workspace *wksp = strm; if (wksp) vfree(wksp->mem); kfree(wksp); } static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm, struct bio *bio, int offset, int length, struct squashfs_page_actor *output) { struct workspace *wksp = strm; zstd_dstream *stream; size_t total_out = 0; int error = 0; zstd_in_buffer in_buf = { NULL, 0, 0 }; zstd_out_buffer out_buf = { NULL, 0, 0 }; struct bvec_iter_all iter_all = {}; struct bio_vec *bvec = bvec_init_iter_all(&iter_all); stream = zstd_init_dstream(wksp->window_size, wksp->mem, wksp->mem_size); if (!stream) { ERROR("Failed to initialize zstd decompressor\n"); return -EIO; } out_buf.size = PAGE_SIZE; out_buf.dst = squashfs_first_page(output); if (IS_ERR(out_buf.dst)) { error = PTR_ERR(out_buf.dst); goto finish; } for (;;) { size_t zstd_err; if (in_buf.pos == in_buf.size) { const void *data; int avail; if (!bio_next_segment(bio, &iter_all)) { error = -EIO; break; } avail = min(length, ((int)bvec->bv_len) - offset); data = bvec_virt(bvec); length -= avail; in_buf.src = data + offset; in_buf.size = avail; in_buf.pos = 0; offset = 0; } if (out_buf.pos == out_buf.size) { out_buf.dst = squashfs_next_page(output); if (IS_ERR(out_buf.dst)) { error = PTR_ERR(out_buf.dst); break; } else if (out_buf.dst == NULL) { /* Shouldn't run out of pages * before stream is done. */ error = -EIO; break; } out_buf.pos = 0; out_buf.size = PAGE_SIZE; } total_out -= out_buf.pos; zstd_err = zstd_decompress_stream(stream, &out_buf, &in_buf); total_out += out_buf.pos; /* add the additional data produced */ if (zstd_err == 0) break; if (zstd_is_error(zstd_err)) { ERROR("zstd decompression error: %d\n", (int)zstd_get_error_code(zstd_err)); error = -EIO; break; } } finish: squashfs_finish_page(output); return error ? 
error : total_out; } const struct squashfs_decompressor squashfs_zstd_comp_ops = { .init = zstd_init, .free = zstd_free, .decompress = zstd_uncompress, .id = ZSTD_COMPRESSION, .name = "zstd", .alloc_buffer = 1, .supported = 1 };
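For comparison with the kernel zstd_* wrappers used in zstd_uncompress() above, here is a rough userspace analogue of the same streaming loop built on libzstd's ZSTD_decompressStream(); buffer handling is simplified (one flat output buffer instead of page-by-page), so it is a sketch rather than a drop-in equivalent.

#include <stdio.h>
#include <zstd.h>

/*
 * Decompress src[0..src_size) into a caller-provided dst buffer, consuming
 * input and producing output incrementally, much like the loop above.
 * Returns the number of bytes produced, or 0 on error.
 */
static size_t stream_decompress(const void *src, size_t src_size,
				void *dst, size_t dst_cap)
{
	ZSTD_DStream *ds = ZSTD_createDStream();
	ZSTD_inBuffer in = { src, src_size, 0 };
	size_t total_out = 0;

	if (!ds)
		return 0;
	ZSTD_initDStream(ds);

	while (in.pos < in.size && total_out < dst_cap) {
		ZSTD_outBuffer out = { (char *)dst + total_out,
				       dst_cap - total_out, 0 };
		size_t ret = ZSTD_decompressStream(ds, &out, &in);

		total_out += out.pos;
		if (ret == 0)			/* frame fully decoded */
			break;
		if (ZSTD_isError(ret)) {
			fprintf(stderr, "zstd: %s\n", ZSTD_getErrorName(ret));
			total_out = 0;
			break;
		}
	}

	ZSTD_freeDStream(ds);
	return total_out;
}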
// SPDX-License-Identifier: GPL-2.0-only
/* xfrm4_tunnel.c: Generic IP tunnel transformer.
 *
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#define pr_fmt(fmt) "IPsec: " fmt

#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/xfrm.h>
#include <net/protocol.h>

static int ipip_output(struct xfrm_state *x, struct sk_buff *skb)
{
	skb_push(skb, -skb_network_offset(skb));
	return 0;
}

static int ipip_xfrm_rcv(struct xfrm_state *x, struct sk_buff *skb)
{
	return ip_hdr(skb)->protocol;
}

static int ipip_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	if (x->props.mode != XFRM_MODE_TUNNEL) {
		NL_SET_ERR_MSG(extack, "IPv4 tunnel can only be used with tunnel mode");
		return -EINVAL;
	}

	if (x->encap) {
		NL_SET_ERR_MSG(extack, "IPv4 tunnel is not compatible with encapsulation");
		return -EINVAL;
	}

	x->props.header_len = sizeof(struct iphdr);

	return 0;
}

static void ipip_destroy(struct xfrm_state *x)
{
}

static const struct xfrm_type ipip_type = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_IPIP,
	.init_state	= ipip_init_state,
	.destructor	= ipip_destroy,
	.input		= ipip_xfrm_rcv,
	.output		= ipip_output
};

static int xfrm_tunnel_rcv(struct sk_buff *skb)
{
	return xfrm4_rcv_spi(skb, IPPROTO_IPIP, ip_hdr(skb)->saddr);
}

static int xfrm_tunnel_err(struct sk_buff *skb, u32 info)
{
	return -ENOENT;
}

static struct xfrm_tunnel xfrm_tunnel_handler __read_mostly = {
	.handler	= xfrm_tunnel_rcv,
	.err_handler	= xfrm_tunnel_err,
	.priority	= 4,
};

#if IS_ENABLED(CONFIG_IPV6)
static struct xfrm_tunnel xfrm64_tunnel_handler __read_mostly = {
	.handler	= xfrm_tunnel_rcv,
	.err_handler	= xfrm_tunnel_err,
	.priority	= 3,
};
#endif

static int __init ipip_init(void)
{
	if (xfrm_register_type(&ipip_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}

	if (xfrm4_tunnel_register(&xfrm_tunnel_handler, AF_INET)) {
		pr_info("%s: can't add xfrm handler for AF_INET\n", __func__);
		xfrm_unregister_type(&ipip_type, AF_INET);
		return -EAGAIN;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (xfrm4_tunnel_register(&xfrm64_tunnel_handler, AF_INET6)) {
		pr_info("%s: can't add xfrm handler for AF_INET6\n", __func__);
		xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET);
		xfrm_unregister_type(&ipip_type, AF_INET);
		return -EAGAIN;
	}
#endif
	return 0;
}

static void __exit ipip_fini(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (xfrm4_tunnel_deregister(&xfrm64_tunnel_handler, AF_INET6))
		pr_info("%s: can't remove xfrm handler for AF_INET6\n", __func__);
#endif
	if (xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET))
		pr_info("%s: can't remove xfrm handler for AF_INET\n", __func__);
	xfrm_unregister_type(&ipip_type, AF_INET);
}

module_init(ipip_init);
module_exit(ipip_fini);
MODULE_DESCRIPTION("IPv4 XFRM tunnel driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_IPIP);
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_gc.h"
#include "btree_journal_iter.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "buckets.h"
#include "clock.h"
#include "enumerated_ref.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "keylist.h"
#include "recovery_passes.h"
#include "replicas.h"
#include "sb-members.h" #include "super-io.h" #include "trace.h" #include <linux/random.h> static const char * const bch2_btree_update_modes[] = { #define x(t) #t, BTREE_UPDATE_MODES() #undef x NULL }; static void bch2_btree_update_to_text(struct printbuf *, struct btree_update *); static int bch2_btree_insert_node(struct btree_update *, struct btree_trans *, btree_path_idx_t, struct btree *, struct keylist *); static void bch2_btree_update_add_new_node(struct btree_update *, struct btree *); /* * Verify that child nodes correctly span parent node's range: */ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b) { struct bch_fs *c = trans->c; struct bpos node_min = b->key.k.type == KEY_TYPE_btree_ptr_v2 ? bkey_i_to_btree_ptr_v2(&b->key)->v.min_key : b->data->min_key; struct btree_and_journal_iter iter; struct bkey_s_c k; struct printbuf buf = PRINTBUF; struct bkey_buf prev; int ret = 0; BUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 && !bpos_eq(bkey_i_to_btree_ptr_v2(&b->key)->v.min_key, b->data->min_key)); bch2_bkey_buf_init(&prev); bkey_init(&prev.k->k); bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b); if (b == btree_node_root(c, b)) { if (!bpos_eq(b->data->min_key, POS_MIN)) { bch2_log_msg_start(c, &buf); prt_printf(&buf, "btree root with incorrect min_key: "); bch2_bpos_to_text(&buf, b->data->min_key); prt_newline(&buf); bch2_count_fsck_err(c, btree_root_bad_min_key, &buf); goto err; } if (!bpos_eq(b->data->max_key, SPOS_MAX)) { bch2_log_msg_start(c, &buf); prt_printf(&buf, "btree root with incorrect max_key: "); bch2_bpos_to_text(&buf, b->data->max_key); prt_newline(&buf); bch2_count_fsck_err(c, btree_root_bad_max_key, &buf); goto err; } } if (!b->c.level) goto out; while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) { if (k.k->type != KEY_TYPE_btree_ptr_v2) goto out; struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k); struct bpos expected_min = bkey_deleted(&prev.k->k) ? 
node_min : bpos_successor(prev.k->k.p); if (!bpos_eq(expected_min, bp.v->min_key)) { prt_str(&buf, "end of prev node doesn't match start of next node"); prt_str(&buf, "\nprev "); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(prev.k)); prt_str(&buf, "\nnext "); bch2_bkey_val_to_text(&buf, c, k); prt_newline(&buf); bch2_count_fsck_err(c, btree_node_topology_bad_min_key, &buf); goto err; } bch2_bkey_buf_reassemble(&prev, c, k); bch2_btree_and_journal_iter_advance(&iter); } if (bkey_deleted(&prev.k->k)) { prt_printf(&buf, "empty interior node\n"); bch2_count_fsck_err(c, btree_node_topology_empty_interior_node, &buf); goto err; } if (!bpos_eq(prev.k->k.p, b->key.k.p)) { prt_str(&buf, "last child node doesn't end at end of parent node\nchild: "); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(prev.k)); prt_newline(&buf); bch2_count_fsck_err(c, btree_node_topology_bad_max_key, &buf); goto err; } out: bch2_btree_and_journal_iter_exit(&iter); bch2_bkey_buf_exit(&prev, c); printbuf_exit(&buf); return ret; err: bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level); prt_char(&buf, ' '); bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); prt_newline(&buf); ret = __bch2_topology_error(c, &buf); bch2_print_str(c, KERN_ERR, buf.buf); BUG_ON(!ret); goto out; } /* Calculate ideal packed bkey format for new btree nodes: */ static void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b) { struct bkey_packed *k; struct bkey uk; for_each_bset(b, t) bset_tree_for_each_key(b, t, k) if (!bkey_deleted(k)) { uk = bkey_unpack_key(b, k); bch2_bkey_format_add_key(s, &uk); } } static struct bkey_format bch2_btree_calc_format(struct btree *b) { struct bkey_format_state s; bch2_bkey_format_init(&s); bch2_bkey_format_add_pos(&s, b->data->min_key); bch2_bkey_format_add_pos(&s, b->data->max_key); __bch2_btree_calc_format(&s, b); return bch2_bkey_format_done(&s); } static size_t btree_node_u64s_with_format(struct btree_nr_keys nr, struct bkey_format *old_f, struct bkey_format *new_f) { /* stupid integer promotion rules */ ssize_t delta = (((int) new_f->key_u64s - old_f->key_u64s) * (int) nr.packed_keys) + (((int) new_f->key_u64s - BKEY_U64s) * (int) nr.unpacked_keys); BUG_ON(delta + nr.live_u64s < 0); return nr.live_u64s + delta; } /** * bch2_btree_node_format_fits - check if we could rewrite node with a new format * * @c: filesystem handle * @b: btree node to rewrite * @nr: number of keys for new node (i.e. b->nr) * @new_f: bkey format to translate keys to * * Returns: true if all re-packed keys will be able to fit in a new node. * * Assumes all keys will successfully pack with the new format. 
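 *
 * The size estimate comes from btree_node_u64s_with_format() above: each of
 * the nr.packed_keys keys changes size by (new_f->key_u64s - old_f->key_u64s)
 * u64s, and each of the nr.unpacked_keys keys (stored as a full struct bkey,
 * i.e. BKEY_U64s) changes by (new_f->key_u64s - BKEY_U64s). Purely
 * illustrative example: 100 packed keys, no unpacked keys, and a new format
 * one u64 smaller per key means the node needs 100 fewer u64s than
 * nr.live_u64s.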
*/ static bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b, struct btree_nr_keys nr, struct bkey_format *new_f) { size_t u64s = btree_node_u64s_with_format(nr, &b->format, new_f); return __vstruct_bytes(struct btree_node, u64s) < btree_buf_bytes(b); } /* Btree node freeing/allocation: */ static void __btree_node_free(struct btree_trans *trans, struct btree *b) { struct bch_fs *c = trans->c; trace_and_count(c, btree_node_free, trans, b); BUG_ON(btree_node_write_blocked(b)); BUG_ON(btree_node_dirty(b)); BUG_ON(btree_node_need_write(b)); BUG_ON(b == btree_node_root(c, b)); BUG_ON(b->ob.nr); BUG_ON(!list_empty(&b->write_blocked)); BUG_ON(b->will_make_reachable); clear_btree_node_noevict(b); } static void bch2_btree_node_free_inmem(struct btree_trans *trans, struct btree_path *path, struct btree *b) { struct bch_fs *c = trans->c; bch2_btree_node_lock_write_nofail(trans, path, &b->c); __btree_node_free(trans, b); mutex_lock(&c->btree_cache.lock); bch2_btree_node_hash_remove(&c->btree_cache, b); mutex_unlock(&c->btree_cache.lock); six_unlock_write(&b->c.lock); mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED); bch2_trans_node_drop(trans, b); } static void bch2_btree_node_free_never_used(struct btree_update *as, struct btree_trans *trans, struct btree *b) { struct bch_fs *c = as->c; struct prealloc_nodes *p = &as->prealloc_nodes[b->c.lock.readers != NULL]; BUG_ON(!list_empty(&b->write_blocked)); BUG_ON(b->will_make_reachable != (1UL|(unsigned long) as)); b->will_make_reachable = 0; closure_put(&as->cl); clear_btree_node_will_make_reachable(b); clear_btree_node_accessed(b); clear_btree_node_dirty_acct(c, b); clear_btree_node_need_write(b); mutex_lock(&c->btree_cache.lock); __bch2_btree_node_hash_remove(&c->btree_cache, b); mutex_unlock(&c->btree_cache.lock); BUG_ON(p->nr >= ARRAY_SIZE(p->b)); p->b[p->nr++] = b; six_unlock_intent(&b->c.lock); bch2_trans_node_drop(trans, b); } static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans, struct disk_reservation *res, struct closure *cl, bool interior_node, unsigned target, unsigned flags) { struct bch_fs *c = trans->c; struct write_point *wp; struct btree *b; BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp; struct open_buckets obs = { .nr = 0 }; struct bch_devs_list devs_have = (struct bch_devs_list) { 0 }; enum bch_watermark watermark = flags & BCH_WATERMARK_MASK; unsigned nr_reserve = watermark < BCH_WATERMARK_reclaim ? BTREE_NODE_RESERVE : 0; int ret; b = bch2_btree_node_mem_alloc(trans, interior_node); if (IS_ERR(b)) return b; BUG_ON(b->ob.nr); mutex_lock(&c->btree_reserve_cache_lock); if (c->btree_reserve_cache_nr > nr_reserve) { struct btree_alloc *a = &c->btree_reserve_cache[--c->btree_reserve_cache_nr]; obs = a->ob; bkey_copy(&tmp.k, &a->k); mutex_unlock(&c->btree_reserve_cache_lock); goto out; } mutex_unlock(&c->btree_reserve_cache_lock); retry: ret = bch2_alloc_sectors_start_trans(trans, target ?: c->opts.metadata_target ?: c->opts.foreground_target, 0, writepoint_ptr(&c->btree_write_point), &devs_have, res->nr_replicas, min(res->nr_replicas, c->opts.metadata_replicas_required), watermark, target ? 
BCH_WRITE_only_specified_devs : 0, cl, &wp); if (unlikely(ret)) goto err; if (wp->sectors_free < btree_sectors(c)) { struct open_bucket *ob; unsigned i; open_bucket_for_each(c, &wp->ptrs, ob, i) if (ob->sectors_free < btree_sectors(c)) ob->sectors_free = 0; bch2_alloc_sectors_done(c, wp); goto retry; } bkey_btree_ptr_v2_init(&tmp.k); bch2_alloc_sectors_append_ptrs(c, wp, &tmp.k, btree_sectors(c), false); bch2_open_bucket_get(c, wp, &obs); bch2_alloc_sectors_done(c, wp); out: bkey_copy(&b->key, &tmp.k); b->ob = obs; six_unlock_write(&b->c.lock); six_unlock_intent(&b->c.lock); return b; err: bch2_btree_node_to_freelist(c, b); return ERR_PTR(ret); } static struct btree *bch2_btree_node_alloc(struct btree_update *as, struct btree_trans *trans, unsigned level) { struct bch_fs *c = as->c; struct btree *b; struct prealloc_nodes *p = &as->prealloc_nodes[!!level]; int ret; BUG_ON(level >= BTREE_MAX_DEPTH); BUG_ON(!p->nr); b = p->b[--p->nr]; btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent); btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write); set_btree_node_accessed(b); set_btree_node_dirty_acct(c, b); set_btree_node_need_write(b); bch2_bset_init_first(b, &b->data->keys); b->c.level = level; b->c.btree_id = as->btree_id; b->version_ondisk = c->sb.version; memset(&b->nr, 0, sizeof(b->nr)); b->data->magic = cpu_to_le64(bset_magic(c)); memset(&b->data->_ptr, 0, sizeof(b->data->_ptr)); b->data->flags = 0; SET_BTREE_NODE_ID(b->data, as->btree_id); SET_BTREE_NODE_LEVEL(b->data, level); if (b->key.k.type == KEY_TYPE_btree_ptr_v2) { struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(&b->key); bp->v.mem_ptr = 0; bp->v.seq = b->data->keys.seq; bp->v.sectors_written = 0; } SET_BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data, true); bch2_btree_build_aux_trees(b); ret = bch2_btree_node_hash_insert(&c->btree_cache, b, level, as->btree_id); BUG_ON(ret); trace_and_count(c, btree_node_alloc, trans, b); bch2_increment_clock(c, btree_sectors(c), WRITE); return b; } static void btree_set_min(struct btree *b, struct bpos pos) { if (b->key.k.type == KEY_TYPE_btree_ptr_v2) bkey_i_to_btree_ptr_v2(&b->key)->v.min_key = pos; b->data->min_key = pos; } static void btree_set_max(struct btree *b, struct bpos pos) { b->key.k.p = pos; b->data->max_key = pos; } static struct btree *bch2_btree_node_alloc_replacement(struct btree_update *as, struct btree_trans *trans, struct btree *b) { struct btree *n = bch2_btree_node_alloc(as, trans, b->c.level); struct bkey_format format = bch2_btree_calc_format(b); /* * The keys might expand with the new format - if they wouldn't fit in * the btree node anymore, use the old format for now: */ if (!bch2_btree_node_format_fits(as->c, b, b->nr, &format)) format = b->format; SET_BTREE_NODE_SEQ(n->data, BTREE_NODE_SEQ(b->data) + 1); btree_set_min(n, b->data->min_key); btree_set_max(n, b->data->max_key); n->data->format = format; btree_node_set_format(n, format); bch2_btree_sort_into(as->c, n, b); btree_node_reset_sib_u64s(n); return n; } static struct btree *__btree_root_alloc(struct btree_update *as, struct btree_trans *trans, unsigned level) { struct btree *b = bch2_btree_node_alloc(as, trans, level); btree_set_min(b, POS_MIN); btree_set_max(b, SPOS_MAX); b->data->format = bch2_btree_calc_format(b); btree_node_set_format(b, b->data->format); bch2_btree_build_aux_trees(b); return b; } static void bch2_btree_reserve_put(struct btree_update *as, struct btree_trans *trans) { struct bch_fs *c = as->c; struct prealloc_nodes *p; for (p = as->prealloc_nodes; p < as->prealloc_nodes + 
ARRAY_SIZE(as->prealloc_nodes); p++) { while (p->nr) { struct btree *b = p->b[--p->nr]; mutex_lock(&c->btree_reserve_cache_lock); if (c->btree_reserve_cache_nr < ARRAY_SIZE(c->btree_reserve_cache)) { struct btree_alloc *a = &c->btree_reserve_cache[c->btree_reserve_cache_nr++]; a->ob = b->ob; b->ob.nr = 0; bkey_copy(&a->k, &b->key); } else { bch2_open_buckets_put(c, &b->ob); } mutex_unlock(&c->btree_reserve_cache_lock); btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent); btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write); __btree_node_free(trans, b); bch2_btree_node_to_freelist(c, b); } } } static int bch2_btree_reserve_get(struct btree_trans *trans, struct btree_update *as, unsigned nr_nodes[2], unsigned target, unsigned flags, struct closure *cl) { struct btree *b; unsigned interior; int ret = 0; BUG_ON(nr_nodes[0] + nr_nodes[1] > BTREE_RESERVE_MAX); /* * Protects reaping from the btree node cache and using the btree node * open bucket reserve: */ ret = bch2_btree_cache_cannibalize_lock(trans, cl); if (ret) return ret; for (interior = 0; interior < 2; interior++) { struct prealloc_nodes *p = as->prealloc_nodes + interior; while (p->nr < nr_nodes[interior]) { b = __bch2_btree_node_alloc(trans, &as->disk_res, cl, interior, target, flags); if (IS_ERR(b)) { ret = PTR_ERR(b); goto err; } p->b[p->nr++] = b; } } err: bch2_btree_cache_cannibalize_unlock(trans); return ret; } /* Asynchronous interior node update machinery */ static void bch2_btree_update_free(struct btree_update *as, struct btree_trans *trans) { struct bch_fs *c = as->c; if (as->took_gc_lock) up_read(&c->gc_lock); as->took_gc_lock = false; bch2_journal_pin_drop(&c->journal, &as->journal); bch2_journal_pin_flush(&c->journal, &as->journal); bch2_disk_reservation_put(c, &as->disk_res); bch2_btree_reserve_put(as, trans); bch2_time_stats_update(&c->times[BCH_TIME_btree_interior_update_total], as->start_time); mutex_lock(&c->btree_interior_update_lock); list_del(&as->unwritten_list); list_del(&as->list); closure_debug_destroy(&as->cl); mempool_free(as, &c->btree_interior_update_pool); /* * Have to do the wakeup with btree_interior_update_lock still held, * since being on btree_interior_update_list is our ref on @c: */ closure_wake_up(&c->btree_interior_update_wait); mutex_unlock(&c->btree_interior_update_lock); } static void btree_update_add_key(struct btree_update *as, struct keylist *keys, struct btree *b) { struct bkey_i *k = &b->key; BUG_ON(bch2_keylist_u64s(keys) + k->k.u64s > ARRAY_SIZE(as->_old_keys)); bkey_copy(keys->top, k); bkey_i_to_btree_ptr_v2(keys->top)->v.mem_ptr = b->c.level + 1; bch2_keylist_push(keys); } static bool btree_update_new_nodes_marked_sb(struct btree_update *as) { for_each_keylist_key(&as->new_keys, k) if (!bch2_dev_btree_bitmap_marked(as->c, bkey_i_to_s_c(k))) return false; return true; } static void btree_update_new_nodes_mark_sb(struct btree_update *as) { struct bch_fs *c = as->c; mutex_lock(&c->sb_lock); for_each_keylist_key(&as->new_keys, k) bch2_dev_btree_bitmap_mark(c, bkey_i_to_s_c(k)); bch2_write_super(c); mutex_unlock(&c->sb_lock); } /* * The transactional part of an interior btree node update, where we journal the * update we did to the interior node and update alloc info: */ static int btree_update_nodes_written_trans(struct btree_trans *trans, struct btree_update *as) { struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, as->journal_u64s); int ret = PTR_ERR_OR_ZERO(e); if (ret) return ret; memcpy(e, as->journal_entries, as->journal_u64s * sizeof(u64)); trans->journal_pin 
= &as->journal; for_each_keylist_key(&as->old_keys, k) { unsigned level = bkey_i_to_btree_ptr_v2(k)->v.mem_ptr; ret = bch2_key_trigger_old(trans, as->btree_id, level, bkey_i_to_s_c(k), BTREE_TRIGGER_transactional); if (ret) return ret; } for_each_keylist_key(&as->new_keys, k) { unsigned level = bkey_i_to_btree_ptr_v2(k)->v.mem_ptr; ret = bch2_key_trigger_new(trans, as->btree_id, level, bkey_i_to_s(k), BTREE_TRIGGER_transactional); if (ret) return ret; } return 0; } /* If the node has been reused, we might be reading uninitialized memory - that's fine: */ static noinline __no_kmsan_checks bool btree_node_seq_matches(struct btree *b, __le64 seq) { struct btree_node *b_data = READ_ONCE(b->data); return (b_data ? b_data->keys.seq : 0) == seq; } static void btree_update_nodes_written(struct btree_update *as) { struct bch_fs *c = as->c; struct btree *b; struct btree_trans *trans = bch2_trans_get(c); u64 journal_seq = 0; unsigned i; int ret; /* * If we're already in an error state, it might be because a btree node * was never written, and we might be trying to free that same btree * node here, but it won't have been marked as allocated and we'll see * spurious disk usage inconsistencies in the transactional part below * if we don't skip it: */ ret = bch2_journal_error(&c->journal); if (ret) goto err; if (!btree_update_new_nodes_marked_sb(as)) btree_update_new_nodes_mark_sb(as); /* * Wait for any in flight writes to finish before we free the old nodes * on disk. But we haven't pinned those old nodes in the btree cache, * they might have already been evicted. * * The update we're completing deleted references to those nodes from the * btree, so we know if they've been evicted they can't be pulled back in. * We just have to check if the nodes we have pointers to are still those * old nodes, and haven't been reused. * * This can't be done locklessly because the data buffer might have been * vmalloc allocated, and they're not RCU freed. We also need the * __no_kmsan_checks annotation because even with the btree node read * lock, nothing tells us that the data buffer has been initialized (if * the btree node has been reused for a different node, and the data * buffer swapped for a new data buffer). */ for (i = 0; i < as->nr_old_nodes; i++) { b = as->old_nodes[i]; bch2_trans_begin(trans); btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read); bool seq_matches = btree_node_seq_matches(b, as->old_nodes_seq[i]); six_unlock_read(&b->c.lock); bch2_trans_unlock_long(trans); if (seq_matches) wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight_inner, TASK_UNINTERRUPTIBLE); } /* * We did an update to a parent node where the pointers we added pointed * to child nodes that weren't written yet: now, the child nodes have * been written so we can write out the update to the interior node. */ /* * We can't call into journal reclaim here: we'd block on the journal * reclaim lock, but we may need to release the open buckets we have * pinned in order for other btree updates to make forward progress, and * journal reclaim does btree updates when flushing bkey_cached entries, * which may require allocations as well. 
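 *
 * That's why the commit below runs with BCH_WATERMARK_interior_updates and
 * BCH_TRANS_COMMIT_journal_reclaim (plus no_enospc/no_check_rw): the idea
 * being that it must be able to make forward progress on its own, without
 * calling back into journal reclaim or failing with -ENOSPC.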
*/ ret = commit_do(trans, &as->disk_res, &journal_seq, BCH_WATERMARK_interior_updates| BCH_TRANS_COMMIT_no_enospc| BCH_TRANS_COMMIT_no_check_rw| BCH_TRANS_COMMIT_journal_reclaim, btree_update_nodes_written_trans(trans, as)); bch2_trans_unlock(trans); bch2_fs_fatal_err_on(ret && !bch2_journal_error(&c->journal), c, "%s", bch2_err_str(ret)); err: /* * Ensure transaction is unlocked before using btree_node_lock_nopath() * (the use of which is always suspect, we need to work on removing this * in the future) * * It should be, but bch2_path_get_unlocked_mut() -> bch2_path_get() * calls bch2_path_upgrade(), before we call path_make_mut(), so we may * rarely end up with a locked path besides the one we have here: */ bch2_trans_unlock(trans); bch2_trans_begin(trans); /* * We have to be careful because another thread might be getting ready * to free as->b and calling btree_update_reparent() on us - we'll * recheck under btree_update_lock below: */ b = READ_ONCE(as->b); if (b) { /* * @b is the node we did the final insert into: * * On failure to get a journal reservation, we still have to * unblock the write and allow most of the write path to happen * so that shutdown works, but the i->journal_seq mechanism * won't work to prevent the btree write from being visible (we * didn't get a journal sequence number) - instead * __bch2_btree_node_write() doesn't do the actual write if * we're in journal error state: */ btree_path_idx_t path_idx = bch2_path_get_unlocked_mut(trans, as->btree_id, b->c.level, b->key.k.p); struct btree_path *path = trans->paths + path_idx; btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent); mark_btree_node_locked(trans, path, b->c.level, BTREE_NODE_INTENT_LOCKED); path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock); path->l[b->c.level].b = b; bch2_btree_node_lock_write_nofail(trans, path, &b->c); mutex_lock(&c->btree_interior_update_lock); list_del(&as->write_blocked_list); if (list_empty(&b->write_blocked)) clear_btree_node_write_blocked(b); /* * Node might have been freed, recheck under * btree_interior_update_lock: */ if (as->b == b) { BUG_ON(!b->c.level); BUG_ON(!btree_node_dirty(b)); if (!ret) { struct bset *last = btree_bset_last(b); last->journal_seq = cpu_to_le64( max(journal_seq, le64_to_cpu(last->journal_seq))); bch2_btree_add_journal_pin(c, b, journal_seq); } else { /* * If we didn't get a journal sequence number we * can't write this btree node, because recovery * won't know to ignore this write: */ set_btree_node_never_write(b); } } mutex_unlock(&c->btree_interior_update_lock); mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED); six_unlock_write(&b->c.lock); btree_node_write_if_need(trans, b, SIX_LOCK_intent); btree_node_unlock(trans, path, b->c.level); bch2_path_put(trans, path_idx, true); } bch2_journal_pin_drop(&c->journal, &as->journal); mutex_lock(&c->btree_interior_update_lock); for (i = 0; i < as->nr_new_nodes; i++) { b = as->new_nodes[i]; BUG_ON(b->will_make_reachable != (unsigned long) as); b->will_make_reachable = 0; clear_btree_node_will_make_reachable(b); } mutex_unlock(&c->btree_interior_update_lock); for (i = 0; i < as->nr_new_nodes; i++) { b = as->new_nodes[i]; btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read); btree_node_write_if_need(trans, b, SIX_LOCK_read); six_unlock_read(&b->c.lock); } for (i = 0; i < as->nr_open_buckets; i++) bch2_open_bucket_put(c, c->open_buckets + as->open_buckets[i]); bch2_btree_update_free(as, trans); bch2_trans_put(trans); } static void btree_interior_update_work(struct 
work_struct *work) { struct bch_fs *c = container_of(work, struct bch_fs, btree_interior_update_work); struct btree_update *as; while (1) { mutex_lock(&c->btree_interior_update_lock); as = list_first_entry_or_null(&c->btree_interior_updates_unwritten, struct btree_update, unwritten_list); if (as && !as->nodes_written) as = NULL; mutex_unlock(&c->btree_interior_update_lock); if (!as) break; btree_update_nodes_written(as); } } static CLOSURE_CALLBACK(btree_update_set_nodes_written) { closure_type(as, struct btree_update, cl); struct bch_fs *c = as->c; mutex_lock(&c->btree_interior_update_lock); as->nodes_written = true; mutex_unlock(&c->btree_interior_update_lock); queue_work(c->btree_interior_update_worker, &c->btree_interior_update_work); } /* * We're updating @b with pointers to nodes that haven't finished writing yet: * block @b from being written until @as completes */ static void btree_update_updated_node(struct btree_update *as, struct btree *b) { struct bch_fs *c = as->c; BUG_ON(as->mode != BTREE_UPDATE_none); BUG_ON(as->update_level_end < b->c.level); BUG_ON(!btree_node_dirty(b)); BUG_ON(!b->c.level); mutex_lock(&c->btree_interior_update_lock); list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten); as->mode = BTREE_UPDATE_node; as->b = b; as->update_level_end = b->c.level; set_btree_node_write_blocked(b); list_add(&as->write_blocked_list, &b->write_blocked); mutex_unlock(&c->btree_interior_update_lock); } static int bch2_update_reparent_journal_pin_flush(struct journal *j, struct journal_entry_pin *_pin, u64 seq) { return 0; } static void btree_update_reparent(struct btree_update *as, struct btree_update *child) { struct bch_fs *c = as->c; lockdep_assert_held(&c->btree_interior_update_lock); child->b = NULL; child->mode = BTREE_UPDATE_update; bch2_journal_pin_copy(&c->journal, &as->journal, &child->journal, bch2_update_reparent_journal_pin_flush); } static void btree_update_updated_root(struct btree_update *as, struct btree *b) { struct bkey_i *insert = &b->key; struct bch_fs *c = as->c; BUG_ON(as->mode != BTREE_UPDATE_none); BUG_ON(as->journal_u64s + jset_u64s(insert->k.u64s) > ARRAY_SIZE(as->journal_entries)); as->journal_u64s += journal_entry_set((void *) &as->journal_entries[as->journal_u64s], BCH_JSET_ENTRY_btree_root, b->c.btree_id, b->c.level, insert, insert->k.u64s); mutex_lock(&c->btree_interior_update_lock); list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten); as->mode = BTREE_UPDATE_root; mutex_unlock(&c->btree_interior_update_lock); } /* * bch2_btree_update_add_new_node: * * This causes @as to wait on @b to be written, before it gets to * bch2_btree_update_nodes_written * * Additionally, it sets b->will_make_reachable to prevent any additional writes * to @b from happening besides the first until @b is reachable on disk * * And it adds @b to the list of @as's new nodes, so that we can update sector * counts in bch2_btree_update_nodes_written: */ static void bch2_btree_update_add_new_node(struct btree_update *as, struct btree *b) { struct bch_fs *c = as->c; closure_get(&as->cl); mutex_lock(&c->btree_interior_update_lock); BUG_ON(as->nr_new_nodes >= ARRAY_SIZE(as->new_nodes)); BUG_ON(b->will_make_reachable); as->new_nodes[as->nr_new_nodes++] = b; b->will_make_reachable = 1UL|(unsigned long) as; set_btree_node_will_make_reachable(b); mutex_unlock(&c->btree_interior_update_lock); btree_update_add_key(as, &as->new_keys, b); if (b->key.k.type == KEY_TYPE_btree_ptr_v2) { unsigned bytes = vstruct_end(&b->data->keys) - (void *) b->data; 
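	/*
	 * Convert the node's current in-memory size (header plus bsets) into
	 * 512-byte sectors, rounded up to the filesystem block size. Purely
	 * illustrative numbers: with a 4096-byte block size, 5000 bytes round
	 * up to 8192, i.e. sectors_written == 16.
	 */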
unsigned sectors = round_up(bytes, block_bytes(c)) >> 9; bkey_i_to_btree_ptr_v2(&b->key)->v.sectors_written = cpu_to_le16(sectors); } } /* * returns true if @b was a new node */ static void btree_update_drop_new_node(struct bch_fs *c, struct btree *b) { struct btree_update *as; unsigned long v; unsigned i; mutex_lock(&c->btree_interior_update_lock); /* * When b->will_make_reachable != 0, it owns a ref on as->cl that's * dropped when it gets written by bch2_btree_complete_write - the * xchg() is for synchronization with bch2_btree_complete_write: */ v = xchg(&b->will_make_reachable, 0); clear_btree_node_will_make_reachable(b); as = (struct btree_update *) (v & ~1UL); if (!as) { mutex_unlock(&c->btree_interior_update_lock); return; } for (i = 0; i < as->nr_new_nodes; i++) if (as->new_nodes[i] == b) goto found; BUG(); found: array_remove_item(as->new_nodes, as->nr_new_nodes, i); mutex_unlock(&c->btree_interior_update_lock); if (v & 1) closure_put(&as->cl); } static void bch2_btree_update_get_open_buckets(struct btree_update *as, struct btree *b) { while (b->ob.nr) as->open_buckets[as->nr_open_buckets++] = b->ob.v[--b->ob.nr]; } static int bch2_btree_update_will_free_node_journal_pin_flush(struct journal *j, struct journal_entry_pin *_pin, u64 seq) { return 0; } /* * @b is being split/rewritten: it may have pointers to not-yet-written btree * nodes and thus outstanding btree_updates - redirect @b's * btree_updates to point to this btree_update: */ static void bch2_btree_interior_update_will_free_node(struct btree_update *as, struct btree *b) { struct bch_fs *c = as->c; struct btree_update *p, *n; struct btree_write *w; set_btree_node_dying(b); if (btree_node_fake(b)) return; mutex_lock(&c->btree_interior_update_lock); /* * Does this node have any btree_update operations preventing * it from being written? * * If so, redirect them to point to this btree_update: we can * write out our new nodes, but we won't make them visible until those * operations complete */ list_for_each_entry_safe(p, n, &b->write_blocked, write_blocked_list) { list_del_init(&p->write_blocked_list); btree_update_reparent(as, p); /* * for flush_held_btree_writes() waiting on updates to flush or * nodes to be writeable: */ closure_wake_up(&c->btree_interior_update_wait); } clear_btree_node_dirty_acct(c, b); clear_btree_node_need_write(b); clear_btree_node_write_blocked(b); /* * Does this node have unwritten data that has a pin on the journal? * * If so, transfer that pin to the btree_update operation - * note that if we're freeing multiple nodes, we only need to keep the * oldest pin of any of the nodes we're freeing. We'll release the pin * when the new nodes are persistent and reachable on disk: */ w = btree_current_write(b); bch2_journal_pin_copy(&c->journal, &as->journal, &w->journal, bch2_btree_update_will_free_node_journal_pin_flush); bch2_journal_pin_drop(&c->journal, &w->journal); w = btree_prev_write(b); bch2_journal_pin_copy(&c->journal, &as->journal, &w->journal, bch2_btree_update_will_free_node_journal_pin_flush); bch2_journal_pin_drop(&c->journal, &w->journal); mutex_unlock(&c->btree_interior_update_lock); /* * Is this a node that isn't reachable on disk yet? 
* * Nodes that aren't reachable yet have writes blocked until they're * reachable - now that we've cancelled any pending writes and moved * things waiting on that write to wait on this update, we can drop this * node from the list of nodes that the other update is making * reachable, prior to freeing it: */ btree_update_drop_new_node(c, b); btree_update_add_key(as, &as->old_keys, b); as->old_nodes[as->nr_old_nodes] = b; as->old_nodes_seq[as->nr_old_nodes] = b->data->keys.seq; as->nr_old_nodes++; } static void bch2_btree_update_done(struct btree_update *as, struct btree_trans *trans) { struct bch_fs *c = as->c; u64 start_time = as->start_time; BUG_ON(as->mode == BTREE_UPDATE_none); if (as->took_gc_lock) up_read(&as->c->gc_lock); as->took_gc_lock = false; bch2_btree_reserve_put(as, trans); continue_at(&as->cl, btree_update_set_nodes_written, as->c->btree_interior_update_worker); bch2_time_stats_update(&c->times[BCH_TIME_btree_interior_update_foreground], start_time); } static const char * const btree_node_reawrite_reason_strs[] = { #define x(n) #n, BTREE_NODE_REWRITE_REASON() #undef x NULL, }; static struct btree_update * bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path, unsigned level_start, bool split, unsigned target, unsigned flags) { struct bch_fs *c = trans->c; struct btree_update *as; u64 start_time = local_clock(); int disk_res_flags = (flags & BCH_TRANS_COMMIT_no_enospc) ? BCH_DISK_RESERVATION_NOFAIL : 0; unsigned nr_nodes[2] = { 0, 0 }; unsigned level_end = level_start; enum bch_watermark watermark = flags & BCH_WATERMARK_MASK; int ret = 0; u32 restart_count = trans->restart_count; BUG_ON(!path->should_be_locked); if (watermark == BCH_WATERMARK_copygc) watermark = BCH_WATERMARK_btree_copygc; if (watermark < BCH_WATERMARK_btree) watermark = BCH_WATERMARK_btree; flags &= ~BCH_WATERMARK_MASK; flags |= watermark; if (watermark < BCH_WATERMARK_reclaim && test_bit(JOURNAL_space_low, &c->journal.flags)) { if (flags & BCH_TRANS_COMMIT_journal_reclaim) return ERR_PTR(-BCH_ERR_journal_reclaim_would_deadlock); ret = drop_locks_do(trans, ({ wait_event(c->journal.wait, !test_bit(JOURNAL_space_low, &c->journal.flags)); 0; })); if (ret) return ERR_PTR(ret); } while (1) { nr_nodes[!!level_end] += 1 + split; level_end++; ret = bch2_btree_path_upgrade(trans, path, level_end + 1); if (ret) return ERR_PTR(ret); if (!btree_path_node(path, level_end)) { /* Allocating new root? 
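 *
 * If so, the depth is about to increase: reserve node(s) for the new root
 * level as well (nr_nodes[1] += split) and stop walking up, since level_end
 * is bumped to BTREE_MAX_DEPTH below.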
*/ nr_nodes[1] += split; level_end = BTREE_MAX_DEPTH; break; } /* * Always check for space for two keys, even if we won't have to * split at prior level - it might have been a merge instead: */ if (bch2_btree_node_insert_fits(path->l[level_end].b, BKEY_BTREE_PTR_U64s_MAX * 2)) break; split = path->l[level_end].b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c); } if (!down_read_trylock(&c->gc_lock)) { ret = drop_locks_do(trans, (down_read(&c->gc_lock), 0)); if (ret) { up_read(&c->gc_lock); return ERR_PTR(ret); } } as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOFS); memset(as, 0, sizeof(*as)); closure_init(&as->cl, NULL); as->c = c; as->start_time = start_time; as->ip_started = _RET_IP_; as->mode = BTREE_UPDATE_none; as->flags = flags; as->took_gc_lock = true; as->btree_id = path->btree_id; as->update_level_start = level_start; as->update_level_end = level_end; INIT_LIST_HEAD(&as->list); INIT_LIST_HEAD(&as->unwritten_list); INIT_LIST_HEAD(&as->write_blocked_list); bch2_keylist_init(&as->old_keys, as->_old_keys); bch2_keylist_init(&as->new_keys, as->_new_keys); bch2_keylist_init(&as->parent_keys, as->inline_keys); mutex_lock(&c->btree_interior_update_lock); list_add_tail(&as->list, &c->btree_interior_update_list); mutex_unlock(&c->btree_interior_update_lock); struct btree *b = btree_path_node(path, path->level); as->node_start = b->data->min_key; as->node_end = b->data->max_key; as->node_needed_rewrite = btree_node_rewrite_reason(b); as->node_written = b->written; as->node_sectors = btree_buf_bytes(b) >> 9; as->node_remaining = __bch2_btree_u64s_remaining(b, btree_bkey_last(b, bset_tree_last(b))); /* * We don't want to allocate if we're in an error state, that can cause * deadlock on emergency shutdown due to open buckets getting stuck in * the btree_reserve_cache after allocator shutdown has cleared it out. * This check needs to come after adding us to the btree_interior_update * list but before calling bch2_btree_reserve_get, to synchronize with * __bch2_fs_read_only(). 
*/ ret = bch2_journal_error(&c->journal); if (ret) goto err; ret = bch2_disk_reservation_get(c, &as->disk_res, (nr_nodes[0] + nr_nodes[1]) * btree_sectors(c), READ_ONCE(c->opts.metadata_replicas), disk_res_flags); if (ret) goto err; ret = bch2_btree_reserve_get(trans, as, nr_nodes, target, flags, NULL); if (bch2_err_matches(ret, ENOSPC) || bch2_err_matches(ret, ENOMEM)) { struct closure cl; /* * XXX: this should probably be a separate BTREE_INSERT_NONBLOCK * flag */ if (bch2_err_matches(ret, ENOSPC) && (flags & BCH_TRANS_COMMIT_journal_reclaim) && watermark < BCH_WATERMARK_reclaim) { ret = bch_err_throw(c, journal_reclaim_would_deadlock); goto err; } closure_init_stack(&cl); do { ret = bch2_btree_reserve_get(trans, as, nr_nodes, target, flags, &cl); bch2_trans_unlock(trans); bch2_wait_on_allocator(c, &cl); } while (bch2_err_matches(ret, BCH_ERR_operation_blocked)); } if (ret) { trace_and_count(c, btree_reserve_get_fail, trans->fn, _RET_IP_, nr_nodes[0] + nr_nodes[1], ret); goto err; } ret = bch2_trans_relock(trans); if (ret) goto err; bch2_trans_verify_not_restarted(trans, restart_count); return as; err: bch2_btree_update_free(as, trans); if (!bch2_err_matches(ret, ENOSPC) && !bch2_err_matches(ret, EROFS) && ret != -BCH_ERR_journal_reclaim_would_deadlock && ret != -BCH_ERR_journal_shutdown) bch_err_fn_ratelimited(c, ret); return ERR_PTR(ret); } /* Btree root updates: */ static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b) { /* Root nodes cannot be reaped */ mutex_lock(&c->btree_cache.lock); list_del_init(&b->list); mutex_unlock(&c->btree_cache.lock); mutex_lock(&c->btree_root_lock); bch2_btree_id_root(c, b->c.btree_id)->b = b; mutex_unlock(&c->btree_root_lock); bch2_recalc_btree_reserve(c); } static int bch2_btree_set_root(struct btree_update *as, struct btree_trans *trans, struct btree_path *path, struct btree *b, bool nofail) { struct bch_fs *c = as->c; trace_and_count(c, btree_node_set_root, trans, b); struct btree *old = btree_node_root(c, b); /* * Ensure no one is using the old root while we switch to the * new root: */ if (nofail) { bch2_btree_node_lock_write_nofail(trans, path, &old->c); } else { int ret = bch2_btree_node_lock_write(trans, path, &old->c); if (ret) return ret; } bch2_btree_set_root_inmem(c, b); btree_update_updated_root(as, b); /* * Unlock old root after new root is visible: * * The new root isn't persistent, but that's ok: we still have * an intent lock on the new root, and any updates that would * depend on the new root would have to update the new root. 
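 *
 * Note the ordering here: the new root was published in memory
 * (bch2_btree_set_root_inmem()) and the root update queued for the journal
 * (btree_update_updated_root()) while we still held the write lock on the
 * old root taken above, so traversals can't land on the old root once it is
 * unlocked below.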
*/ bch2_btree_node_unlock_write(trans, path, old); return 0; } /* Interior node updates: */ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree_trans *trans, struct btree_path *path, struct btree *b, struct btree_node_iter *node_iter, struct bkey_i *insert) { struct bch_fs *c = as->c; struct bkey_packed *k; struct printbuf buf = PRINTBUF; unsigned long old, new; BUG_ON(insert->k.type == KEY_TYPE_btree_ptr_v2 && !btree_ptr_sectors_written(bkey_i_to_s_c(insert))); if (unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags))) bch2_journal_key_overwritten(c, b->c.btree_id, b->c.level, insert->k.p); struct bkey_validate_context from = (struct bkey_validate_context) { .from = BKEY_VALIDATE_btree_node, .level = b->c.level, .btree = b->c.btree_id, .flags = BCH_VALIDATE_commit, }; if (bch2_bkey_validate(c, bkey_i_to_s_c(insert), from) ?: bch2_bkey_in_btree_node(c, b, bkey_i_to_s_c(insert), from)) { bch2_fs_inconsistent(c, "%s: inserting invalid bkey", __func__); dump_stack(); } BUG_ON(as->journal_u64s + jset_u64s(insert->k.u64s) > ARRAY_SIZE(as->journal_entries)); as->journal_u64s += journal_entry_set((void *) &as->journal_entries[as->journal_u64s], BCH_JSET_ENTRY_btree_keys, b->c.btree_id, b->c.level, insert, insert->k.u64s); while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) && bkey_iter_pos_cmp(b, k, &insert->k.p) < 0) bch2_btree_node_iter_advance(node_iter, b); bch2_btree_bset_insert_key(trans, path, b, node_iter, insert); set_btree_node_dirty_acct(c, b); old = READ_ONCE(b->flags); do { new = old; new &= ~BTREE_WRITE_TYPE_MASK; new |= BTREE_WRITE_interior; new |= 1 << BTREE_NODE_need_write; } while (!try_cmpxchg(&b->flags, &old, new)); printbuf_exit(&buf); } static int bch2_btree_insert_keys_interior(struct btree_update *as, struct btree_trans *trans, struct btree_path *path, struct btree *b, struct btree_node_iter node_iter, struct keylist *keys) { struct bkey_i *insert = bch2_keylist_front(keys); struct bkey_packed *k; BUG_ON(btree_node_type(b) != BKEY_TYPE_btree); while ((k = bch2_btree_node_iter_prev_all(&node_iter, b)) && (bkey_cmp_left_packed(b, k, &insert->k.p) >= 0)) ; for (; insert != keys->top && bpos_le(insert->k.p, b->key.k.p); insert = bkey_next(insert)) bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, insert); int ret = bch2_btree_node_check_topology(trans, b); if (ret) { struct printbuf buf = PRINTBUF; for (struct bkey_i *k = keys->keys; k != insert; k = bkey_next(k)) { bch2_bkey_val_to_text(&buf, trans->c, bkey_i_to_s_c(k)); prt_newline(&buf); } bch2_fs_fatal_error(as->c, "%ps -> %s(): check_topology error %s: inserted keys\n%s", (void *) _RET_IP_, __func__, bch2_err_str(ret), buf.buf); dump_stack(); return ret; } memmove_u64s_down(keys->keys, insert, keys->top_p - insert->_data); keys->top_p -= insert->_data - keys->keys_p; return 0; } static bool key_deleted_in_insert(struct keylist *insert_keys, struct bpos pos) { if (insert_keys) for_each_keylist_key(insert_keys, k) if (bkey_deleted(&k->k) && bpos_eq(k->k.p, pos)) return true; return false; } /* * Move keys from n1 (original replacement node, now lower node) to n2 (higher * node) */ static void __btree_split_node(struct btree_update *as, struct btree_trans *trans, struct btree *b, struct btree *n[2], struct keylist *insert_keys) { struct bkey_packed *k; struct bpos n1_pos = POS_MIN; struct btree_node_iter iter; struct bset *bsets[2]; struct bkey_format_state format[2]; struct bkey_packed *out[2]; struct bkey uk; unsigned u64s, n1_u64s = (b->nr.live_u64s * 3) / 5; struct { 
unsigned nr_keys, val_u64s; } nr_keys[2]; int i; memset(&nr_keys, 0, sizeof(nr_keys)); for (i = 0; i < 2; i++) { BUG_ON(n[i]->nsets != 1); bsets[i] = btree_bset_first(n[i]); out[i] = bsets[i]->start; SET_BTREE_NODE_SEQ(n[i]->data, BTREE_NODE_SEQ(b->data) + 1); bch2_bkey_format_init(&format[i]); } u64s = 0; for_each_btree_node_key(b, k, &iter) { if (bkey_deleted(k)) continue; uk = bkey_unpack_key(b, k); if (b->c.level && u64s < n1_u64s && u64s + k->u64s >= n1_u64s && (bch2_key_deleted_in_journal(trans, b->c.btree_id, b->c.level, uk.p) || key_deleted_in_insert(insert_keys, uk.p))) n1_u64s += k->u64s; i = u64s >= n1_u64s; u64s += k->u64s; if (!i) n1_pos = uk.p; bch2_bkey_format_add_key(&format[i], &uk); nr_keys[i].nr_keys++; nr_keys[i].val_u64s += bkeyp_val_u64s(&b->format, k); } btree_set_min(n[0], b->data->min_key); btree_set_max(n[0], n1_pos); btree_set_min(n[1], bpos_successor(n1_pos)); btree_set_max(n[1], b->data->max_key); for (i = 0; i < 2; i++) { bch2_bkey_format_add_pos(&format[i], n[i]->data->min_key); bch2_bkey_format_add_pos(&format[i], n[i]->data->max_key); n[i]->data->format = bch2_bkey_format_done(&format[i]); unsigned u64s = nr_keys[i].nr_keys * n[i]->data->format.key_u64s + nr_keys[i].val_u64s; if (__vstruct_bytes(struct btree_node, u64s) > btree_buf_bytes(b)) n[i]->data->format = b->format; btree_node_set_format(n[i], n[i]->data->format); } u64s = 0; for_each_btree_node_key(b, k, &iter) { if (bkey_deleted(k)) continue; i = u64s >= n1_u64s; u64s += k->u64s; if (bch2_bkey_transform(&n[i]->format, out[i], bkey_packed(k) ? &b->format: &bch2_bkey_format_current, k)) out[i]->format = KEY_FORMAT_LOCAL_BTREE; else bch2_bkey_unpack(b, (void *) out[i], k); out[i]->needs_whiteout = false; btree_keys_account_key_add(&n[i]->nr, 0, out[i]); out[i] = bkey_p_next(out[i]); } for (i = 0; i < 2; i++) { bsets[i]->u64s = cpu_to_le16((u64 *) out[i] - bsets[i]->_data); BUG_ON(!bsets[i]->u64s); set_btree_bset_end(n[i], n[i]->set); btree_node_reset_sib_u64s(n[i]); bch2_verify_btree_nr_keys(n[i]); BUG_ON(bch2_btree_node_check_topology(trans, n[i])); } } /* * For updates to interior nodes, we've got to do the insert before we split * because the stuff we're inserting has to be inserted atomically. Post split, * the keys might have to go in different nodes and the split would no longer be * atomic. 
* * Worse, if the insert is from btree node coalescing, if we do the insert after * we do the split (and pick the pivot) - the pivot we pick might be between * nodes that were coalesced, and thus in the middle of a child node post * coalescing: */ static int btree_split_insert_keys(struct btree_update *as, struct btree_trans *trans, btree_path_idx_t path_idx, struct btree *b, struct keylist *keys) { struct btree_path *path = trans->paths + path_idx; if (!bch2_keylist_empty(keys) && bpos_le(bch2_keylist_front(keys)->k.p, b->data->max_key)) { struct btree_node_iter node_iter; bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p); int ret = bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys); if (ret) return ret; } return 0; } static int btree_split(struct btree_update *as, struct btree_trans *trans, btree_path_idx_t path, struct btree *b, struct keylist *keys) { struct bch_fs *c = as->c; struct btree *parent = btree_node_parent(trans->paths + path, b); struct btree *n1, *n2 = NULL, *n3 = NULL; btree_path_idx_t path1 = 0, path2 = 0; u64 start_time = local_clock(); int ret = 0; bch2_verify_btree_nr_keys(b); BUG_ON(!parent && (b != btree_node_root(c, b))); BUG_ON(parent && !btree_node_intent_locked(trans->paths + path, b->c.level + 1)); ret = bch2_btree_node_check_topology(trans, b); if (ret) return ret; if (b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c)) { struct btree *n[2]; trace_and_count(c, btree_node_split, trans, b); n[0] = n1 = bch2_btree_node_alloc(as, trans, b->c.level); n[1] = n2 = bch2_btree_node_alloc(as, trans, b->c.level); __btree_split_node(as, trans, b, n, keys); if (keys) { ret = btree_split_insert_keys(as, trans, path, n1, keys) ?: btree_split_insert_keys(as, trans, path, n2, keys); if (ret) goto err; BUG_ON(!bch2_keylist_empty(keys)); } bch2_btree_build_aux_trees(n2); bch2_btree_build_aux_trees(n1); bch2_btree_update_add_new_node(as, n1); bch2_btree_update_add_new_node(as, n2); six_unlock_write(&n2->c.lock); six_unlock_write(&n1->c.lock); path1 = bch2_path_get_unlocked_mut(trans, as->btree_id, n1->c.level, n1->key.k.p); six_lock_increment(&n1->c.lock, SIX_LOCK_intent); mark_btree_node_locked(trans, trans->paths + path1, n1->c.level, BTREE_NODE_INTENT_LOCKED); bch2_btree_path_level_init(trans, trans->paths + path1, n1); path2 = bch2_path_get_unlocked_mut(trans, as->btree_id, n2->c.level, n2->key.k.p); six_lock_increment(&n2->c.lock, SIX_LOCK_intent); mark_btree_node_locked(trans, trans->paths + path2, n2->c.level, BTREE_NODE_INTENT_LOCKED); bch2_btree_path_level_init(trans, trans->paths + path2, n2); /* * Note that on recursive parent_keys == keys, so we * can't start adding new keys to parent_keys before emptying it * out (which we did with btree_split_insert_keys() above) */ bch2_keylist_add(&as->parent_keys, &n1->key); bch2_keylist_add(&as->parent_keys, &n2->key); if (!parent) { /* Depth increases, make a new root */ n3 = __btree_root_alloc(as, trans, b->c.level + 1); bch2_btree_update_add_new_node(as, n3); six_unlock_write(&n3->c.lock); trans->paths[path2].locks_want++; BUG_ON(btree_node_locked(trans->paths + path2, n3->c.level)); six_lock_increment(&n3->c.lock, SIX_LOCK_intent); mark_btree_node_locked(trans, trans->paths + path2, n3->c.level, BTREE_NODE_INTENT_LOCKED); bch2_btree_path_level_init(trans, trans->paths + path2, n3); n3->sib_u64s[0] = U16_MAX; n3->sib_u64s[1] = U16_MAX; ret = btree_split_insert_keys(as, trans, path, n3, &as->parent_keys); if (ret) goto err; } } else { trace_and_count(c, btree_node_compact, trans, b); n1 = 
bch2_btree_node_alloc_replacement(as, trans, b); if (keys) { ret = btree_split_insert_keys(as, trans, path, n1, keys); if (ret) goto err; BUG_ON(!bch2_keylist_empty(keys)); } bch2_btree_build_aux_trees(n1); bch2_btree_update_add_new_node(as, n1); six_unlock_write(&n1->c.lock); path1 = bch2_path_get_unlocked_mut(trans, as->btree_id, n1->c.level, n1->key.k.p); six_lock_increment(&n1->c.lock, SIX_LOCK_intent); mark_btree_node_locked(trans, trans->paths + path1, n1->c.level, BTREE_NODE_INTENT_LOCKED); bch2_btree_path_level_init(trans, trans->paths + path1, n1); if (parent) bch2_keylist_add(&as->parent_keys, &n1->key); } /* New nodes all written, now make them visible: */ if (parent) { /* Split a non root node */ ret = bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys); } else if (n3) { ret = bch2_btree_set_root(as, trans, trans->paths + path, n3, false); } else { /* Root filled up but didn't need to be split */ ret = bch2_btree_set_root(as, trans, trans->paths + path, n1, false); } if (ret) goto err; bch2_btree_interior_update_will_free_node(as, b); if (n3) { bch2_btree_update_get_open_buckets(as, n3); bch2_btree_node_write_trans(trans, n3, SIX_LOCK_intent, 0); } if (n2) { bch2_btree_update_get_open_buckets(as, n2); bch2_btree_node_write_trans(trans, n2, SIX_LOCK_intent, 0); } bch2_btree_update_get_open_buckets(as, n1); bch2_btree_node_write_trans(trans, n1, SIX_LOCK_intent, 0); /* * The old node must be freed (in memory) _before_ unlocking the new * nodes - else another thread could re-acquire a read lock on the old * node after another thread has locked and updated the new node, thus * seeing stale data: */ bch2_btree_node_free_inmem(trans, trans->paths + path, b); if (n3) bch2_trans_node_add(trans, trans->paths + path, n3); if (n2) bch2_trans_node_add(trans, trans->paths + path2, n2); bch2_trans_node_add(trans, trans->paths + path1, n1); if (n3) six_unlock_intent(&n3->c.lock); if (n2) six_unlock_intent(&n2->c.lock); six_unlock_intent(&n1->c.lock); out: if (path2) { __bch2_btree_path_unlock(trans, trans->paths + path2); bch2_path_put(trans, path2, true); } if (path1) { __bch2_btree_path_unlock(trans, trans->paths + path1); bch2_path_put(trans, path1, true); } bch2_trans_verify_locks(trans); bch2_time_stats_update(&c->times[n2 ? BCH_TIME_btree_node_split : BCH_TIME_btree_node_compact], start_time); return ret; err: if (n3) bch2_btree_node_free_never_used(as, trans, n3); if (n2) bch2_btree_node_free_never_used(as, trans, n2); bch2_btree_node_free_never_used(as, trans, n1); goto out; } /** * bch2_btree_insert_node - insert bkeys into a given btree node * * @as: btree_update object * @trans: btree_trans object * @path_idx: path that points to current node * @b: node to insert keys into * @keys: list of keys to insert * * Returns: 0 on success, typically transaction restart error on failure * * Inserts as many keys as it can into a given btree node, splitting it if full. * If a split occurred, this function will return early. This can only happen * for leaf nodes -- inserts into interior nodes have to be atomic. 
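 *
 * If the node is full but sits at or above as->update_level_end, this
 * btree_update didn't reserve nodes or locks for a split that high up
 * (see bch2_btree_update_start()), so instead of splitting we return
 * BCH_ERR_transaction_restart_split_race and the transaction is retried.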
*/ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *trans, btree_path_idx_t path_idx, struct btree *b, struct keylist *keys) { struct bch_fs *c = as->c; struct btree_path *path = trans->paths + path_idx, *linked; unsigned i; int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s); int old_live_u64s = b->nr.live_u64s; int live_u64s_added, u64s_added; int ret; lockdep_assert_held(&c->gc_lock); BUG_ON(!b->c.level); BUG_ON(!as || as->b); bch2_verify_keylist_sorted(keys); if (!btree_node_intent_locked(path, b->c.level)) { struct printbuf buf = PRINTBUF; bch2_log_msg_start(c, &buf); prt_printf(&buf, "%s(): node not locked at level %u\n", __func__, b->c.level); bch2_btree_update_to_text(&buf, as); bch2_btree_path_to_text(&buf, trans, path_idx); bch2_fs_emergency_read_only2(c, &buf); bch2_print_str(c, KERN_ERR, buf.buf); printbuf_exit(&buf); return -EIO; } ret = bch2_btree_node_lock_write(trans, path, &b->c); if (ret) return ret; bch2_btree_node_prep_for_write(trans, path, b); if (!bch2_btree_node_insert_fits(b, bch2_keylist_u64s(keys))) { bch2_btree_node_unlock_write(trans, path, b); goto split; } ret = bch2_btree_node_check_topology(trans, b) ?: bch2_btree_insert_keys_interior(as, trans, path, b, path->l[b->c.level].iter, keys); if (ret) { bch2_btree_node_unlock_write(trans, path, b); return ret; } trans_for_each_path_with_node(trans, b, linked, i) bch2_btree_node_iter_peek(&linked->l[b->c.level].iter, b); bch2_trans_verify_paths(trans); live_u64s_added = (int) b->nr.live_u64s - old_live_u64s; u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s; if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0) b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added); if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0) b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added); if (u64s_added > live_u64s_added && bch2_maybe_compact_whiteouts(c, b)) bch2_trans_node_reinit_iter(trans, b); btree_update_updated_node(as, b); bch2_btree_node_unlock_write(trans, path, b); return 0; split: /* * We could attempt to avoid the transaction restart, by calling * bch2_btree_path_upgrade() and allocating more nodes: */ if (b->c.level >= as->update_level_end) { trace_and_count(c, trans_restart_split_race, trans, _THIS_IP_, b); return btree_trans_restart(trans, BCH_ERR_transaction_restart_split_race); } return btree_split(as, trans, path_idx, b, keys); } int bch2_btree_split_leaf(struct btree_trans *trans, btree_path_idx_t path, unsigned flags) { /* btree_split & merge may both cause paths array to be reallocated */ struct btree *b = path_l(trans->paths + path)->b; struct btree_update *as; unsigned l; int ret = 0; as = bch2_btree_update_start(trans, trans->paths + path, trans->paths[path].level, true, 0, flags); if (IS_ERR(as)) return PTR_ERR(as); ret = btree_split(as, trans, path, b, NULL); if (ret) { bch2_btree_update_free(as, trans); return ret; } bch2_btree_update_done(as, trans); for (l = trans->paths[path].level + 1; btree_node_intent_locked(&trans->paths[path], l) && !ret; l++) ret = bch2_foreground_maybe_merge(trans, path, l, flags); return ret; } static void __btree_increase_depth(struct btree_update *as, struct btree_trans *trans, btree_path_idx_t path_idx) { struct bch_fs *c = as->c; struct btree_path *path = trans->paths + path_idx; struct btree *n, *b = bch2_btree_id_root(c, path->btree_id)->b; BUG_ON(!btree_node_locked(path, b->c.level)); n = __btree_root_alloc(as, trans, b->c.level + 1); bch2_btree_update_add_new_node(as, n); 
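	/*
	 * The new root starts out with a single key pointing at the old root:
	 * it's added via as->parent_keys just below, and then we switch the
	 * in-memory root over with bch2_btree_set_root(..., nofail = true).
	 */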
six_unlock_write(&n->c.lock); path->locks_want++; BUG_ON(btree_node_locked(path, n->c.level)); six_lock_increment(&n->c.lock, SIX_LOCK_intent); mark_btree_node_locked(trans, path, n->c.level, BTREE_NODE_INTENT_LOCKED); bch2_btree_path_level_init(trans, path, n); n->sib_u64s[0] = U16_MAX; n->sib_u64s[1] = U16_MAX; bch2_keylist_add(&as->parent_keys, &b->key); btree_split_insert_keys(as, trans, path_idx, n, &as->parent_keys); int ret = bch2_btree_set_root(as, trans, path, n, true); BUG_ON(ret); bch2_btree_update_get_open_buckets(as, n); bch2_btree_node_write_trans(trans, n, SIX_LOCK_intent, 0); bch2_trans_node_add(trans, path, n); six_unlock_intent(&n->c.lock); mutex_lock(&c->btree_cache.lock); list_add_tail(&b->list, &c->btree_cache.live[btree_node_pinned(b)].list); mutex_unlock(&c->btree_cache.lock); bch2_trans_verify_locks(trans); } int bch2_btree_increase_depth(struct btree_trans *trans, btree_path_idx_t path, unsigned flags) { struct bch_fs *c = trans->c; struct btree *b = bch2_btree_id_root(c, trans->paths[path].btree_id)->b; if (btree_node_fake(b)) return bch2_btree_split_leaf(trans, path, flags); struct btree_update *as = bch2_btree_update_start(trans, trans->paths + path, b->c.level, true, 0, flags); if (IS_ERR(as)) return PTR_ERR(as); __btree_increase_depth(as, trans, path); bch2_btree_update_done(as, trans); return 0; } int __bch2_foreground_maybe_merge(struct btree_trans *trans, btree_path_idx_t path, unsigned level, unsigned flags, enum btree_node_sibling sib) { struct bch_fs *c = trans->c; struct btree_update *as; struct bkey_format_state new_s; struct bkey_format new_f; struct bkey_i delete; struct btree *b, *m, *n, *prev, *next, *parent; struct bpos sib_pos; size_t sib_u64s; enum btree_id btree = trans->paths[path].btree_id; btree_path_idx_t sib_path = 0, new_path = 0; u64 start_time = local_clock(); int ret = 0; bch2_trans_verify_not_unlocked_or_in_restart(trans); BUG_ON(!trans->paths[path].should_be_locked); BUG_ON(!btree_node_locked(&trans->paths[path], level)); /* * Work around a deadlock caused by the btree write buffer not doing * merges and leaving tons of merges for us to do - we really don't need * to be doing merges at all from the interior update path, and if the * interior update path is generating too many new interior updates we * deadlock: */ if ((flags & BCH_WATERMARK_MASK) == BCH_WATERMARK_interior_updates) return 0; if ((flags & BCH_WATERMARK_MASK) <= BCH_WATERMARK_reclaim) { flags &= ~BCH_WATERMARK_MASK; flags |= BCH_WATERMARK_btree; flags |= BCH_TRANS_COMMIT_journal_reclaim; } b = trans->paths[path].l[level].b; if ((sib == btree_prev_sib && bpos_eq(b->data->min_key, POS_MIN)) || (sib == btree_next_sib && bpos_eq(b->data->max_key, SPOS_MAX))) { b->sib_u64s[sib] = U16_MAX; return 0; } sib_pos = sib == btree_prev_sib ? 
bpos_predecessor(b->data->min_key) : bpos_successor(b->data->max_key); sib_path = bch2_path_get(trans, btree, sib_pos, U8_MAX, level, BTREE_ITER_intent, _THIS_IP_); ret = bch2_btree_path_traverse(trans, sib_path, false); if (ret) goto err; btree_path_set_should_be_locked(trans, trans->paths + sib_path); m = trans->paths[sib_path].l[level].b; if (btree_node_parent(trans->paths + path, b) != btree_node_parent(trans->paths + sib_path, m)) { b->sib_u64s[sib] = U16_MAX; goto out; } if (sib == btree_prev_sib) { prev = m; next = b; } else { prev = b; next = m; } if (!bpos_eq(bpos_successor(prev->data->max_key), next->data->min_key)) { struct printbuf buf = PRINTBUF; printbuf_indent_add_nextline(&buf, 2); prt_printf(&buf, "%s(): ", __func__); ret = __bch2_topology_error(c, &buf); prt_newline(&buf); prt_printf(&buf, "prev ends at "); bch2_bpos_to_text(&buf, prev->data->max_key); prt_newline(&buf); prt_printf(&buf, "next starts at "); bch2_bpos_to_text(&buf, next->data->min_key); bch_err(c, "%s", buf.buf); printbuf_exit(&buf); goto err; } bch2_bkey_format_init(&new_s); bch2_bkey_format_add_pos(&new_s, prev->data->min_key); __bch2_btree_calc_format(&new_s, prev); __bch2_btree_calc_format(&new_s, next); bch2_bkey_format_add_pos(&new_s, next->data->max_key); new_f = bch2_bkey_format_done(&new_s); sib_u64s = btree_node_u64s_with_format(b->nr, &b->format, &new_f) + btree_node_u64s_with_format(m->nr, &m->format, &new_f); if (sib_u64s > BTREE_FOREGROUND_MERGE_HYSTERESIS(c)) { sib_u64s -= BTREE_FOREGROUND_MERGE_HYSTERESIS(c); sib_u64s /= 2; sib_u64s += BTREE_FOREGROUND_MERGE_HYSTERESIS(c); } sib_u64s = min(sib_u64s, btree_max_u64s(c)); sib_u64s = min(sib_u64s, (size_t) U16_MAX - 1); b->sib_u64s[sib] = sib_u64s; if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold) goto out; parent = btree_node_parent(trans->paths + path, b); as = bch2_btree_update_start(trans, trans->paths + path, level, false, 0, BCH_TRANS_COMMIT_no_enospc|flags); ret = PTR_ERR_OR_ZERO(as); if (ret) goto err; as->node_start = prev->data->min_key; as->node_end = next->data->max_key; trace_and_count(c, btree_node_merge, trans, b); n = bch2_btree_node_alloc(as, trans, b->c.level); SET_BTREE_NODE_SEQ(n->data, max(BTREE_NODE_SEQ(b->data), BTREE_NODE_SEQ(m->data)) + 1); btree_set_min(n, prev->data->min_key); btree_set_max(n, next->data->max_key); n->data->format = new_f; btree_node_set_format(n, new_f); bch2_btree_sort_into(c, n, prev); bch2_btree_sort_into(c, n, next); bch2_btree_build_aux_trees(n); bch2_btree_update_add_new_node(as, n); six_unlock_write(&n->c.lock); new_path = bch2_path_get_unlocked_mut(trans, btree, n->c.level, n->key.k.p); six_lock_increment(&n->c.lock, SIX_LOCK_intent); mark_btree_node_locked(trans, trans->paths + new_path, n->c.level, BTREE_NODE_INTENT_LOCKED); bch2_btree_path_level_init(trans, trans->paths + new_path, n); bkey_init(&delete.k); delete.k.p = prev->key.k.p; bch2_keylist_add(&as->parent_keys, &delete); bch2_keylist_add(&as->parent_keys, &n->key); bch2_trans_verify_paths(trans); ret = bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys); if (ret) goto err_free_update; bch2_btree_interior_update_will_free_node(as, b); bch2_btree_interior_update_will_free_node(as, m); bch2_trans_verify_paths(trans); bch2_btree_update_get_open_buckets(as, n); bch2_btree_node_write_trans(trans, n, SIX_LOCK_intent, 0); bch2_btree_node_free_inmem(trans, trans->paths + path, b); bch2_btree_node_free_inmem(trans, trans->paths + sib_path, m); bch2_trans_node_add(trans, trans->paths + path, n); 
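	/*
	 * Same ordering rule as in btree_split(): the two old nodes were freed
	 * in memory above, before we drop our intent lock on n below, so
	 * another thread can't re-acquire a read lock on b or m and see stale
	 * data.
	 */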
bch2_trans_verify_paths(trans); six_unlock_intent(&n->c.lock); bch2_btree_update_done(as, trans); bch2_time_stats_update(&c->times[BCH_TIME_btree_node_merge], start_time); out: err: if (new_path) bch2_path_put(trans, new_path, true); bch2_path_put(trans, sib_path, true); bch2_trans_verify_locks(trans); if (ret == -BCH_ERR_journal_reclaim_would_deadlock) ret = 0; if (!ret) ret = bch2_trans_relock(trans); return ret; err_free_update: bch2_btree_node_free_never_used(as, trans, n); bch2_btree_update_free(as, trans); goto out; } static int get_iter_to_node(struct btree_trans *trans, struct btree_iter *iter, struct btree *b) { bch2_trans_node_iter_init(trans, iter, b->c.btree_id, b->key.k.p, BTREE_MAX_DEPTH, b->c.level, BTREE_ITER_intent); int ret = bch2_btree_iter_traverse(trans, iter); if (ret) goto err; /* has node been freed? */ if (btree_iter_path(trans, iter)->l[b->c.level].b != b) { /* node has been freed: */ BUG_ON(!btree_node_dying(b)); ret = bch_err_throw(trans->c, btree_node_dying); goto err; } BUG_ON(!btree_node_hashed(b)); return 0; err: bch2_trans_iter_exit(trans, iter); return ret; } int bch2_btree_node_rewrite(struct btree_trans *trans, struct btree_iter *iter, struct btree *b, unsigned target, unsigned flags) { struct bch_fs *c = trans->c; struct btree *n, *parent; struct btree_update *as; btree_path_idx_t new_path = 0; int ret; flags |= BCH_TRANS_COMMIT_no_enospc; struct btree_path *path = btree_iter_path(trans, iter); parent = btree_node_parent(path, b); as = bch2_btree_update_start(trans, path, b->c.level, false, target, flags); ret = PTR_ERR_OR_ZERO(as); if (ret) goto out; n = bch2_btree_node_alloc_replacement(as, trans, b); bch2_btree_build_aux_trees(n); bch2_btree_update_add_new_node(as, n); six_unlock_write(&n->c.lock); new_path = bch2_path_get_unlocked_mut(trans, iter->btree_id, n->c.level, n->key.k.p); six_lock_increment(&n->c.lock, SIX_LOCK_intent); mark_btree_node_locked(trans, trans->paths + new_path, n->c.level, BTREE_NODE_INTENT_LOCKED); bch2_btree_path_level_init(trans, trans->paths + new_path, n); trace_and_count(c, btree_node_rewrite, trans, b); if (parent) { bch2_keylist_add(&as->parent_keys, &n->key); ret = bch2_btree_insert_node(as, trans, iter->path, parent, &as->parent_keys); } else { ret = bch2_btree_set_root(as, trans, btree_iter_path(trans, iter), n, false); } if (ret) goto err; bch2_btree_interior_update_will_free_node(as, b); bch2_btree_update_get_open_buckets(as, n); bch2_btree_node_write_trans(trans, n, SIX_LOCK_intent, 0); bch2_btree_node_free_inmem(trans, btree_iter_path(trans, iter), b); bch2_trans_node_add(trans, trans->paths + iter->path, n); six_unlock_intent(&n->c.lock); bch2_btree_update_done(as, trans); out: if (new_path) bch2_path_put(trans, new_path, true); bch2_trans_downgrade(trans); return ret; err: bch2_btree_node_free_never_used(as, trans, n); bch2_btree_update_free(as, trans); goto out; } static int bch2_btree_node_rewrite_key(struct btree_trans *trans, enum btree_id btree, unsigned level, struct bkey_i *k, unsigned flags) { struct btree_iter iter; bch2_trans_node_iter_init(trans, &iter, btree, k->k.p, BTREE_MAX_DEPTH, level, 0); struct btree *b = bch2_btree_iter_peek_node(trans, &iter); int ret = PTR_ERR_OR_ZERO(b); if (ret) goto out; bool found = b && btree_ptr_hash_val(&b->key) == btree_ptr_hash_val(k); ret = found ? 
bch2_btree_node_rewrite(trans, &iter, b, 0, flags) : -ENOENT; out: bch2_trans_iter_exit(trans, &iter); return ret; } int bch2_btree_node_rewrite_pos(struct btree_trans *trans, enum btree_id btree, unsigned level, struct bpos pos, unsigned target, unsigned flags) { BUG_ON(!level); /* Traverse one depth lower to get a pointer to the node itself: */ struct btree_iter iter; bch2_trans_node_iter_init(trans, &iter, btree, pos, 0, level - 1, 0); struct btree *b = bch2_btree_iter_peek_node(trans, &iter); int ret = PTR_ERR_OR_ZERO(b); if (ret) goto err; ret = bch2_btree_node_rewrite(trans, &iter, b, target, flags); err: bch2_trans_iter_exit(trans, &iter); return ret; } int bch2_btree_node_rewrite_key_get_iter(struct btree_trans *trans, struct btree *b, unsigned flags) { struct btree_iter iter; int ret = get_iter_to_node(trans, &iter, b); if (ret) return ret == -BCH_ERR_btree_node_dying ? 0 : ret; ret = bch2_btree_node_rewrite(trans, &iter, b, 0, flags); bch2_trans_iter_exit(trans, &iter); return ret; } struct async_btree_rewrite { struct bch_fs *c; struct work_struct work; struct list_head list; enum btree_id btree_id; unsigned level; struct bkey_buf key; }; static void async_btree_node_rewrite_work(struct work_struct *work) { struct async_btree_rewrite *a = container_of(work, struct async_btree_rewrite, work); struct bch_fs *c = a->c; int ret = bch2_trans_do(c, bch2_btree_node_rewrite_key(trans, a->btree_id, a->level, a->key.k, 0)); if (ret != -ENOENT && !bch2_err_matches(ret, EROFS) && ret != -BCH_ERR_journal_shutdown) bch_err_fn_ratelimited(c, ret); spin_lock(&c->btree_node_rewrites_lock); list_del(&a->list); spin_unlock(&c->btree_node_rewrites_lock); closure_wake_up(&c->btree_node_rewrites_wait); bch2_bkey_buf_exit(&a->key, c); enumerated_ref_put(&c->writes, BCH_WRITE_REF_node_rewrite); kfree(a); } void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b) { struct async_btree_rewrite *a = kmalloc(sizeof(*a), GFP_NOFS); if (!a) return; a->c = c; a->btree_id = b->c.btree_id; a->level = b->c.level; INIT_WORK(&a->work, async_btree_node_rewrite_work); bch2_bkey_buf_init(&a->key); bch2_bkey_buf_copy(&a->key, c, &b->key); bool now = false, pending = false; spin_lock(&c->btree_node_rewrites_lock); if (c->recovery.passes_complete & BIT_ULL(BCH_RECOVERY_PASS_journal_replay) && enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_node_rewrite)) { list_add(&a->list, &c->btree_node_rewrites); now = true; } else if (!test_bit(BCH_FS_may_go_rw, &c->flags)) { list_add(&a->list, &c->btree_node_rewrites_pending); pending = true; } spin_unlock(&c->btree_node_rewrites_lock); if (now) { queue_work(c->btree_node_rewrite_worker, &a->work); } else if (pending) { /* bch2_do_pending_node_rewrites will execute */ } else { bch2_bkey_buf_exit(&a->key, c); kfree(a); } } void bch2_async_btree_node_rewrites_flush(struct bch_fs *c) { closure_wait_event(&c->btree_node_rewrites_wait, list_empty(&c->btree_node_rewrites)); } void bch2_do_pending_node_rewrites(struct bch_fs *c) { while (1) { spin_lock(&c->btree_node_rewrites_lock); struct async_btree_rewrite *a = list_pop_entry(&c->btree_node_rewrites_pending, struct async_btree_rewrite, list); if (a) list_add(&a->list, &c->btree_node_rewrites); spin_unlock(&c->btree_node_rewrites_lock); if (!a) break; enumerated_ref_get(&c->writes, BCH_WRITE_REF_node_rewrite); queue_work(c->btree_node_rewrite_worker, &a->work); } } void bch2_free_pending_node_rewrites(struct bch_fs *c) { while (1) { spin_lock(&c->btree_node_rewrites_lock); struct async_btree_rewrite *a = 
list_pop_entry(&c->btree_node_rewrites_pending, struct async_btree_rewrite, list); spin_unlock(&c->btree_node_rewrites_lock); if (!a) break; bch2_bkey_buf_exit(&a->key, c); kfree(a); } } static int __bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *iter, struct btree *b, struct btree *new_hash, struct bkey_i *new_key, unsigned commit_flags, bool skip_triggers) { struct bch_fs *c = trans->c; struct btree_iter iter2 = {}; struct btree *parent; int ret; if (!skip_triggers) { ret = bch2_key_trigger_old(trans, b->c.btree_id, b->c.level + 1, bkey_i_to_s_c(&b->key), BTREE_TRIGGER_transactional) ?: bch2_key_trigger_new(trans, b->c.btree_id, b->c.level + 1, bkey_i_to_s(new_key), BTREE_TRIGGER_transactional); if (ret) return ret; } if (new_hash) { bkey_copy(&new_hash->key, new_key); ret = bch2_btree_node_hash_insert(&c->btree_cache, new_hash, b->c.level, b->c.btree_id); BUG_ON(ret); } parent = btree_node_parent(btree_iter_path(trans, iter), b); if (parent) { bch2_trans_copy_iter(trans, &iter2, iter); iter2.path = bch2_btree_path_make_mut(trans, iter2.path, iter2.flags & BTREE_ITER_intent, _THIS_IP_); struct btree_path *path2 = btree_iter_path(trans, &iter2); BUG_ON(path2->level != b->c.level); BUG_ON(!bpos_eq(path2->pos, new_key->k.p)); btree_path_set_level_up(trans, path2); trans->paths_sorted = false; ret = bch2_btree_iter_traverse(trans, &iter2) ?: bch2_trans_update(trans, &iter2, new_key, BTREE_TRIGGER_norun); if (ret) goto err; } else { BUG_ON(btree_node_root(c, b) != b); struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, jset_u64s(new_key->k.u64s)); ret = PTR_ERR_OR_ZERO(e); if (ret) return ret; journal_entry_set(e, BCH_JSET_ENTRY_btree_root, b->c.btree_id, b->c.level, new_key, new_key->k.u64s); } ret = bch2_trans_commit(trans, NULL, NULL, commit_flags); if (ret) goto err; bch2_btree_node_lock_write_nofail(trans, btree_iter_path(trans, iter), &b->c); if (new_hash) { mutex_lock(&c->btree_cache.lock); bch2_btree_node_hash_remove(&c->btree_cache, new_hash); __bch2_btree_node_hash_remove(&c->btree_cache, b); bkey_copy(&b->key, new_key); ret = __bch2_btree_node_hash_insert(&c->btree_cache, b); BUG_ON(ret); mutex_unlock(&c->btree_cache.lock); } else { bkey_copy(&b->key, new_key); } bch2_btree_node_unlock_write(trans, btree_iter_path(trans, iter), b); out: bch2_trans_iter_exit(trans, &iter2); return ret; err: if (new_hash) { mutex_lock(&c->btree_cache.lock); bch2_btree_node_hash_remove(&c->btree_cache, b); mutex_unlock(&c->btree_cache.lock); } goto out; } int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *iter, struct btree *b, struct bkey_i *new_key, unsigned commit_flags, bool skip_triggers) { struct bch_fs *c = trans->c; struct btree *new_hash = NULL; struct btree_path *path = btree_iter_path(trans, iter); struct closure cl; int ret = 0; ret = bch2_btree_path_upgrade(trans, path, b->c.level + 1); if (ret) return ret; closure_init_stack(&cl); /* * check btree_ptr_hash_val() after @b is locked by * btree_iter_traverse(): */ if (btree_ptr_hash_val(new_key) != b->hash_val) { ret = bch2_btree_cache_cannibalize_lock(trans, &cl); if (ret) { ret = drop_locks_do(trans, (closure_sync(&cl), 0)); if (ret) return ret; } new_hash = bch2_btree_node_mem_alloc(trans, false); ret = PTR_ERR_OR_ZERO(new_hash); if (ret) goto err; } path->intent_ref++; ret = __bch2_btree_node_update_key(trans, iter, b, new_hash, new_key, commit_flags, skip_triggers); --path->intent_ref; if (new_hash) bch2_btree_node_to_freelist(c, new_hash); err: closure_sync(&cl); 
bch2_btree_cache_cannibalize_unlock(trans); return ret; } int bch2_btree_node_update_key_get_iter(struct btree_trans *trans, struct btree *b, struct bkey_i *new_key, unsigned commit_flags, bool skip_triggers) { struct btree_iter iter; int ret = get_iter_to_node(trans, &iter, b); if (ret) return ret == -BCH_ERR_btree_node_dying ? 0 : ret; bch2_bkey_drop_ptrs(bkey_i_to_s(new_key), ptr, !bch2_bkey_has_device(bkey_i_to_s(&b->key), ptr->dev)); ret = bch2_btree_node_update_key(trans, &iter, b, new_key, commit_flags, skip_triggers); bch2_trans_iter_exit(trans, &iter); return ret; } /* Init code: */ /* * Only for filesystem bringup, when first reading the btree roots or allocating * btree roots when initializing a new filesystem: */ void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b) { BUG_ON(btree_node_root(c, b)); bch2_btree_set_root_inmem(c, b); } int bch2_btree_root_alloc_fake_trans(struct btree_trans *trans, enum btree_id id, unsigned level) { struct bch_fs *c = trans->c; struct closure cl; struct btree *b; int ret; closure_init_stack(&cl); do { ret = bch2_btree_cache_cannibalize_lock(trans, &cl); closure_sync(&cl); } while (ret); b = bch2_btree_node_mem_alloc(trans, false); bch2_btree_cache_cannibalize_unlock(trans); ret = PTR_ERR_OR_ZERO(b); if (ret) return ret; set_btree_node_fake(b); set_btree_node_need_rewrite(b); b->c.level = level; b->c.btree_id = id; bkey_btree_ptr_init(&b->key); b->key.k.p = SPOS_MAX; *((u64 *) bkey_i_to_btree_ptr(&b->key)->v.start) = U64_MAX - id; bch2_bset_init_first(b, &b->data->keys); bch2_btree_build_aux_trees(b); b->data->flags = 0; btree_set_min(b, POS_MIN); btree_set_max(b, SPOS_MAX); b->data->format = bch2_btree_calc_format(b); btree_node_set_format(b, b->data->format); ret = bch2_btree_node_hash_insert(&c->btree_cache, b, b->c.level, b->c.btree_id); BUG_ON(ret); bch2_btree_set_root_inmem(c, b); six_unlock_write(&b->c.lock); six_unlock_intent(&b->c.lock); return 0; } void bch2_btree_root_alloc_fake(struct bch_fs *c, enum btree_id id, unsigned level) { bch2_trans_run(c, lockrestart_do(trans, bch2_btree_root_alloc_fake_trans(trans, id, level))); } static void bch2_btree_update_to_text(struct printbuf *out, struct btree_update *as) { prt_printf(out, "%ps: ", (void *) as->ip_started); bch2_trans_commit_flags_to_text(out, as->flags); prt_str(out, " "); bch2_btree_id_to_text(out, as->btree_id); prt_printf(out, " l=%u-%u ", as->update_level_start, as->update_level_end); bch2_bpos_to_text(out, as->node_start); prt_char(out, ' '); bch2_bpos_to_text(out, as->node_end); prt_printf(out, "\nwritten %u/%u u64s_remaining %u need_rewrite %s", as->node_written, as->node_sectors, as->node_remaining, btree_node_reawrite_reason_strs[as->node_needed_rewrite]); prt_printf(out, "\nmode=%s nodes_written=%u cl.remaining=%u journal_seq=%llu\n", bch2_btree_update_modes[as->mode], as->nodes_written, closure_nr_remaining(&as->cl), as->journal.seq); } void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c) { struct btree_update *as; mutex_lock(&c->btree_interior_update_lock); list_for_each_entry(as, &c->btree_interior_update_list, list) bch2_btree_update_to_text(out, as); mutex_unlock(&c->btree_interior_update_lock); } static bool bch2_btree_interior_updates_pending(struct bch_fs *c) { bool ret; mutex_lock(&c->btree_interior_update_lock); ret = !list_empty(&c->btree_interior_update_list); mutex_unlock(&c->btree_interior_update_lock); return ret; } bool bch2_btree_interior_updates_flush(struct bch_fs *c) { bool ret = bch2_btree_interior_updates_pending(c); 
if (ret) closure_wait_event(&c->btree_interior_update_wait, !bch2_btree_interior_updates_pending(c)); return ret; } void bch2_journal_entry_to_btree_root(struct bch_fs *c, struct jset_entry *entry) { struct btree_root *r = bch2_btree_id_root(c, entry->btree_id); mutex_lock(&c->btree_root_lock); r->level = entry->level; r->alive = true; bkey_copy(&r->key, (struct bkey_i *) entry->start); mutex_unlock(&c->btree_root_lock); } struct jset_entry * bch2_btree_roots_to_journal_entries(struct bch_fs *c, struct jset_entry *end, unsigned long skip) { unsigned i; mutex_lock(&c->btree_root_lock); for (i = 0; i < btree_id_nr_alive(c); i++) { struct btree_root *r = bch2_btree_id_root(c, i); if (r->alive && !test_bit(i, &skip)) { journal_entry_set(end, BCH_JSET_ENTRY_btree_root, i, r->level, &r->key, r->key.k.u64s); end = vstruct_next(end); } } mutex_unlock(&c->btree_root_lock); return end; } static void bch2_btree_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct btree_alloc *a) { printbuf_indent_add(out, 2); bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&a->k)); prt_newline(out); struct open_bucket *ob; unsigned i; open_bucket_for_each(c, &a->ob, ob, i) bch2_open_bucket_to_text(out, c, ob); printbuf_indent_sub(out, 2); } void bch2_btree_reserve_cache_to_text(struct printbuf *out, struct bch_fs *c) { for (unsigned i = 0; i < c->btree_reserve_cache_nr; i++) bch2_btree_alloc_to_text(out, c, &c->btree_reserve_cache[i]); } void bch2_fs_btree_interior_update_exit(struct bch_fs *c) { WARN_ON(!list_empty(&c->btree_node_rewrites)); WARN_ON(!list_empty(&c->btree_node_rewrites_pending)); if (c->btree_node_rewrite_worker) destroy_workqueue(c->btree_node_rewrite_worker); if (c->btree_interior_update_worker) destroy_workqueue(c->btree_interior_update_worker); mempool_exit(&c->btree_interior_update_pool); } void bch2_fs_btree_interior_update_init_early(struct bch_fs *c) { mutex_init(&c->btree_reserve_cache_lock); INIT_LIST_HEAD(&c->btree_interior_update_list); INIT_LIST_HEAD(&c->btree_interior_updates_unwritten); mutex_init(&c->btree_interior_update_lock); INIT_WORK(&c->btree_interior_update_work, btree_interior_update_work); INIT_LIST_HEAD(&c->btree_node_rewrites); INIT_LIST_HEAD(&c->btree_node_rewrites_pending); spin_lock_init(&c->btree_node_rewrites_lock); } int bch2_fs_btree_interior_update_init(struct bch_fs *c) { c->btree_interior_update_worker = alloc_workqueue("btree_update", WQ_UNBOUND|WQ_MEM_RECLAIM, 8); if (!c->btree_interior_update_worker) return bch_err_throw(c, ENOMEM_btree_interior_update_worker_init); c->btree_node_rewrite_worker = alloc_ordered_workqueue("btree_node_rewrite", WQ_UNBOUND); if (!c->btree_node_rewrite_worker) return bch_err_throw(c, ENOMEM_btree_interior_update_worker_init); if (mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1, sizeof(struct btree_update))) return bch_err_throw(c, ENOMEM_btree_interior_update_pool_init); return 0; }
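/*
 * Usage sketch (editor-added, not part of the file above): a hypothetical
 * caller that rewrites the level-1 interior node covering a position,
 * wrapping the call in bch2_trans_do() so transaction restarts are retried
 * the same way async_btree_node_rewrite_work() does above. The btree id,
 * level, position and flags below are illustrative assumptions only.
 */
static int example_rewrite_interior_node(struct bch_fs *c)
{
	return bch2_trans_do(c,
		bch2_btree_node_rewrite_pos(trans, BTREE_ID_extents,
					    1,		/* level: must be > 0 (interior node) */
					    POS_MIN,	/* any pos covered by the target node */
					    0,		/* target device: none */
					    0));	/* flags */
}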
/*
 * DRBG based on NIST SP800-90A
 *
 * Copyright Stephan Mueller <smueller@chronox.de>, 2014
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, and the entire permission notice in its entirety,
 *    including the disclaimer of warranties.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * ALTERNATIVELY, this product may be distributed under the terms of
 * the GNU General Public License, in which case the provisions of the GPL are
 * required INSTEAD OF the above restrictions. (This clause is
 * necessary due to a potential bad interaction between the GPL and
 * the restrictions contained in a BSD-style copyright.)
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
 * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#ifndef _DRBG_H
#define _DRBG_H

#include <linux/random.h>
#include <linux/scatterlist.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/slab.h>
#include <crypto/internal/rng.h>
#include <crypto/rng.h>
#include <linux/fips.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/workqueue.h>

/*
 * Concatenation Helper and string operation helper
 *
 * SP800-90A requires the concatenation of different data. To avoid copying
 * buffers around or allocate additional memory, the following data structure
 * is used to point to the original memory with its size. In addition, it
 * is used to build a linked list. The linked list defines the concatenation
 * of individual buffers. The order of memory block referenced in that
 * linked list determines the order of concatenation.
 */
struct drbg_string {
	const unsigned char *buf;
	size_t len;
	struct list_head list;
};

static inline void drbg_string_fill(struct drbg_string *string,
				    const unsigned char *buf, size_t len)
{
	string->buf = buf;
	string->len = len;
	INIT_LIST_HEAD(&string->list);
}

struct drbg_state;
typedef uint32_t drbg_flag_t;

struct drbg_core {
	drbg_flag_t flags;	/* flags for the cipher */
	__u8 statelen;		/* maximum state length */
	__u8 blocklen_bytes;	/* block size of output in bytes */
	char cra_name[CRYPTO_MAX_ALG_NAME]; /* mapping to kernel crypto API */
	/* kernel crypto API backend cipher name */
	char backend_cra_name[CRYPTO_MAX_ALG_NAME];
};

struct drbg_state_ops {
	int (*update)(struct drbg_state *drbg, struct list_head *seed,
		      int reseed);
	int (*generate)(struct drbg_state *drbg,
			unsigned char *buf, unsigned int buflen,
			struct list_head *addtl);
	int (*crypto_init)(struct drbg_state *drbg);
	int (*crypto_fini)(struct drbg_state *drbg);
};

struct drbg_test_data {
	struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */
};

enum drbg_seed_state {
	DRBG_SEED_STATE_UNSEEDED,
	DRBG_SEED_STATE_PARTIAL, /* Seeded with !rng_is_initialized() */
	DRBG_SEED_STATE_FULL,
};

struct drbg_state {
	struct mutex drbg_mutex;	/* lock around DRBG */
	unsigned char *V;	/* internal state 10.1.1.1 1a) */
	unsigned char *Vbuf;
	/* hash: static value 10.1.1.1 1b) hmac / ctr: key */
	unsigned char *C;
	unsigned char *Cbuf;
	/* Number of RNG requests since last reseed -- 10.1.1.1 1c) */
	size_t reseed_ctr;
	size_t reseed_threshold;
	/* some memory the DRBG can use for its operation */
	unsigned char *scratchpad;
	unsigned char *scratchpadbuf;
	void *priv_data;	/* Cipher handle */

	struct crypto_skcipher *ctr_handle;	/* CTR mode cipher handle */
	struct skcipher_request *ctr_req;	/* CTR mode request handle */
	__u8 *outscratchpadbuf;			/* CTR mode output scratchpad */
	__u8 *outscratchpad;			/* CTR mode aligned outbuf */
	struct crypto_wait ctr_wait;		/* CTR mode async wait obj */
	struct scatterlist sg_in, sg_out;	/* CTR mode SGLs */

	enum drbg_seed_state seeded;	/* DRBG fully seeded? */
	unsigned long last_seed_time;
	bool pr;		/* Prediction resistance enabled? */
	bool fips_primed;	/* Continuous test primed? */
	unsigned char *prev;	/* FIPS 140-2 continuous test value */
	struct crypto_rng *jent;
	const struct drbg_state_ops *d_ops;
	const struct drbg_core *core;
	struct drbg_string test_data;
};

static inline __u8 drbg_statelen(struct drbg_state *drbg)
{
	if (drbg && drbg->core)
		return drbg->core->statelen;
	return 0;
}

static inline __u8 drbg_blocklen(struct drbg_state *drbg)
{
	if (drbg && drbg->core)
		return drbg->core->blocklen_bytes;
	return 0;
}

static inline __u8 drbg_keylen(struct drbg_state *drbg)
{
	if (drbg && drbg->core)
		return (drbg->core->statelen - drbg->core->blocklen_bytes);
	return 0;
}

static inline size_t drbg_max_request_bytes(struct drbg_state *drbg)
{
	/* SP800-90A requires the limit 2**19 bits, but we return bytes */
	return (1 << 16);
}

static inline size_t drbg_max_addtl(struct drbg_state *drbg)
{
	/* SP800-90A requires 2**35 bytes additional info str / pers str */
#if (__BITS_PER_LONG == 32)
	/*
	 * SP800-90A allows smaller maximum numbers to be returned -- we
	 * return SIZE_MAX - 1 to allow the verification of the enforcement
	 * of this value in drbg_healthcheck_sanity.
	 */
	return (SIZE_MAX - 1);
#else
	return (1UL<<35);
#endif
}

static inline size_t drbg_max_requests(struct drbg_state *drbg)
{
	/* SP800-90A requires 2**48 maximum requests before reseeding */
	return (1<<20);
}

/*
 * This is a wrapper to the kernel crypto API function of
 * crypto_rng_generate() to allow the caller to provide additional data.
 *
 * @drng DRBG handle -- see crypto_rng_get_bytes
 * @outbuf output buffer -- see crypto_rng_get_bytes
 * @outlen length of output buffer -- see crypto_rng_get_bytes
 * @addtl_input additional information string input buffer
 * @addtllen length of additional information string buffer
 *
 * return
 *	see crypto_rng_get_bytes
 */
static inline int crypto_drbg_get_bytes_addtl(struct crypto_rng *drng,
			unsigned char *outbuf, unsigned int outlen,
			struct drbg_string *addtl)
{
	return crypto_rng_generate(drng, addtl->buf, addtl->len,
				   outbuf, outlen);
}

/*
 * TEST code
 *
 * This is a wrapper to the kernel crypto API function of
 * crypto_rng_generate() to allow the caller to provide additional data and
 * allow furnishing of test_data
 *
 * @drng DRBG handle -- see crypto_rng_get_bytes
 * @outbuf output buffer -- see crypto_rng_get_bytes
 * @outlen length of output buffer -- see crypto_rng_get_bytes
 * @addtl_input additional information string input buffer
 * @addtllen length of additional information string buffer
 * @test_data filled test data
 *
 * return
 *	see crypto_rng_get_bytes
 */
static inline int crypto_drbg_get_bytes_addtl_test(struct crypto_rng *drng,
			unsigned char *outbuf, unsigned int outlen,
			struct drbg_string *addtl,
			struct drbg_test_data *test_data)
{
	crypto_rng_set_entropy(drng, test_data->testentropy->buf,
			       test_data->testentropy->len);
	return crypto_rng_generate(drng, addtl->buf, addtl->len,
				   outbuf, outlen);
}

/*
 * TEST code
 *
 * This is a wrapper to the kernel crypto API function of
 * crypto_rng_reset() to allow the caller to provide test_data
 *
 * @drng DRBG handle -- see crypto_rng_reset
 * @pers personalization string input buffer
 * @perslen length of additional information string buffer
 * @test_data filled test data
 *
 * return
 *	see crypto_rng_reset
 */
static inline int crypto_drbg_reset_test(struct crypto_rng *drng,
					 struct drbg_string *pers,
					 struct drbg_test_data *test_data)
{
	crypto_rng_set_entropy(drng, test_data->testentropy->buf,
			       test_data->testentropy->len);
	return crypto_rng_reset(drng, pers->buf, pers->len);
}

/* DRBG type flags */
#define DRBG_CTR ((drbg_flag_t)1<<0)
#define DRBG_HMAC ((drbg_flag_t)1<<1)
#define DRBG_HASH ((drbg_flag_t)1<<2)
#define DRBG_TYPE_MASK (DRBG_CTR | DRBG_HMAC | DRBG_HASH)
/* DRBG strength flags */
#define DRBG_STRENGTH128 ((drbg_flag_t)1<<3)
#define DRBG_STRENGTH192 ((drbg_flag_t)1<<4)
#define DRBG_STRENGTH256 ((drbg_flag_t)1<<5)
#define DRBG_STRENGTH_MASK (DRBG_STRENGTH128 | DRBG_STRENGTH192 | \
			    DRBG_STRENGTH256)

enum drbg_prefixes {
	DRBG_PREFIX0 = 0x00,
	DRBG_PREFIX1,
	DRBG_PREFIX2,
	DRBG_PREFIX3
};

#endif /* _DRBG_H */
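/*
 * Usage sketch (editor-added, not part of the header above): the drbg_string
 * helper avoids copying by pointing at existing buffers; callers wrap each
 * buffer in a drbg_string and chain them on a list, and the list order is
 * the concatenation order consumed by the d_ops callbacks. The function and
 * parameter names below are illustrative only.
 */
static inline void drbg_concat_example(struct list_head *seedlist,
				       struct drbg_string *s1,
				       const unsigned char *entropy,
				       size_t entropy_len,
				       struct drbg_string *s2,
				       const unsigned char *pers,
				       size_t pers_len)
{
	INIT_LIST_HEAD(seedlist);

	/* entropy || personalization string, concatenated without copying */
	drbg_string_fill(s1, entropy, entropy_len);
	list_add_tail(&s1->list, seedlist);

	drbg_string_fill(s2, pers, pers_len);
	list_add_tail(&s2->list, seedlist);
}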
// SPDX-License-Identifier: GPL-2.0+
/*
 * F81532/F81534 USB to Serial Ports Bridge
 *
 * F81532 => 2 Serial Ports
 * F81534 => 4 Serial Ports
 *
 * Copyright (C) 2016 Feature Integration Technology Inc., (Fintek)
 * Copyright (C) 2016 Tom Tsai (Tom_Tsai@fintek.com.tw)
 * Copyright (C) 2016 Peter Hong
(Peter_Hong@fintek.com.tw) * * The F81532/F81534 had 1 control endpoint for setting, 1 endpoint bulk-out * for all serial port TX and 1 endpoint bulk-in for all serial port read in * (Read Data/MSR/LSR). * * Write URB is fixed with 512bytes, per serial port used 128Bytes. * It can be described by f81534_prepare_write_buffer() * * Read URB is 512Bytes max, per serial port used 128Bytes. * It can be described by f81534_process_read_urb() and maybe received with * 128x1,2,3,4 bytes. * */ #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/serial_reg.h> #include <linux/module.h> #include <linux/uaccess.h> /* Serial Port register Address */ #define F81534_UART_BASE_ADDRESS 0x1200 #define F81534_UART_OFFSET 0x10 #define F81534_DIVISOR_LSB_REG (0x00 + F81534_UART_BASE_ADDRESS) #define F81534_DIVISOR_MSB_REG (0x01 + F81534_UART_BASE_ADDRESS) #define F81534_INTERRUPT_ENABLE_REG (0x01 + F81534_UART_BASE_ADDRESS) #define F81534_FIFO_CONTROL_REG (0x02 + F81534_UART_BASE_ADDRESS) #define F81534_LINE_CONTROL_REG (0x03 + F81534_UART_BASE_ADDRESS) #define F81534_MODEM_CONTROL_REG (0x04 + F81534_UART_BASE_ADDRESS) #define F81534_LINE_STATUS_REG (0x05 + F81534_UART_BASE_ADDRESS) #define F81534_MODEM_STATUS_REG (0x06 + F81534_UART_BASE_ADDRESS) #define F81534_CLOCK_REG (0x08 + F81534_UART_BASE_ADDRESS) #define F81534_CONFIG1_REG (0x09 + F81534_UART_BASE_ADDRESS) #define F81534_DEF_CONF_ADDRESS_START 0x3000 #define F81534_DEF_CONF_SIZE 12 #define F81534_CUSTOM_ADDRESS_START 0x2f00 #define F81534_CUSTOM_DATA_SIZE 0x10 #define F81534_CUSTOM_NO_CUSTOM_DATA 0xff #define F81534_CUSTOM_VALID_TOKEN 0xf0 #define F81534_CONF_OFFSET 1 #define F81534_CONF_INIT_GPIO_OFFSET 4 #define F81534_CONF_WORK_GPIO_OFFSET 8 #define F81534_CONF_GPIO_SHUTDOWN 7 #define F81534_CONF_GPIO_RS232 1 #define F81534_MAX_DATA_BLOCK 64 #define F81534_MAX_BUS_RETRY 20 /* Default URB timeout for USB operations */ #define F81534_USB_MAX_RETRY 10 #define F81534_USB_TIMEOUT 2000 #define F81534_SET_GET_REGISTER 0xA0 #define F81534_NUM_PORT 4 #define F81534_UNUSED_PORT 0xff #define F81534_WRITE_BUFFER_SIZE 512 #define DRIVER_DESC "Fintek F81532/F81534" #define FINTEK_VENDOR_ID_1 0x1934 #define FINTEK_VENDOR_ID_2 0x2C42 #define FINTEK_DEVICE_ID 0x1202 #define F81534_MAX_TX_SIZE 124 #define F81534_MAX_RX_SIZE 124 #define F81534_RECEIVE_BLOCK_SIZE 128 #define F81534_MAX_RECEIVE_BLOCK_SIZE 512 #define F81534_TOKEN_RECEIVE 0x01 #define F81534_TOKEN_WRITE 0x02 #define F81534_TOKEN_TX_EMPTY 0x03 #define F81534_TOKEN_MSR_CHANGE 0x04 /* * We used interal SPI bus to access FLASH section. We must wait the SPI bus to * idle if we performed any command. 
* * SPI Bus status register: F81534_BUS_REG_STATUS * Bit 0/1 : BUSY * Bit 2 : IDLE */ #define F81534_BUS_BUSY (BIT(0) | BIT(1)) #define F81534_BUS_IDLE BIT(2) #define F81534_BUS_READ_DATA 0x1004 #define F81534_BUS_REG_STATUS 0x1003 #define F81534_BUS_REG_START 0x1002 #define F81534_BUS_REG_END 0x1001 #define F81534_CMD_READ 0x03 #define F81534_DEFAULT_BAUD_RATE 9600 #define F81534_PORT_CONF_RS232 0 #define F81534_PORT_CONF_RS485 BIT(0) #define F81534_PORT_CONF_RS485_INVERT (BIT(0) | BIT(1)) #define F81534_PORT_CONF_MODE_MASK GENMASK(1, 0) #define F81534_PORT_CONF_DISABLE_PORT BIT(3) #define F81534_PORT_CONF_NOT_EXIST_PORT BIT(7) #define F81534_PORT_UNAVAILABLE \ (F81534_PORT_CONF_DISABLE_PORT | F81534_PORT_CONF_NOT_EXIST_PORT) #define F81534_1X_RXTRIGGER 0xc3 #define F81534_8X_RXTRIGGER 0xcf /* * F81532/534 Clock registers (offset +08h) * * Bit0: UART Enable (always on) * Bit2-1: Clock source selector * 00: 1.846MHz. * 01: 18.46MHz. * 10: 24MHz. * 11: 14.77MHz. * Bit4: Auto direction(RTS) control (RTS pin Low when TX) * Bit5: Invert direction(RTS) when Bit4 enabled (RTS pin high when TX) */ #define F81534_UART_EN BIT(0) #define F81534_CLK_1_846_MHZ 0 #define F81534_CLK_18_46_MHZ BIT(1) #define F81534_CLK_24_MHZ BIT(2) #define F81534_CLK_14_77_MHZ (BIT(1) | BIT(2)) #define F81534_CLK_MASK GENMASK(2, 1) #define F81534_CLK_TX_DELAY_1BIT BIT(3) #define F81534_CLK_RS485_MODE BIT(4) #define F81534_CLK_RS485_INVERT BIT(5) static const struct usb_device_id f81534_id_table[] = { { USB_DEVICE(FINTEK_VENDOR_ID_1, FINTEK_DEVICE_ID) }, { USB_DEVICE(FINTEK_VENDOR_ID_2, FINTEK_DEVICE_ID) }, {} /* Terminating entry */ }; #define F81534_TX_EMPTY_BIT 0 struct f81534_serial_private { u8 conf_data[F81534_DEF_CONF_SIZE]; int tty_idx[F81534_NUM_PORT]; u8 setting_idx; int opened_port; struct mutex urb_mutex; }; struct f81534_port_private { struct mutex mcr_mutex; struct mutex lcr_mutex; struct work_struct lsr_work; struct usb_serial_port *port; unsigned long tx_empty; spinlock_t msr_lock; u32 baud_base; u8 shadow_mcr; u8 shadow_lcr; u8 shadow_msr; u8 shadow_clk; u8 phy_num; }; struct f81534_pin_data { const u16 reg_addr; const u8 reg_mask; }; struct f81534_port_out_pin { struct f81534_pin_data pin[3]; }; /* Pin output value for M2/M1/M0(SD) */ static const struct f81534_port_out_pin f81534_port_out_pins[] = { { { { 0x2ae8, BIT(7) }, { 0x2a90, BIT(5) }, { 0x2a90, BIT(4) } } }, { { { 0x2ae8, BIT(6) }, { 0x2ae8, BIT(0) }, { 0x2ae8, BIT(3) } } }, { { { 0x2a90, BIT(0) }, { 0x2ae8, BIT(2) }, { 0x2a80, BIT(6) } } }, { { { 0x2a90, BIT(3) }, { 0x2a90, BIT(2) }, { 0x2a90, BIT(1) } } }, }; static u32 const baudrate_table[] = { 115200, 921600, 1152000, 1500000 }; static u8 const clock_table[] = { F81534_CLK_1_846_MHZ, F81534_CLK_14_77_MHZ, F81534_CLK_18_46_MHZ, F81534_CLK_24_MHZ }; static int f81534_logic_to_phy_port(struct usb_serial *serial, struct usb_serial_port *port) { struct f81534_serial_private *serial_priv = usb_get_serial_data(port->serial); int count = 0; int i; for (i = 0; i < F81534_NUM_PORT; ++i) { if (serial_priv->conf_data[i] & F81534_PORT_UNAVAILABLE) continue; if (port->port_number == count) return i; ++count; } return -ENODEV; } static int f81534_set_register(struct usb_serial *serial, u16 reg, u8 data) { struct usb_interface *interface = serial->interface; struct usb_device *dev = serial->dev; size_t count = F81534_USB_MAX_RETRY; int status; u8 *tmp; tmp = kmalloc(sizeof(u8), GFP_KERNEL); if (!tmp) return -ENOMEM; *tmp = data; /* * Our device maybe not reply when heavily loading, We'll retry for * 
F81534_USB_MAX_RETRY times. */ while (count--) { status = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), F81534_SET_GET_REGISTER, USB_TYPE_VENDOR | USB_DIR_OUT, reg, 0, tmp, sizeof(u8), F81534_USB_TIMEOUT); if (status == sizeof(u8)) { status = 0; break; } } if (status < 0) { dev_err(&interface->dev, "%s: reg: %x data: %x failed: %d\n", __func__, reg, data, status); } kfree(tmp); return status; } static int f81534_get_register(struct usb_serial *serial, u16 reg, u8 *data) { struct usb_interface *interface = serial->interface; struct usb_device *dev = serial->dev; size_t count = F81534_USB_MAX_RETRY; int status; u8 *tmp; tmp = kmalloc(sizeof(u8), GFP_KERNEL); if (!tmp) return -ENOMEM; /* * Our device maybe not reply when heavily loading, We'll retry for * F81534_USB_MAX_RETRY times. */ while (count--) { status = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), F81534_SET_GET_REGISTER, USB_TYPE_VENDOR | USB_DIR_IN, reg, 0, tmp, sizeof(u8), F81534_USB_TIMEOUT); if (status > 0) { status = 0; break; } else if (status == 0) { status = -EIO; } } if (status < 0) { dev_err(&interface->dev, "%s: reg: %x failed: %d\n", __func__, reg, status); goto end; } *data = *tmp; end: kfree(tmp); return status; } static int f81534_set_mask_register(struct usb_serial *serial, u16 reg, u8 mask, u8 data) { int status; u8 tmp; status = f81534_get_register(serial, reg, &tmp); if (status) return status; tmp &= ~mask; tmp |= (mask & data); return f81534_set_register(serial, reg, tmp); } static int f81534_set_phy_port_register(struct usb_serial *serial, int phy, u16 reg, u8 data) { return f81534_set_register(serial, reg + F81534_UART_OFFSET * phy, data); } static int f81534_get_phy_port_register(struct usb_serial *serial, int phy, u16 reg, u8 *data) { return f81534_get_register(serial, reg + F81534_UART_OFFSET * phy, data); } static int f81534_set_port_register(struct usb_serial_port *port, u16 reg, u8 data) { struct f81534_port_private *port_priv = usb_get_serial_port_data(port); return f81534_set_register(port->serial, reg + port_priv->phy_num * F81534_UART_OFFSET, data); } static int f81534_get_port_register(struct usb_serial_port *port, u16 reg, u8 *data) { struct f81534_port_private *port_priv = usb_get_serial_port_data(port); return f81534_get_register(port->serial, reg + port_priv->phy_num * F81534_UART_OFFSET, data); } /* * If we try to access the internal flash via SPI bus, we should check the bus * status for every command. 
e.g., F81534_BUS_REG_START/F81534_BUS_REG_END */ static int f81534_wait_for_spi_idle(struct usb_serial *serial) { size_t count = F81534_MAX_BUS_RETRY; u8 tmp; int status; do { status = f81534_get_register(serial, F81534_BUS_REG_STATUS, &tmp); if (status) return status; if (tmp & F81534_BUS_BUSY) continue; if (tmp & F81534_BUS_IDLE) break; } while (--count); if (!count) { dev_err(&serial->interface->dev, "%s: timed out waiting for idle SPI bus\n", __func__); return -EIO; } return f81534_set_register(serial, F81534_BUS_REG_STATUS, tmp & ~F81534_BUS_IDLE); } static int f81534_get_spi_register(struct usb_serial *serial, u16 reg, u8 *data) { int status; status = f81534_get_register(serial, reg, data); if (status) return status; return f81534_wait_for_spi_idle(serial); } static int f81534_set_spi_register(struct usb_serial *serial, u16 reg, u8 data) { int status; status = f81534_set_register(serial, reg, data); if (status) return status; return f81534_wait_for_spi_idle(serial); } static int f81534_read_flash(struct usb_serial *serial, u32 address, size_t size, u8 *buf) { u8 tmp_buf[F81534_MAX_DATA_BLOCK]; size_t block = 0; size_t read_size; size_t count; int status; int offset; u16 reg_tmp; status = f81534_set_spi_register(serial, F81534_BUS_REG_START, F81534_CMD_READ); if (status) return status; status = f81534_set_spi_register(serial, F81534_BUS_REG_START, (address >> 16) & 0xff); if (status) return status; status = f81534_set_spi_register(serial, F81534_BUS_REG_START, (address >> 8) & 0xff); if (status) return status; status = f81534_set_spi_register(serial, F81534_BUS_REG_START, (address >> 0) & 0xff); if (status) return status; /* Continuous read mode */ do { read_size = min_t(size_t, F81534_MAX_DATA_BLOCK, size); for (count = 0; count < read_size; ++count) { /* To write F81534_BUS_REG_END when final byte */ if (size <= F81534_MAX_DATA_BLOCK && read_size == count + 1) reg_tmp = F81534_BUS_REG_END; else reg_tmp = F81534_BUS_REG_START; /* * Dummy code, force IC to generate a read pulse, the * set of value 0xf1 is dont care (any value is ok) */ status = f81534_set_spi_register(serial, reg_tmp, 0xf1); if (status) return status; status = f81534_get_spi_register(serial, F81534_BUS_READ_DATA, &tmp_buf[count]); if (status) return status; offset = count + block * F81534_MAX_DATA_BLOCK; buf[offset] = tmp_buf[count]; } size -= read_size; ++block; } while (size); return 0; } static void f81534_prepare_write_buffer(struct usb_serial_port *port, u8 *buf) { struct f81534_port_private *port_priv = usb_get_serial_port_data(port); int phy_num = port_priv->phy_num; u8 tx_len; int i; /* * The block layout is fixed with 4x128 Bytes, per 128 Bytes a port. 
* index 0: port phy idx (e.g., 0,1,2,3) * index 1: only F81534_TOKEN_WRITE * index 2: serial TX out length * index 3: fix to 0 * index 4~127: serial out data block */ for (i = 0; i < F81534_NUM_PORT; ++i) { buf[i * F81534_RECEIVE_BLOCK_SIZE] = i; buf[i * F81534_RECEIVE_BLOCK_SIZE + 1] = F81534_TOKEN_WRITE; buf[i * F81534_RECEIVE_BLOCK_SIZE + 2] = 0; buf[i * F81534_RECEIVE_BLOCK_SIZE + 3] = 0; } tx_len = kfifo_out_locked(&port->write_fifo, &buf[phy_num * F81534_RECEIVE_BLOCK_SIZE + 4], F81534_MAX_TX_SIZE, &port->lock); buf[phy_num * F81534_RECEIVE_BLOCK_SIZE + 2] = tx_len; } static int f81534_submit_writer(struct usb_serial_port *port, gfp_t mem_flags) { struct f81534_port_private *port_priv = usb_get_serial_port_data(port); struct urb *urb; unsigned long flags; int result; /* Check is any data in write_fifo */ spin_lock_irqsave(&port->lock, flags); if (kfifo_is_empty(&port->write_fifo)) { spin_unlock_irqrestore(&port->lock, flags); return 0; } spin_unlock_irqrestore(&port->lock, flags); /* Check H/W is TXEMPTY */ if (!test_and_clear_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty)) return 0; urb = port->write_urbs[0]; f81534_prepare_write_buffer(port, port->bulk_out_buffers[0]); urb->transfer_buffer_length = F81534_WRITE_BUFFER_SIZE; result = usb_submit_urb(urb, mem_flags); if (result) { set_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty); dev_err(&port->dev, "%s: submit failed: %d\n", __func__, result); return result; } usb_serial_port_softint(port); return 0; } static u32 f81534_calc_baud_divisor(u32 baudrate, u32 clockrate) { /* Round to nearest divisor */ return DIV_ROUND_CLOSEST(clockrate, baudrate); } static int f81534_find_clk(u32 baudrate) { int idx; for (idx = 0; idx < ARRAY_SIZE(baudrate_table); ++idx) { if (baudrate <= baudrate_table[idx] && baudrate_table[idx] % baudrate == 0) return idx; } return -EINVAL; } static int f81534_set_port_config(struct usb_serial_port *port, struct tty_struct *tty, u32 baudrate, u32 old_baudrate, u8 lcr) { struct f81534_port_private *port_priv = usb_get_serial_port_data(port); u32 divisor; int status; int i; int idx; u8 value; u32 baud_list[] = {baudrate, old_baudrate, F81534_DEFAULT_BAUD_RATE}; for (i = 0; i < ARRAY_SIZE(baud_list); ++i) { baudrate = baud_list[i]; if (baudrate == 0) { tty_encode_baud_rate(tty, 0, 0); return 0; } idx = f81534_find_clk(baudrate); if (idx >= 0) { tty_encode_baud_rate(tty, baudrate, baudrate); break; } } if (idx < 0) return -EINVAL; port_priv->baud_base = baudrate_table[idx]; port_priv->shadow_clk &= ~F81534_CLK_MASK; port_priv->shadow_clk |= clock_table[idx]; status = f81534_set_port_register(port, F81534_CLOCK_REG, port_priv->shadow_clk); if (status) { dev_err(&port->dev, "CLOCK_REG setting failed\n"); return status; } if (baudrate <= 1200) value = F81534_1X_RXTRIGGER; /* 128 FIFO & TL: 1x */ else value = F81534_8X_RXTRIGGER; /* 128 FIFO & TL: 8x */ status = f81534_set_port_register(port, F81534_CONFIG1_REG, value); if (status) { dev_err(&port->dev, "%s: CONFIG1 setting failed\n", __func__); return status; } if (baudrate <= 1200) value = UART_FCR_TRIGGER_1 | UART_FCR_ENABLE_FIFO; /* TL: 1 */ else value = UART_FCR_TRIGGER_8 | UART_FCR_ENABLE_FIFO; /* TL: 8 */ status = f81534_set_port_register(port, F81534_FIFO_CONTROL_REG, value); if (status) { dev_err(&port->dev, "%s: FCR setting failed\n", __func__); return status; } divisor = f81534_calc_baud_divisor(baudrate, port_priv->baud_base); mutex_lock(&port_priv->lcr_mutex); value = UART_LCR_DLAB; status = f81534_set_port_register(port, F81534_LINE_CONTROL_REG, value); if 
(status) { dev_err(&port->dev, "%s: set LCR failed\n", __func__); goto out_unlock; } value = divisor & 0xff; status = f81534_set_port_register(port, F81534_DIVISOR_LSB_REG, value); if (status) { dev_err(&port->dev, "%s: set DLAB LSB failed\n", __func__); goto out_unlock; } value = (divisor >> 8) & 0xff; status = f81534_set_port_register(port, F81534_DIVISOR_MSB_REG, value); if (status) { dev_err(&port->dev, "%s: set DLAB MSB failed\n", __func__); goto out_unlock; } value = lcr | (port_priv->shadow_lcr & UART_LCR_SBC); status = f81534_set_port_register(port, F81534_LINE_CONTROL_REG, value); if (status) { dev_err(&port->dev, "%s: set LCR failed\n", __func__); goto out_unlock; } port_priv->shadow_lcr = value; out_unlock: mutex_unlock(&port_priv->lcr_mutex); return status; } static int f81534_break_ctl(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct f81534_port_private *port_priv = usb_get_serial_port_data(port); int status; mutex_lock(&port_priv->lcr_mutex); if (break_state) port_priv->shadow_lcr |= UART_LCR_SBC; else port_priv->shadow_lcr &= ~UART_LCR_SBC; status = f81534_set_port_register(port, F81534_LINE_CONTROL_REG, port_priv->shadow_lcr); if (status) dev_err(&port->dev, "set break failed: %d\n", status); mutex_unlock(&port_priv->lcr_mutex); return status; } static int f81534_update_mctrl(struct usb_serial_port *port, unsigned int set, unsigned int clear) { struct f81534_port_private *port_priv = usb_get_serial_port_data(port); int status; u8 tmp; if (((set | clear) & (TIOCM_DTR | TIOCM_RTS)) == 0) return 0; /* no change */ mutex_lock(&port_priv->mcr_mutex); /* 'Set' takes precedence over 'Clear' */ clear &= ~set; /* Always enable UART_MCR_OUT2 */ tmp = UART_MCR_OUT2 | port_priv->shadow_mcr; if (clear & TIOCM_DTR) tmp &= ~UART_MCR_DTR; if (clear & TIOCM_RTS) tmp &= ~UART_MCR_RTS; if (set & TIOCM_DTR) tmp |= UART_MCR_DTR; if (set & TIOCM_RTS) tmp |= UART_MCR_RTS; status = f81534_set_port_register(port, F81534_MODEM_CONTROL_REG, tmp); if (status < 0) { dev_err(&port->dev, "%s: MCR write failed\n", __func__); mutex_unlock(&port_priv->mcr_mutex); return status; } port_priv->shadow_mcr = tmp; mutex_unlock(&port_priv->mcr_mutex); return 0; } /* * This function will search the data area with token F81534_CUSTOM_VALID_TOKEN * for latest configuration index. If nothing found * (*index = F81534_CUSTOM_NO_CUSTOM_DATA), We'll load default configure in * F81534_DEF_CONF_ADDRESS_START section. * * Due to we only use block0 to save data, so *index should be 0 or * F81534_CUSTOM_NO_CUSTOM_DATA. */ static int f81534_find_config_idx(struct usb_serial *serial, u8 *index) { u8 tmp; int status; status = f81534_read_flash(serial, F81534_CUSTOM_ADDRESS_START, 1, &tmp); if (status) { dev_err(&serial->interface->dev, "%s: read failed: %d\n", __func__, status); return status; } /* We'll use the custom data when the data is valid. */ if (tmp == F81534_CUSTOM_VALID_TOKEN) *index = 0; else *index = F81534_CUSTOM_NO_CUSTOM_DATA; return 0; } /* * The F81532/534 will not report serial port to USB serial subsystem when * H/W DCD/DSR/CTS/RI/RX pin connected to ground. * * To detect RX pin status, we'll enable MCR interal loopback, disable it and * delayed for 60ms. It connected to ground If LSR register report UART_LSR_BI. 
*/ static bool f81534_check_port_hw_disabled(struct usb_serial *serial, int phy) { int status; u8 old_mcr; u8 msr; u8 lsr; u8 msr_mask; msr_mask = UART_MSR_DCD | UART_MSR_RI | UART_MSR_DSR | UART_MSR_CTS; status = f81534_get_phy_port_register(serial, phy, F81534_MODEM_STATUS_REG, &msr); if (status) return false; if ((msr & msr_mask) != msr_mask) return false; status = f81534_set_phy_port_register(serial, phy, F81534_FIFO_CONTROL_REG, UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); if (status) return false; status = f81534_get_phy_port_register(serial, phy, F81534_MODEM_CONTROL_REG, &old_mcr); if (status) return false; status = f81534_set_phy_port_register(serial, phy, F81534_MODEM_CONTROL_REG, UART_MCR_LOOP); if (status) return false; status = f81534_set_phy_port_register(serial, phy, F81534_MODEM_CONTROL_REG, 0x0); if (status) return false; msleep(60); status = f81534_get_phy_port_register(serial, phy, F81534_LINE_STATUS_REG, &lsr); if (status) return false; status = f81534_set_phy_port_register(serial, phy, F81534_MODEM_CONTROL_REG, old_mcr); if (status) return false; if ((lsr & UART_LSR_BI) == UART_LSR_BI) return true; return false; } /* * We had 2 generation of F81532/534 IC. All has an internal storage. * * 1st is pure USB-to-TTL RS232 IC and designed for 4 ports only, no any * internal data will used. All mode and gpio control should manually set * by AP or Driver and all storage space value are 0xff. The * f81534_calc_num_ports() will run to final we marked as "oldest version" * for this IC. * * 2rd is designed to more generic to use any transceiver and this is our * mass production type. We'll save data in F81534_CUSTOM_ADDRESS_START * (0x2f00) with 9bytes. The 1st byte is a indicater. If the token is * F81534_CUSTOM_VALID_TOKEN(0xf0), the IC is 2nd gen type, the following * 4bytes save port mode (0:RS232/1:RS485 Invert/2:RS485), and the last * 4bytes save GPIO state(value from 0~7 to represent 3 GPIO output pin). * The f81534_calc_num_ports() will run to "new style" with checking * F81534_PORT_UNAVAILABLE section. */ static int f81534_calc_num_ports(struct usb_serial *serial, struct usb_serial_endpoints *epds) { struct f81534_serial_private *serial_priv; struct device *dev = &serial->interface->dev; int size_bulk_in = usb_endpoint_maxp(epds->bulk_in[0]); int size_bulk_out = usb_endpoint_maxp(epds->bulk_out[0]); u8 num_port = 0; int index = 0; int status; int i; if (size_bulk_out != F81534_WRITE_BUFFER_SIZE || size_bulk_in != F81534_MAX_RECEIVE_BLOCK_SIZE) { dev_err(dev, "unsupported endpoint max packet size\n"); return -ENODEV; } serial_priv = devm_kzalloc(&serial->interface->dev, sizeof(*serial_priv), GFP_KERNEL); if (!serial_priv) return -ENOMEM; usb_set_serial_data(serial, serial_priv); mutex_init(&serial_priv->urb_mutex); /* Check had custom setting */ status = f81534_find_config_idx(serial, &serial_priv->setting_idx); if (status) { dev_err(&serial->interface->dev, "%s: find idx failed: %d\n", __func__, status); return status; } /* * We'll read custom data only when data available, otherwise we'll * read default value instead. 
*/ if (serial_priv->setting_idx != F81534_CUSTOM_NO_CUSTOM_DATA) { status = f81534_read_flash(serial, F81534_CUSTOM_ADDRESS_START + F81534_CONF_OFFSET, sizeof(serial_priv->conf_data), serial_priv->conf_data); if (status) { dev_err(&serial->interface->dev, "%s: get custom data failed: %d\n", __func__, status); return status; } dev_dbg(&serial->interface->dev, "%s: read config from block: %d\n", __func__, serial_priv->setting_idx); } else { /* Read default board setting */ status = f81534_read_flash(serial, F81534_DEF_CONF_ADDRESS_START, sizeof(serial_priv->conf_data), serial_priv->conf_data); if (status) { dev_err(&serial->interface->dev, "%s: read failed: %d\n", __func__, status); return status; } dev_dbg(&serial->interface->dev, "%s: read default config\n", __func__); } /* New style, find all possible ports */ for (i = 0; i < F81534_NUM_PORT; ++i) { if (f81534_check_port_hw_disabled(serial, i)) serial_priv->conf_data[i] |= F81534_PORT_UNAVAILABLE; if (serial_priv->conf_data[i] & F81534_PORT_UNAVAILABLE) continue; ++num_port; } if (!num_port) { dev_warn(&serial->interface->dev, "no config found, assuming 4 ports\n"); num_port = 4; /* Nothing found, oldest version IC */ } /* Assign phy-to-logic mapping */ for (i = 0; i < F81534_NUM_PORT; ++i) { if (serial_priv->conf_data[i] & F81534_PORT_UNAVAILABLE) continue; serial_priv->tty_idx[i] = index++; dev_dbg(&serial->interface->dev, "%s: phy_num: %d, tty_idx: %d\n", __func__, i, serial_priv->tty_idx[i]); } /* * Setup bulk-out endpoint multiplexing. All ports share the same * bulk-out endpoint. */ BUILD_BUG_ON(ARRAY_SIZE(epds->bulk_out) < F81534_NUM_PORT); for (i = 1; i < num_port; ++i) epds->bulk_out[i] = epds->bulk_out[0]; epds->num_bulk_out = num_port; return num_port; } static void f81534_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { u8 new_lcr = 0; int status; u32 baud; u32 old_baud; if (C_BAUD(tty) == B0) f81534_update_mctrl(port, 0, TIOCM_DTR | TIOCM_RTS); else if (old_termios && (old_termios->c_cflag & CBAUD) == B0) f81534_update_mctrl(port, TIOCM_DTR | TIOCM_RTS, 0); if (C_PARENB(tty)) { new_lcr |= UART_LCR_PARITY; if (!C_PARODD(tty)) new_lcr |= UART_LCR_EPAR; if (C_CMSPAR(tty)) new_lcr |= UART_LCR_SPAR; } if (C_CSTOPB(tty)) new_lcr |= UART_LCR_STOP; new_lcr |= UART_LCR_WLEN(tty_get_char_size(tty->termios.c_cflag)); baud = tty_get_baud_rate(tty); if (!baud) return; if (old_termios) old_baud = tty_termios_baud_rate(old_termios); else old_baud = F81534_DEFAULT_BAUD_RATE; dev_dbg(&port->dev, "%s: baud: %d\n", __func__, baud); status = f81534_set_port_config(port, tty, baud, old_baud, new_lcr); if (status < 0) { dev_err(&port->dev, "%s: set port config failed: %d\n", __func__, status); } } static int f81534_submit_read_urb(struct usb_serial *serial, gfp_t flags) { return usb_serial_generic_submit_read_urbs(serial->port[0], flags); } static void f81534_msr_changed(struct usb_serial_port *port, u8 msr) { struct f81534_port_private *port_priv = usb_get_serial_port_data(port); struct tty_struct *tty; unsigned long flags; u8 old_msr; if (!(msr & UART_MSR_ANY_DELTA)) return; spin_lock_irqsave(&port_priv->msr_lock, flags); old_msr = port_priv->shadow_msr; port_priv->shadow_msr = msr; spin_unlock_irqrestore(&port_priv->msr_lock, flags); dev_dbg(&port->dev, "%s: MSR from %02x to %02x\n", __func__, old_msr, msr); /* Update input line counters */ if (msr & UART_MSR_DCTS) port->icount.cts++; if (msr & UART_MSR_DDSR) port->icount.dsr++; if (msr & UART_MSR_DDCD) port->icount.dcd++; if (msr & 
UART_MSR_TERI) port->icount.rng++; wake_up_interruptible(&port->port.delta_msr_wait); if (!(msr & UART_MSR_DDCD)) return; dev_dbg(&port->dev, "%s: DCD Changed: phy_num: %d from %x to %x\n", __func__, port_priv->phy_num, old_msr, msr); tty = tty_port_tty_get(&port->port); if (!tty) return; usb_serial_handle_dcd_change(port, tty, msr & UART_MSR_DCD); tty_kref_put(tty); } static int f81534_read_msr(struct usb_serial_port *port) { struct f81534_port_private *port_priv = usb_get_serial_port_data(port); unsigned long flags; int status; u8 msr; /* Get MSR initial value */ status = f81534_get_port_register(port, F81534_MODEM_STATUS_REG, &msr); if (status) return status; /* Force update current state */ spin_lock_irqsave(&port_priv->msr_lock, flags); port_priv->shadow_msr = msr; spin_unlock_irqrestore(&port_priv->msr_lock, flags); return 0; } static int f81534_open(struct tty_struct *tty, struct usb_serial_port *port) { struct f81534_serial_private *serial_priv = usb_get_serial_data(port->serial); struct f81534_port_private *port_priv = usb_get_serial_port_data(port); int status; status = f81534_set_port_register(port, F81534_FIFO_CONTROL_REG, UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); if (status) { dev_err(&port->dev, "%s: Clear FIFO failed: %d\n", __func__, status); return status; } if (tty) f81534_set_termios(tty, port, NULL); status = f81534_read_msr(port); if (status) return status; mutex_lock(&serial_priv->urb_mutex); /* Submit Read URBs for first port opened */ if (!serial_priv->opened_port) { status = f81534_submit_read_urb(port->serial, GFP_KERNEL); if (status) goto exit; } serial_priv->opened_port++; exit: mutex_unlock(&serial_priv->urb_mutex); set_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty); return status; } static void f81534_close(struct usb_serial_port *port) { struct f81534_serial_private *serial_priv = usb_get_serial_data(port->serial); struct usb_serial_port *port0 = port->serial->port[0]; unsigned long flags; size_t i; usb_kill_urb(port->write_urbs[0]); spin_lock_irqsave(&port->lock, flags); kfifo_reset_out(&port->write_fifo); spin_unlock_irqrestore(&port->lock, flags); /* Kill Read URBs when final port closed */ mutex_lock(&serial_priv->urb_mutex); serial_priv->opened_port--; if (!serial_priv->opened_port) { for (i = 0; i < ARRAY_SIZE(port0->read_urbs); ++i) usb_kill_urb(port0->read_urbs[i]); } mutex_unlock(&serial_priv->urb_mutex); } static void f81534_get_serial_info(struct tty_struct *tty, struct serial_struct *ss) { struct usb_serial_port *port = tty->driver_data; struct f81534_port_private *port_priv; port_priv = usb_get_serial_port_data(port); ss->baud_base = port_priv->baud_base; } static void f81534_process_per_serial_block(struct usb_serial_port *port, u8 *data) { struct f81534_port_private *port_priv = usb_get_serial_port_data(port); int phy_num = data[0]; size_t read_size = 0; size_t i; char tty_flag; int status; u8 lsr; /* * The block layout is 128 Bytes * index 0: port phy idx (e.g., 0,1,2,3), * index 1: It's could be * F81534_TOKEN_RECEIVE * F81534_TOKEN_TX_EMPTY * F81534_TOKEN_MSR_CHANGE * index 2: serial in size (data+lsr, must be even) * meaningful for F81534_TOKEN_RECEIVE only * index 3: current MSR with this device * index 4~127: serial in data block (data+lsr, must be even) */ switch (data[1]) { case F81534_TOKEN_TX_EMPTY: set_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty); /* Try to submit writer */ status = f81534_submit_writer(port, GFP_ATOMIC); if (status) dev_err(&port->dev, "%s: submit failed\n", __func__); return; case 
F81534_TOKEN_MSR_CHANGE: f81534_msr_changed(port, data[3]); return; case F81534_TOKEN_RECEIVE: read_size = data[2]; if (read_size > F81534_MAX_RX_SIZE) { dev_err(&port->dev, "%s: phy: %d read_size: %zu larger than: %d\n", __func__, phy_num, read_size, F81534_MAX_RX_SIZE); return; } break; default: dev_warn(&port->dev, "%s: unknown token: %02x\n", __func__, data[1]); return; } for (i = 4; i < 4 + read_size; i += 2) { tty_flag = TTY_NORMAL; lsr = data[i + 1]; if (lsr & UART_LSR_BRK_ERROR_BITS) { if (lsr & UART_LSR_BI) { tty_flag = TTY_BREAK; port->icount.brk++; usb_serial_handle_break(port); } else if (lsr & UART_LSR_PE) { tty_flag = TTY_PARITY; port->icount.parity++; } else if (lsr & UART_LSR_FE) { tty_flag = TTY_FRAME; port->icount.frame++; } if (lsr & UART_LSR_OE) { port->icount.overrun++; tty_insert_flip_char(&port->port, 0, TTY_OVERRUN); } schedule_work(&port_priv->lsr_work); } if (port->sysrq) { if (usb_serial_handle_sysrq_char(port, data[i])) continue; } tty_insert_flip_char(&port->port, data[i], tty_flag); } tty_flip_buffer_push(&port->port); } static void f81534_process_read_urb(struct urb *urb) { struct f81534_serial_private *serial_priv; struct usb_serial_port *port; struct usb_serial *serial; u8 *buf; int phy_port_num; int tty_port_num; size_t i; if (!urb->actual_length || urb->actual_length % F81534_RECEIVE_BLOCK_SIZE) { return; } port = urb->context; serial = port->serial; buf = urb->transfer_buffer; serial_priv = usb_get_serial_data(serial); for (i = 0; i < urb->actual_length; i += F81534_RECEIVE_BLOCK_SIZE) { phy_port_num = buf[i]; if (phy_port_num >= F81534_NUM_PORT) { dev_err(&port->dev, "%s: phy_port_num: %d larger than: %d\n", __func__, phy_port_num, F81534_NUM_PORT); continue; } tty_port_num = serial_priv->tty_idx[phy_port_num]; port = serial->port[tty_port_num]; if (tty_port_initialized(&port->port)) f81534_process_per_serial_block(port, &buf[i]); } } static void f81534_write_usb_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; switch (urb->status) { case 0: break; case -ENOENT: case -ECONNRESET: case -ESHUTDOWN: dev_dbg(&port->dev, "%s - urb stopped: %d\n", __func__, urb->status); return; case -EPIPE: dev_err(&port->dev, "%s - urb stopped: %d\n", __func__, urb->status); return; default: dev_dbg(&port->dev, "%s - nonzero urb status: %d\n", __func__, urb->status); break; } } static void f81534_lsr_worker(struct work_struct *work) { struct f81534_port_private *port_priv; struct usb_serial_port *port; int status; u8 tmp; port_priv = container_of(work, struct f81534_port_private, lsr_work); port = port_priv->port; status = f81534_get_port_register(port, F81534_LINE_STATUS_REG, &tmp); if (status) dev_warn(&port->dev, "read LSR failed: %d\n", status); } static int f81534_set_port_output_pin(struct usb_serial_port *port) { struct f81534_serial_private *serial_priv; struct f81534_port_private *port_priv; struct usb_serial *serial; const struct f81534_port_out_pin *pins; int status; int i; u8 value; u8 idx; serial = port->serial; serial_priv = usb_get_serial_data(serial); port_priv = usb_get_serial_port_data(port); idx = F81534_CONF_INIT_GPIO_OFFSET + port_priv->phy_num; value = serial_priv->conf_data[idx]; if (value >= F81534_CONF_GPIO_SHUTDOWN) { /* * Newer IC configure will make transceiver in shutdown mode on * initial power on. We need enable it before using UARTs. 
*/ idx = F81534_CONF_WORK_GPIO_OFFSET + port_priv->phy_num; value = serial_priv->conf_data[idx]; if (value >= F81534_CONF_GPIO_SHUTDOWN) value = F81534_CONF_GPIO_RS232; } pins = &f81534_port_out_pins[port_priv->phy_num]; for (i = 0; i < ARRAY_SIZE(pins->pin); ++i) { status = f81534_set_mask_register(serial, pins->pin[i].reg_addr, pins->pin[i].reg_mask, value & BIT(i) ? pins->pin[i].reg_mask : 0); if (status) return status; } dev_dbg(&port->dev, "Output pin (M0/M1/M2): %d\n", value); return 0; } static int f81534_port_probe(struct usb_serial_port *port) { struct f81534_serial_private *serial_priv; struct f81534_port_private *port_priv; int ret; u8 value; serial_priv = usb_get_serial_data(port->serial); port_priv = devm_kzalloc(&port->dev, sizeof(*port_priv), GFP_KERNEL); if (!port_priv) return -ENOMEM; /* * We'll make tx frame error when baud rate from 384~500kps. So we'll * delay all tx data frame with 1bit. */ port_priv->shadow_clk = F81534_UART_EN | F81534_CLK_TX_DELAY_1BIT; spin_lock_init(&port_priv->msr_lock); mutex_init(&port_priv->mcr_mutex); mutex_init(&port_priv->lcr_mutex); INIT_WORK(&port_priv->lsr_work, f81534_lsr_worker); /* Assign logic-to-phy mapping */ ret = f81534_logic_to_phy_port(port->serial, port); if (ret < 0) return ret; port_priv->phy_num = ret; port_priv->port = port; usb_set_serial_port_data(port, port_priv); dev_dbg(&port->dev, "%s: port_number: %d, phy_num: %d\n", __func__, port->port_number, port_priv->phy_num); /* * The F81532/534 will hang-up when enable LSR interrupt in IER and * occur data overrun. So we'll disable the LSR interrupt in probe() * and submit the LSR worker to clear LSR state when reported LSR error * bit with bulk-in data in f81534_process_per_serial_block(). */ ret = f81534_set_port_register(port, F81534_INTERRUPT_ENABLE_REG, UART_IER_RDI | UART_IER_THRI | UART_IER_MSI); if (ret) return ret; value = serial_priv->conf_data[port_priv->phy_num]; switch (value & F81534_PORT_CONF_MODE_MASK) { case F81534_PORT_CONF_RS485_INVERT: port_priv->shadow_clk |= F81534_CLK_RS485_MODE | F81534_CLK_RS485_INVERT; dev_dbg(&port->dev, "RS485 invert mode\n"); break; case F81534_PORT_CONF_RS485: port_priv->shadow_clk |= F81534_CLK_RS485_MODE; dev_dbg(&port->dev, "RS485 mode\n"); break; default: case F81534_PORT_CONF_RS232: dev_dbg(&port->dev, "RS232 mode\n"); break; } return f81534_set_port_output_pin(port); } static void f81534_port_remove(struct usb_serial_port *port) { struct f81534_port_private *port_priv = usb_get_serial_port_data(port); flush_work(&port_priv->lsr_work); } static int f81534_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct f81534_port_private *port_priv = usb_get_serial_port_data(port); int status; int r; u8 msr; u8 mcr; /* Read current MSR from device */ status = f81534_get_port_register(port, F81534_MODEM_STATUS_REG, &msr); if (status) return status; mutex_lock(&port_priv->mcr_mutex); mcr = port_priv->shadow_mcr; mutex_unlock(&port_priv->mcr_mutex); r = (mcr & UART_MCR_DTR ? TIOCM_DTR : 0) | (mcr & UART_MCR_RTS ? TIOCM_RTS : 0) | (msr & UART_MSR_CTS ? TIOCM_CTS : 0) | (msr & UART_MSR_DCD ? TIOCM_CAR : 0) | (msr & UART_MSR_RI ? TIOCM_RI : 0) | (msr & UART_MSR_DSR ? 
TIOCM_DSR : 0); return r; } static int f81534_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; return f81534_update_mctrl(port, set, clear); } static void f81534_dtr_rts(struct usb_serial_port *port, int on) { if (on) f81534_update_mctrl(port, TIOCM_DTR | TIOCM_RTS, 0); else f81534_update_mctrl(port, 0, TIOCM_DTR | TIOCM_RTS); } static int f81534_write(struct tty_struct *tty, struct usb_serial_port *port, const u8 *buf, int count) { int bytes_out, status; if (!count) return 0; bytes_out = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock); status = f81534_submit_writer(port, GFP_ATOMIC); if (status) { dev_err(&port->dev, "%s: submit failed\n", __func__); return status; } return bytes_out; } static bool f81534_tx_empty(struct usb_serial_port *port) { struct f81534_port_private *port_priv = usb_get_serial_port_data(port); return test_bit(F81534_TX_EMPTY_BIT, &port_priv->tx_empty); } static int f81534_resume(struct usb_serial *serial) { struct f81534_serial_private *serial_priv = usb_get_serial_data(serial); struct usb_serial_port *port; int error = 0; int status; size_t i; /* * We'll register port 0 bulkin when port had opened, It'll take all * port received data, MSR register change and TX_EMPTY information. */ mutex_lock(&serial_priv->urb_mutex); if (serial_priv->opened_port) { status = f81534_submit_read_urb(serial, GFP_NOIO); if (status) { mutex_unlock(&serial_priv->urb_mutex); return status; } } mutex_unlock(&serial_priv->urb_mutex); for (i = 0; i < serial->num_ports; i++) { port = serial->port[i]; if (!tty_port_initialized(&port->port)) continue; status = f81534_submit_writer(port, GFP_NOIO); if (status) { dev_err(&port->dev, "%s: submit failed\n", __func__); ++error; } } if (error) return -EIO; return 0; } static struct usb_serial_driver f81534_device = { .driver = { .name = "f81534", }, .description = DRIVER_DESC, .id_table = f81534_id_table, .num_bulk_in = 1, .num_bulk_out = 1, .open = f81534_open, .close = f81534_close, .write = f81534_write, .tx_empty = f81534_tx_empty, .calc_num_ports = f81534_calc_num_ports, .port_probe = f81534_port_probe, .port_remove = f81534_port_remove, .break_ctl = f81534_break_ctl, .dtr_rts = f81534_dtr_rts, .process_read_urb = f81534_process_read_urb, .get_serial = f81534_get_serial_info, .tiocmget = f81534_tiocmget, .tiocmset = f81534_tiocmset, .write_bulk_callback = f81534_write_usb_callback, .set_termios = f81534_set_termios, .resume = f81534_resume, }; static struct usb_serial_driver *const serial_drivers[] = { &f81534_device, NULL }; module_usb_serial_driver(serial_drivers, f81534_id_table); MODULE_DEVICE_TABLE(usb, f81534_id_table); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_AUTHOR("Peter Hong <Peter_Hong@fintek.com.tw>"); MODULE_AUTHOR("Tom Tsai <Tom_Tsai@fintek.com.tw>"); MODULE_LICENSE("GPL");
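As a reference for the 128-byte bulk-in block that f81534_process_per_serial_block() decodes, the following stand-alone sketch parses the header fields described in the driver's comment (phy index, token, data length, MSR, then data+LSR byte pairs). It is illustrative only: the struct, the helper name and the validation policy are assumptions, not part of the driver, and the real token constants (F81534_TOKEN_*) live in the driver source.

#include <stdint.h>
#include <stddef.h>

#define F81534_RX_BLOCK_SIZE	128
#define F81534_RX_HDR_SIZE	4
#define F81534_RX_MAX_DATA	(F81534_RX_BLOCK_SIZE - F81534_RX_HDR_SIZE)

/* Hypothetical view of one receive block; offsets follow the driver comment. */
struct f81534_rx_block_view {
	uint8_t phy_num;	/* index 0: physical port index (e.g. 0..3) */
	uint8_t token;		/* index 1: RECEIVE / TX_EMPTY / MSR_CHANGE */
	uint8_t data_len;	/* index 2: bytes of data+LSR (RECEIVE only) */
	uint8_t msr;		/* index 3: MSR snapshot for this block */
	const uint8_t *pairs;	/* index 4..127: (data byte, LSR byte) pairs */
};

/* Returns 0 on success, -1 if the length field is inconsistent. */
static int f81534_parse_rx_block(const uint8_t *block,
				 struct f81534_rx_block_view *out)
{
	out->phy_num  = block[0];
	out->token    = block[1];
	out->data_len = block[2];
	out->msr      = block[3];
	out->pairs    = &block[4];

	/* The payload is data+LSR pairs, so it must be even and in range. */
	if (out->data_len > F81534_RX_MAX_DATA || (out->data_len & 1))
		return -1;
	return 0;
}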
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2002 Pavel Machek <pavel@ucw.cz> * Copyright (C) 2002-2005 by David Brownell */ // #define DEBUG // error path messages, extra info // #define VERBOSE // more; success messages #include <linux/module.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/workqueue.h> #include <linux/mii.h> #include <linux/crc32.h> #include <linux/usb.h> #include <linux/usb/cdc.h> #include <linux/usb/usbnet.h> /* * All known Zaurii lie about their standards conformance. At least * the earliest SA-1100 models lie by saying they support CDC Ethernet. * Some later models (especially PXA-25x and PXA-27x based ones) lie * and say they support CDC MDLM (for access to cell phone modems). * * There are non-Zaurus products that use these same protocols too. * * The annoying thing is that at the same time Sharp was developing * that annoying standards-breaking software, the Linux community had * a simple "CDC Subset" working reliably on the same SA-1100 hardware. * That is, the same functionality but not violating standards. * * The CDC Ethernet nonconformance points are troublesome to hosts * with a true CDC Ethernet implementation: * - Framing appends a CRC, which the spec says drivers "must not" do; * - Transfers data in altsetting zero, instead of altsetting 1; * - All these peripherals use the same ethernet address. * * The CDC MDLM nonconformance is less immediately troublesome, since all * MDLM implementations are quasi-proprietary anyway. 
*/ static struct sk_buff * zaurus_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { int padlen; struct sk_buff *skb2; padlen = 2; if (!skb_cloned(skb)) { int tailroom = skb_tailroom(skb); if ((padlen + 4) <= tailroom) goto done; } skb2 = skb_copy_expand(skb, 0, 4 + padlen, flags); dev_kfree_skb_any(skb); skb = skb2; if (skb) { u32 fcs; done: fcs = crc32_le(~0, skb->data, skb->len); fcs = ~fcs; skb_put_u8(skb, fcs & 0xff); skb_put_u8(skb, (fcs >> 8) & 0xff); skb_put_u8(skb, (fcs >> 16) & 0xff); skb_put_u8(skb, (fcs >> 24) & 0xff); } return skb; } static int zaurus_bind(struct usbnet *dev, struct usb_interface *intf) { /* Belcarra's funky framing has other options; mostly * TRAILERS (!) with 4 bytes CRC, and maybe 2 pad bytes. */ dev->net->hard_header_len += 6; dev->rx_urb_size = dev->net->hard_header_len + dev->net->mtu; return usbnet_generic_cdc_bind(dev, intf); } /* PDA style devices are always connected if present */ static int always_connected (struct usbnet *dev) { return 0; } static const struct driver_info zaurus_sl5x00_info = { .description = "Sharp Zaurus SL-5x00", .flags = FLAG_POINTTOPOINT | FLAG_FRAMING_Z, .check_connect = always_connected, .bind = zaurus_bind, .unbind = usbnet_cdc_unbind, .tx_fixup = zaurus_tx_fixup, }; #define ZAURUS_STRONGARM_INFO ((unsigned long)&zaurus_sl5x00_info) static const struct driver_info zaurus_pxa_info = { .description = "Sharp Zaurus, PXA-2xx based", .flags = FLAG_POINTTOPOINT | FLAG_FRAMING_Z, .check_connect = always_connected, .bind = zaurus_bind, .unbind = usbnet_cdc_unbind, .tx_fixup = zaurus_tx_fixup, }; #define ZAURUS_PXA_INFO ((unsigned long)&zaurus_pxa_info) static const struct driver_info olympus_mxl_info = { .description = "Olympus R1000", .flags = FLAG_POINTTOPOINT | FLAG_FRAMING_Z, .check_connect = always_connected, .bind = zaurus_bind, .unbind = usbnet_cdc_unbind, .tx_fixup = zaurus_tx_fixup, }; #define OLYMPUS_MXL_INFO ((unsigned long)&olympus_mxl_info) /* Some more recent products using Lineo/Belcarra code will wrongly claim * CDC MDLM conformance. They aren't conformant: data endpoints live * in the control interface, there's no data interface, and it's not used * to talk to a cell phone radio. But at least we can detect these two * pseudo-classes, rather than growing this product list with entries for * each new nonconformant product (sigh). */ static const u8 safe_guid[16] = { 0x5d, 0x34, 0xcf, 0x66, 0x11, 0x18, 0x11, 0xd6, 0xa2, 0x1a, 0x00, 0x01, 0x02, 0xca, 0x9a, 0x7f, }; static const u8 blan_guid[16] = { 0x74, 0xf0, 0x3d, 0xbd, 0x1e, 0xc1, 0x44, 0x70, 0xa3, 0x67, 0x71, 0x34, 0xc9, 0xf5, 0x54, 0x37, }; static int blan_mdlm_bind(struct usbnet *dev, struct usb_interface *intf) { u8 *buf = intf->cur_altsetting->extra; int len = intf->cur_altsetting->extralen; struct usb_cdc_mdlm_desc *desc = NULL; struct usb_cdc_mdlm_detail_desc *detail = NULL; while (len > 3) { if (buf [1] != USB_DT_CS_INTERFACE) goto next_desc; /* use bDescriptorSubType, and just verify that we get a * "BLAN" (or "SAFE") descriptor. */ switch (buf [2]) { case USB_CDC_MDLM_TYPE: if (desc) { dev_dbg(&intf->dev, "extra MDLM\n"); goto bad_desc; } desc = (void *) buf; if (desc->bLength != sizeof *desc) { dev_dbg(&intf->dev, "MDLM len %u\n", desc->bLength); goto bad_desc; } /* expect bcdVersion 1.0, ignore */ if (memcmp(&desc->bGUID, blan_guid, 16) && memcmp(&desc->bGUID, safe_guid, 16)) { /* hey, this one might _really_ be MDLM! 
*/ dev_dbg(&intf->dev, "MDLM guid\n"); goto bad_desc; } break; case USB_CDC_MDLM_DETAIL_TYPE: if (detail) { dev_dbg(&intf->dev, "extra MDLM detail\n"); goto bad_desc; } detail = (void *) buf; switch (detail->bGuidDescriptorType) { case 0: /* "SAFE" */ if (detail->bLength != (sizeof *detail + 2)) goto bad_detail; break; case 1: /* "BLAN" */ if (detail->bLength != (sizeof *detail + 3)) goto bad_detail; break; default: goto bad_detail; } /* assuming we either noticed BLAN already, or will * find it soon, there are some data bytes here: * - bmNetworkCapabilities (unused) * - bmDataCapabilities (bits, see below) * - bPad (ignored, for PADAFTER -- BLAN-only) * bits are: * - 0x01 -- Zaurus framing (add CRC) * - 0x02 -- PADBEFORE (CRC includes some padding) * - 0x04 -- PADAFTER (some padding after CRC) * - 0x08 -- "fermat" packet mangling (for hw bugs) * the PADBEFORE appears not to matter; we interop * with devices that use it and those that don't. */ if ((detail->bDetailData[1] & ~0x02) != 0x01) { /* bmDataCapabilities == 0 would be fine too, * but framing is minidriver-coupled for now. */ bad_detail: dev_dbg(&intf->dev, "bad MDLM detail, %d %d %d\n", detail->bLength, detail->bDetailData[0], detail->bDetailData[2]); goto bad_desc; } /* same extra framing as for non-BLAN mode */ dev->net->hard_header_len += 6; dev->rx_urb_size = dev->net->hard_header_len + dev->net->mtu; break; } next_desc: len -= buf [0]; /* bLength */ buf += buf [0]; } if (!desc || !detail) { dev_dbg(&intf->dev, "missing cdc mdlm %s%sdescriptor\n", desc ? "" : "func ", detail ? "" : "detail "); goto bad_desc; } /* There's probably a CDC Ethernet descriptor there, but we can't * rely on the Ethernet address it provides since not all vendors * bother to make it unique. Likewise there's no point in tracking * of the CDC event notifications. */ return usbnet_get_endpoints(dev, intf); bad_desc: dev_info(&dev->udev->dev, "unsupported MDLM descriptors\n"); return -ENODEV; } static const struct driver_info bogus_mdlm_info = { .description = "pseudo-MDLM (BLAN) device", .flags = FLAG_POINTTOPOINT | FLAG_FRAMING_Z, .check_connect = always_connected, .tx_fixup = zaurus_tx_fixup, .bind = blan_mdlm_bind, }; static const struct usb_device_id products [] = { #define ZAURUS_MASTER_INTERFACE \ .bInterfaceClass = USB_CLASS_COMM, \ .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \ .bInterfaceProtocol = USB_CDC_PROTO_NONE #define ZAURUS_FAKE_INTERFACE \ .bInterfaceClass = USB_CLASS_COMM, \ .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \ .bInterfaceProtocol = USB_CDC_PROTO_NONE /* SA-1100 based Sharp Zaurus ("collie"), or compatible. */ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x8004, ZAURUS_MASTER_INTERFACE, .driver_info = ZAURUS_STRONGARM_INFO, }, /* PXA-2xx based models are also lying-about-cdc. If you add any * more devices that claim to be CDC Ethernet, make sure they get * added to the blacklist in cdc_ether too. * * NOTE: OpenZaurus versions with 2.6 kernels won't use these entries, * unlike the older ones with 2.4 "embedix" kernels. 
*/ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x8005, /* A-300 */ ZAURUS_MASTER_INTERFACE, .driver_info = ZAURUS_PXA_INFO, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x8005, /* A-300 */ ZAURUS_FAKE_INTERFACE, .driver_info = (unsigned long)&bogus_mdlm_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x8006, /* B-500/SL-5600 */ ZAURUS_MASTER_INTERFACE, .driver_info = ZAURUS_PXA_INFO, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x8006, /* B-500/SL-5600 */ ZAURUS_FAKE_INTERFACE, .driver_info = (unsigned long)&bogus_mdlm_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x8007, /* C-700 */ ZAURUS_MASTER_INTERFACE, .driver_info = ZAURUS_PXA_INFO, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x8007, /* C-700 */ ZAURUS_FAKE_INTERFACE, .driver_info = (unsigned long)&bogus_mdlm_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x9031, /* C-750 C-760 */ ZAURUS_MASTER_INTERFACE, .driver_info = ZAURUS_PXA_INFO, }, { /* C-750/C-760/C-860/SL-C3000 PDA in MDLM mode */ USB_DEVICE_AND_INTERFACE_INFO(0x04DD, 0x9031, USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), .driver_info = (unsigned long) &bogus_mdlm_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x9032, /* SL-6000 */ ZAURUS_MASTER_INTERFACE, .driver_info = ZAURUS_PXA_INFO, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, .idProduct = 0x9032, /* SL-6000 */ ZAURUS_FAKE_INTERFACE, .driver_info = (unsigned long)&bogus_mdlm_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, /* reported with some C860 units */ .idProduct = 0x9050, /* C-860 */ ZAURUS_MASTER_INTERFACE, .driver_info = ZAURUS_PXA_INFO, }, { /* Motorola Rokr E6 */ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x6027, USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), .driver_info = (unsigned long) &bogus_mdlm_info, }, { /* Motorola MOTOMAGX phones */ USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x6425, USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), .driver_info = (unsigned long) &bogus_mdlm_info, }, /* Olympus has some models with a Zaurus-compatible option. 
* R-1000 uses a FreeScale i.MXL cpu (ARMv4T) */ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x07B4, .idProduct = 0x0F02, /* R-1000 */ ZAURUS_MASTER_INTERFACE, .driver_info = OLYMPUS_MXL_INFO, }, /* Logitech Harmony 900 - uses the pseudo-MDLM (BLAN) driver */ { USB_DEVICE_AND_INTERFACE_INFO(0x046d, 0xc11f, USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), .driver_info = (unsigned long) &bogus_mdlm_info, }, { }, // END }; MODULE_DEVICE_TABLE(usb, products); static struct usb_driver zaurus_driver = { .name = "zaurus", .id_table = products, .probe = usbnet_probe, .disconnect = usbnet_disconnect, .suspend = usbnet_suspend, .resume = usbnet_resume, .disable_hub_initiated_lpm = 1, }; module_usb_driver(zaurus_driver); MODULE_AUTHOR("Pavel Machek, David Brownell"); MODULE_DESCRIPTION("Sharp Zaurus PDA, and compatible products"); MODULE_LICENSE("GPL");
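The Zaurus framing above boils down to appending an IEEE CRC-32 of the whole Ethernet frame, least-significant byte first, exactly as zaurus_tx_fixup() does with crc32_le(~0, ...) followed by a final inversion. A minimal user-space sketch of that trailer computation follows; it assumes the caller has at least four bytes of tailroom after the frame, and the bitwise CRC is only meant to mirror what the kernel helper computes.

#include <stdint.h>
#include <stddef.h>

/* Reflected CRC-32 (poly 0xEDB88320): equivalent to crc32_le() seeded with
 * ~0 and inverted at the end, i.e. the standard Ethernet FCS. */
static uint32_t zaurus_fcs32(const uint8_t *data, size_t len)
{
	uint32_t crc = 0xffffffffu;

	while (len--) {
		crc ^= *data++;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1u));
	}
	return ~crc;
}

/* Append the FCS LSB-first, as zaurus_tx_fixup() does with skb_put_u8().
 * The caller must provide at least 4 bytes of tailroom past 'len'. */
static size_t zaurus_append_trailer(uint8_t *frame, size_t len)
{
	uint32_t fcs = zaurus_fcs32(frame, len);

	frame[len + 0] = fcs & 0xff;
	frame[len + 1] = (fcs >> 8) & 0xff;
	frame[len + 2] = (fcs >> 16) & 0xff;
	frame[len + 3] = (fcs >> 24) & 0xff;
	return len + 4;
}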
// SPDX-License-Identifier: GPL-2.0 #include <linux/compiler.h> #include <linux/export.h> #include <linux/fault-inject-usercopy.h> #include <linux/kasan-checks.h> #include <linux/thread_info.h> #include <linux/uaccess.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/mm.h> #include <asm/byteorder.h> #include <asm/word-at-a-time.h> #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS #define IS_UNALIGNED(src, dst) 0 #else #define IS_UNALIGNED(src, dst) \ (((long) dst | (long) src) & (sizeof(long) - 1)) #endif /* * Do a strncpy, return length of string without final '\0'. * 'count' is the user-supplied count (return 'count' if we * hit it), 'max' is the address space maximum (and we return * -EFAULT if we hit it). */ static __always_inline long do_strncpy_from_user(char *dst, const char __user *src, unsigned long count, unsigned long max) { const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; unsigned long res = 0; if (IS_UNALIGNED(src, dst)) goto byte_at_a_time; while (max >= sizeof(unsigned long)) { unsigned long c, data, mask; /* Fall back to byte-at-a-time if we get a page fault */ unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time); /* * Note that we mask out the bytes following the NUL. This is * important to do because string oblivious code may read past * the NUL. For those routines, we don't want to give them * potentially random bytes after the NUL in `src`. * * One example of such code is BPF map keys. BPF treats map keys * as an opaque set of bytes. Without the post-NUL mask, any BPF * maps keyed by strings returned from strncpy_from_user() may * have multiple entries for semantically identical strings. */ if (has_zero(c, &data, &constants)) { data = prep_zero_mask(c, data, &constants); data = create_zero_mask(data); mask = zero_bytemask(data); *(unsigned long *)(dst+res) = c & mask; return res + find_zero(data); } *(unsigned long *)(dst+res) = c; res += sizeof(unsigned long); max -= sizeof(unsigned long); } byte_at_a_time: while (max) { char c; unsafe_get_user(c,src+res, efault); dst[res] = c; if (!c) return res; res++; max--; } /* * Uhhuh. We hit 'max'. But was that the user-specified maximum * too? If so, that's ok - we got as much as the user asked for. */ if (res >= count) return res; /* * Nope: we hit the address space limit, and we still had more * characters the caller would have wanted. That's an EFAULT. */ efault: return -EFAULT; } /** * strncpy_from_user: - Copy a NUL terminated string from userspace. * @dst: Destination address, in kernel space. This buffer must be at * least @count bytes long. * @src: Source address, in user space. * @count: Maximum number of bytes to copy, including the trailing NUL. * * Copies a NUL-terminated string from userspace to kernel space. * * On success, returns the length of the string (not including the trailing * NUL). * * If access to userspace fails, returns -EFAULT (some data may have been * copied). 
* * If @count is smaller than the length of the string, copies @count bytes * and returns @count. */ long strncpy_from_user(char *dst, const char __user *src, long count) { unsigned long max_addr, src_addr; might_fault(); if (should_fail_usercopy()) return -EFAULT; if (unlikely(count <= 0)) return 0; kasan_check_write(dst, count); check_object_size(dst, count, false); if (can_do_masked_user_access()) { long retval; src = masked_user_access_begin(src); retval = do_strncpy_from_user(dst, src, count, count); user_read_access_end(); return retval; } max_addr = TASK_SIZE_MAX; src_addr = (unsigned long)untagged_addr(src); if (likely(src_addr < max_addr)) { unsigned long max = max_addr - src_addr; long retval; /* * Truncate 'max' to the user-specified limit, so that * we only have one limit we need to check in the loop */ if (max > count) max = count; if (user_read_access_begin(src, max)) { retval = do_strncpy_from_user(dst, src, count, max); user_read_access_end(); return retval; } } return -EFAULT; } EXPORT_SYMBOL(strncpy_from_user);
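The word-at-a-time fast path above relies on has_zero()/find_zero() to spot the terminating NUL eight bytes at a time. As a rough illustration of the underlying bit trick on a little-endian machine, the sketch below shows how a zero byte inside a 64-bit word can be detected and located; it is not the kernel's asm/word-at-a-time.h implementation, just the classic construction it is based on.

#include <stdint.h>

#define WAT_ONES	0x0101010101010101ULL
#define WAT_HIGHS	0x8080808080808080ULL

/* Nonzero iff 'w' contains at least one 0x00 byte. */
static inline uint64_t word_has_zero(uint64_t w)
{
	return (w - WAT_ONES) & ~w & WAT_HIGHS;
}

/*
 * Index (0..7) of the first zero byte, assuming word_has_zero(w) != 0 and
 * little-endian byte order: the lowest set 0x80 marker bit belongs to the
 * first zero byte, because borrows only propagate upward past a zero byte.
 */
static inline unsigned int word_first_zero(uint64_t w)
{
	uint64_t mask = (w - WAT_ONES) & ~w & WAT_HIGHS;

	return (unsigned int)(__builtin_ctzll(mask) >> 3);
}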
// SPDX-License-Identifier: GPL-2.0 #include <linux/fsverity.h> #include <linux/iomap.h> #include "ctree.h" #include "delalloc-space.h" #include "direct-io.h" #include "extent-tree.h" #include "file.h" #include "fs.h" #include "transaction.h" #include "volumes.h" struct btrfs_dio_data { ssize_t submitted; struct extent_changeset *data_reserved; struct btrfs_ordered_extent *ordered; bool data_space_reserved; bool nocow_done; }; struct btrfs_dio_private { /* Range of I/O */ u64 file_offset; u32 bytes; /* This must be last */ struct btrfs_bio bbio; }; static struct bio_set btrfs_dio_bioset; static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, struct extent_state **cached_state, unsigned int iomap_flags) { const bool writing = (iomap_flags & IOMAP_WRITE); const bool nowait = (iomap_flags & IOMAP_NOWAIT); struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct btrfs_ordered_extent *ordered; int ret = 0; /* Direct lock must be taken before the extent lock. */ if (nowait) { if (!btrfs_try_lock_dio_extent(io_tree, lockstart, lockend, cached_state)) return -EAGAIN; } else { btrfs_lock_dio_extent(io_tree, lockstart, lockend, cached_state); } while (1) { if (nowait) { if (!btrfs_try_lock_extent(io_tree, lockstart, lockend, cached_state)) { ret = -EAGAIN; break; } } else { btrfs_lock_extent(io_tree, lockstart, lockend, cached_state); } /* * We're concerned with the entire range that we're going to be * doing DIO to, so we need to make sure there's no ordered * extents in this range. */ ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart, lockend - lockstart + 1); /* * We need to make sure there are no buffered pages in this * range either, we could have raced between the invalidate in * generic_file_direct_write and locking the extent. The * invalidate needs to happen so that reads after a write do not * get stale data. */ if (!ordered && (!writing || !filemap_range_has_page(inode->i_mapping, lockstart, lockend))) break; btrfs_unlock_extent(io_tree, lockstart, lockend, cached_state); if (ordered) { if (nowait) { btrfs_put_ordered_extent(ordered); ret = -EAGAIN; break; } /* * If we are doing a DIO read and the ordered extent we * found is for a buffered write, we can not wait for it * to complete and retry, because if we do so we can * deadlock with concurrent buffered writes on page * locks. 
This happens only if our DIO read covers more * than one extent map, if at this point has already * created an ordered extent for a previous extent map * and locked its range in the inode's io tree, and a * concurrent write against that previous extent map's * range and this range started (we unlock the ranges * in the io tree only when the bios complete and * buffered writes always lock pages before attempting * to lock range in the io tree). */ if (writing || test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) btrfs_start_ordered_extent(ordered); else ret = nowait ? -EAGAIN : -ENOTBLK; btrfs_put_ordered_extent(ordered); } else { /* * We could trigger writeback for this range (and wait * for it to complete) and then invalidate the pages for * this range (through invalidate_inode_pages2_range()), * but that can lead us to a deadlock with a concurrent * call to readahead (a buffered read or a defrag call * triggered a readahead) on a page lock due to an * ordered dio extent we created before but did not have * yet a corresponding bio submitted (whence it can not * complete), which makes readahead wait for that * ordered extent to complete while holding a lock on * that page. */ ret = nowait ? -EAGAIN : -ENOTBLK; } if (ret) break; cond_resched(); } if (ret) btrfs_unlock_dio_extent(io_tree, lockstart, lockend, cached_state); return ret; } static struct extent_map *btrfs_create_dio_extent(struct btrfs_inode *inode, struct btrfs_dio_data *dio_data, const u64 start, const struct btrfs_file_extent *file_extent, const int type) { struct extent_map *em = NULL; struct btrfs_ordered_extent *ordered; if (type != BTRFS_ORDERED_NOCOW) { em = btrfs_create_io_em(inode, start, file_extent, type); if (IS_ERR(em)) goto out; } ordered = btrfs_alloc_ordered_extent(inode, start, file_extent, (1U << type) | (1U << BTRFS_ORDERED_DIRECT)); if (IS_ERR(ordered)) { if (em) { btrfs_free_extent_map(em); btrfs_drop_extent_map_range(inode, start, start + file_extent->num_bytes - 1, false); } em = ERR_CAST(ordered); } else { ASSERT(!dio_data->ordered); dio_data->ordered = ordered; } out: return em; } static struct extent_map *btrfs_new_extent_direct(struct btrfs_inode *inode, struct btrfs_dio_data *dio_data, u64 start, u64 len) { struct btrfs_root *root = inode->root; struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_file_extent file_extent; struct extent_map *em; struct btrfs_key ins; u64 alloc_hint; int ret; alloc_hint = btrfs_get_extent_allocation_hint(inode, start, len); again: ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize, 0, alloc_hint, &ins, 1, 1); if (ret == -EAGAIN) { ASSERT(btrfs_is_zoned(fs_info)); wait_on_bit_io(&inode->root->fs_info->flags, BTRFS_FS_NEED_ZONE_FINISH, TASK_UNINTERRUPTIBLE); goto again; } if (ret) return ERR_PTR(ret); file_extent.disk_bytenr = ins.objectid; file_extent.disk_num_bytes = ins.offset; file_extent.num_bytes = ins.offset; file_extent.ram_bytes = ins.offset; file_extent.offset = 0; file_extent.compression = BTRFS_COMPRESS_NONE; em = btrfs_create_dio_extent(inode, dio_data, start, &file_extent, BTRFS_ORDERED_REGULAR); btrfs_dec_block_group_reservations(fs_info, ins.objectid); if (IS_ERR(em)) btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, true); return em; } static int btrfs_get_blocks_direct_write(struct extent_map **map, struct inode *inode, struct btrfs_dio_data *dio_data, u64 start, u64 *lenp, unsigned int iomap_flags) { const bool nowait = (iomap_flags & IOMAP_NOWAIT); struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct 
btrfs_file_extent file_extent; struct extent_map *em = *map; int type; u64 block_start; struct btrfs_block_group *bg; bool can_nocow = false; bool space_reserved = false; u64 len = *lenp; u64 prev_len; int ret = 0; /* * We don't allocate a new extent in the following cases * * 1) The inode is marked as NODATACOW. In this case we'll just use the * existing extent. * 2) The extent is marked as PREALLOC. We're good to go here and can * just use the extent. * */ if ((em->flags & EXTENT_FLAG_PREALLOC) || ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && em->disk_bytenr != EXTENT_MAP_HOLE)) { if (em->flags & EXTENT_FLAG_PREALLOC) type = BTRFS_ORDERED_PREALLOC; else type = BTRFS_ORDERED_NOCOW; len = min(len, em->len - (start - em->start)); block_start = btrfs_extent_map_block_start(em) + (start - em->start); if (can_nocow_extent(BTRFS_I(inode), start, &len, &file_extent, false) == 1) { bg = btrfs_inc_nocow_writers(fs_info, block_start); if (bg) can_nocow = true; } } prev_len = len; if (can_nocow) { struct extent_map *em2; /* We can NOCOW, so only need to reserve metadata space. */ ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len, nowait); if (ret < 0) { /* Our caller expects us to free the input extent map. */ btrfs_free_extent_map(em); *map = NULL; btrfs_dec_nocow_writers(bg); if (nowait && (ret == -ENOSPC || ret == -EDQUOT)) ret = -EAGAIN; goto out; } space_reserved = true; em2 = btrfs_create_dio_extent(BTRFS_I(inode), dio_data, start, &file_extent, type); btrfs_dec_nocow_writers(bg); if (type == BTRFS_ORDERED_PREALLOC) { btrfs_free_extent_map(em); *map = em2; em = em2; } if (IS_ERR(em2)) { ret = PTR_ERR(em2); goto out; } dio_data->nocow_done = true; } else { /* Our caller expects us to free the input extent map. */ btrfs_free_extent_map(em); *map = NULL; if (nowait) { ret = -EAGAIN; goto out; } /* * If we could not allocate data space before locking the file * range and we can't do a NOCOW write, then we have to fail. */ if (!dio_data->data_space_reserved) { ret = -ENOSPC; goto out; } /* * We have to COW and we have already reserved data space before, * so now we reserve only metadata. */ ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len, len, false); if (ret < 0) goto out; space_reserved = true; em = btrfs_new_extent_direct(BTRFS_I(inode), dio_data, start, len); if (IS_ERR(em)) { ret = PTR_ERR(em); goto out; } *map = em; len = min(len, em->len - (start - em->start)); if (len < prev_len) btrfs_delalloc_release_metadata(BTRFS_I(inode), prev_len - len, true); } /* * We have created our ordered extent, so we can now release our reservation * for an outstanding extent. */ btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len); /* * Need to update the i_size under the extent lock so buffered * readers will get the updated i_size when we unlock. 
*/ if (start + len > i_size_read(inode)) i_size_write(inode, start + len); out: if (ret && space_reserved) { btrfs_delalloc_release_extents(BTRFS_I(inode), len); btrfs_delalloc_release_metadata(BTRFS_I(inode), len, true); } *lenp = len; return ret; } static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start, loff_t length, unsigned int flags, struct iomap *iomap, struct iomap *srcmap) { struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap); struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct extent_map *em; struct extent_state *cached_state = NULL; struct btrfs_dio_data *dio_data = iter->private; u64 lockstart, lockend; const bool write = !!(flags & IOMAP_WRITE); int ret = 0; u64 len = length; const u64 data_alloc_len = length; u32 unlock_bits = EXTENT_LOCKED; /* * We could potentially fault if we have a buffer > PAGE_SIZE, and if * we're NOWAIT we may submit a bio for a partial range and return * EIOCBQUEUED, which would result in an errant short read. * * The best way to handle this would be to allow for partial completions * of iocb's, so we could submit the partial bio, return and fault in * the rest of the pages, and then submit the io for the rest of the * range. However we don't have that currently, so simply return * -EAGAIN at this point so that the normal path is used. */ if (!write && (flags & IOMAP_NOWAIT) && length > PAGE_SIZE) return -EAGAIN; /* * Cap the size of reads to that usually seen in buffered I/O as we need * to allocate a contiguous array for the checksums. */ if (!write) len = min_t(u64, len, fs_info->sectorsize * BTRFS_MAX_BIO_SECTORS); lockstart = start; lockend = start + len - 1; /* * iomap_dio_rw() only does filemap_write_and_wait_range(), which isn't * enough if we've written compressed pages to this area, so we need to * flush the dirty pages again to make absolutely sure that any * outstanding dirty pages are on disk - the first flush only starts * compression on the data, while keeping the pages locked, so by the * time the second flush returns we know bios for the compressed pages * were submitted and finished, and the pages no longer under writeback. * * If we have a NOWAIT request and we have any pages in the range that * are locked, likely due to compression still in progress, we don't want * to block on page locks. We also don't want to block on pages marked as * dirty or under writeback (same as for the non-compression case). * iomap_dio_rw() did the same check, but after that and before we got * here, mmap'ed writes may have happened or buffered reads started * (readpage() and readahead(), which lock pages), as we haven't locked * the file range yet. */ if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &BTRFS_I(inode)->runtime_flags)) { if (flags & IOMAP_NOWAIT) { if (filemap_range_needs_writeback(inode->i_mapping, lockstart, lockend)) return -EAGAIN; } else { ret = filemap_fdatawrite_range(inode->i_mapping, start, start + length - 1); if (ret) return ret; } } memset(dio_data, 0, sizeof(*dio_data)); /* * We always try to allocate data space and must do it before locking * the file range, to avoid deadlocks with concurrent writes to the same * range if the range has several extents and the writes don't expand the * current i_size (the inode lock is taken in shared mode). If we fail to * allocate data space here we continue and later, after locking the * file range, we fail with ENOSPC only if we figure out we can not do a * NOCOW write. 
*/ if (write && !(flags & IOMAP_NOWAIT)) { ret = btrfs_check_data_free_space(BTRFS_I(inode), &dio_data->data_reserved, start, data_alloc_len, false); if (!ret) dio_data->data_space_reserved = true; else if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC))) goto err; } /* * If this errors out it's because we couldn't invalidate pagecache for * this range and we need to fallback to buffered IO, or we are doing a * NOWAIT read/write and we need to block. */ ret = lock_extent_direct(inode, lockstart, lockend, &cached_state, flags); if (ret < 0) goto err; em = btrfs_get_extent(BTRFS_I(inode), NULL, start, len); if (IS_ERR(em)) { ret = PTR_ERR(em); goto unlock_err; } /* * Ok for INLINE and COMPRESSED extents we need to fallback on buffered * io. INLINE is special, and we could probably kludge it in here, but * it's still buffered so for safety lets just fall back to the generic * buffered path. * * For COMPRESSED we _have_ to read the entire extent in so we can * decompress it, so there will be buffering required no matter what we * do, so go ahead and fallback to buffered. * * We return -ENOTBLK because that's what makes DIO go ahead and go back * to buffered IO. Don't blame me, this is the price we pay for using * the generic code. */ if (btrfs_extent_map_is_compressed(em) || em->disk_bytenr == EXTENT_MAP_INLINE) { btrfs_free_extent_map(em); /* * If we are in a NOWAIT context, return -EAGAIN in order to * fallback to buffered IO. This is not only because we can * block with buffered IO (no support for NOWAIT semantics at * the moment) but also to avoid returning short reads to user * space - this happens if we were able to read some data from * previous non-compressed extents and then when we fallback to * buffered IO, at btrfs_file_read_iter() by calling * filemap_read(), we fail to fault in pages for the read buffer, * in which case filemap_read() returns a short read (the number * of bytes previously read is > 0, so it does not return -EFAULT). */ ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK; goto unlock_err; } len = min(len, em->len - (start - em->start)); /* * If we have a NOWAIT request and the range contains multiple extents * (or a mix of extents and holes), then we return -EAGAIN to make the * caller fallback to a context where it can do a blocking (without * NOWAIT) request. This way we avoid doing partial IO and returning * success to the caller, which is not optimal for writes and for reads * it can result in unexpected behaviour for an application. * * When doing a read, because we use IOMAP_DIO_PARTIAL when calling * iomap_dio_rw(), we can end up returning less data then what the caller * asked for, resulting in an unexpected, and incorrect, short read. * That is, the caller asked to read N bytes and we return less than that, * which is wrong unless we are crossing EOF. This happens if we get a * page fault error when trying to fault in pages for the buffer that is * associated to the struct iov_iter passed to iomap_dio_rw(), and we * have previously submitted bios for other extents in the range, in * which case iomap_dio_rw() may return us EIOCBQUEUED if not all of * those bios have completed by the time we get the page fault error, * which we return back to our caller - we should only return EIOCBQUEUED * after we have submitted bios for all the extents in the range. 
*/ if ((flags & IOMAP_NOWAIT) && len < length) { btrfs_free_extent_map(em); ret = -EAGAIN; goto unlock_err; } if (write) { ret = btrfs_get_blocks_direct_write(&em, inode, dio_data, start, &len, flags); if (ret < 0) goto unlock_err; /* Recalc len in case the new em is smaller than requested */ len = min(len, em->len - (start - em->start)); if (dio_data->data_space_reserved) { u64 release_offset; u64 release_len = 0; if (dio_data->nocow_done) { release_offset = start; release_len = data_alloc_len; } else if (len < data_alloc_len) { release_offset = start + len; release_len = data_alloc_len - len; } if (release_len > 0) btrfs_free_reserved_data_space(BTRFS_I(inode), dio_data->data_reserved, release_offset, release_len); } } /* * Translate extent map information to iomap. * We trim the extents (and move the addr) even though iomap code does * that, since we have locked only the parts we are performing I/O in. */ if ((em->disk_bytenr == EXTENT_MAP_HOLE) || ((em->flags & EXTENT_FLAG_PREALLOC) && !write)) { iomap->addr = IOMAP_NULL_ADDR; iomap->type = IOMAP_HOLE; } else { iomap->addr = btrfs_extent_map_block_start(em) + (start - em->start); iomap->type = IOMAP_MAPPED; } iomap->offset = start; iomap->bdev = fs_info->fs_devices->latest_dev->bdev; iomap->length = len; btrfs_free_extent_map(em); /* * Reads will hold the EXTENT_DIO_LOCKED bit until the io is completed, * writes only hold it for this part. We hold the extent lock until * we're completely done with the extent map to make sure it remains * valid. */ if (write) unlock_bits |= EXTENT_DIO_LOCKED; btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, unlock_bits, &cached_state); /* We didn't use everything, unlock the dio extent for the remainder. */ if (!write && (start + len) < lockend) btrfs_unlock_dio_extent(&BTRFS_I(inode)->io_tree, start + len, lockend, NULL); return 0; unlock_err: /* * Don't use EXTENT_LOCK_BITS here in case we extend it later and forget * to update this, be explicit that we expect EXTENT_LOCKED and * EXTENT_DIO_LOCKED to be set here, and so that's what we're clearing. 
*/ btrfs_clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, EXTENT_LOCKED | EXTENT_DIO_LOCKED, &cached_state); err: if (dio_data->data_space_reserved) { btrfs_free_reserved_data_space(BTRFS_I(inode), dio_data->data_reserved, start, data_alloc_len); extent_changeset_free(dio_data->data_reserved); } return ret; } static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length, ssize_t written, unsigned int flags, struct iomap *iomap) { struct iomap_iter *iter = container_of(iomap, struct iomap_iter, iomap); struct btrfs_dio_data *dio_data = iter->private; size_t submitted = dio_data->submitted; const bool write = !!(flags & IOMAP_WRITE); int ret = 0; if (!write && (iomap->type == IOMAP_HOLE)) { /* If reading from a hole, unlock and return */ btrfs_unlock_dio_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1, NULL); return 0; } if (submitted < length) { pos += submitted; length -= submitted; if (write) btrfs_finish_ordered_extent(dio_data->ordered, NULL, pos, length, false); else btrfs_unlock_dio_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1, NULL); ret = -ENOTBLK; } if (write) { btrfs_put_ordered_extent(dio_data->ordered); dio_data->ordered = NULL; } if (write) extent_changeset_free(dio_data->data_reserved); return ret; } static void btrfs_dio_end_io(struct btrfs_bio *bbio) { struct btrfs_dio_private *dip = container_of(bbio, struct btrfs_dio_private, bbio); struct btrfs_inode *inode = bbio->inode; struct bio *bio = &bbio->bio; if (bio->bi_status) { btrfs_warn(inode->root->fs_info, "direct IO failed ino %llu op 0x%0x offset %#llx len %u err no %d", btrfs_ino(inode), bio->bi_opf, dip->file_offset, dip->bytes, bio->bi_status); } if (btrfs_op(bio) == BTRFS_MAP_WRITE) { btrfs_finish_ordered_extent(bbio->ordered, NULL, dip->file_offset, dip->bytes, !bio->bi_status); } else { btrfs_unlock_dio_extent(&inode->io_tree, dip->file_offset, dip->file_offset + dip->bytes - 1, NULL); } bbio->bio.bi_private = bbio->private; iomap_dio_bio_end_io(bio); } static int btrfs_extract_ordered_extent(struct btrfs_bio *bbio, struct btrfs_ordered_extent *ordered) { u64 start = (u64)bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT; u64 len = bbio->bio.bi_iter.bi_size; struct btrfs_ordered_extent *new; int ret; /* Must always be called for the beginning of an ordered extent. */ if (WARN_ON_ONCE(start != ordered->disk_bytenr)) return -EINVAL; /* No need to split if the ordered extent covers the entire bio. */ if (ordered->disk_num_bytes == len) { refcount_inc(&ordered->refs); bbio->ordered = ordered; return 0; } /* * Don't split the extent_map for NOCOW extents, as we're writing into * a pre-existing one. 
*/ if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) { ret = btrfs_split_extent_map(bbio->inode, bbio->file_offset, ordered->num_bytes, len, ordered->disk_bytenr); if (ret) return ret; } new = btrfs_split_ordered_extent(ordered, len); if (IS_ERR(new)) return PTR_ERR(new); bbio->ordered = new; return 0; } static void btrfs_dio_submit_io(const struct iomap_iter *iter, struct bio *bio, loff_t file_offset) { struct btrfs_bio *bbio = btrfs_bio(bio); struct btrfs_dio_private *dip = container_of(bbio, struct btrfs_dio_private, bbio); struct btrfs_dio_data *dio_data = iter->private; btrfs_bio_init(bbio, BTRFS_I(iter->inode)->root->fs_info, btrfs_dio_end_io, bio->bi_private); bbio->inode = BTRFS_I(iter->inode); bbio->file_offset = file_offset; dip->file_offset = file_offset; dip->bytes = bio->bi_iter.bi_size; dio_data->submitted += bio->bi_iter.bi_size; /* * Check if we are doing a partial write. If we are, we need to split * the ordered extent to match the submitted bio. Hang on to the * remaining unfinishable ordered_extent in dio_data so that it can be * cancelled in iomap_end to avoid a deadlock wherein faulting the * remaining pages is blocked on the outstanding ordered extent. */ if (iter->flags & IOMAP_WRITE) { int ret; ret = btrfs_extract_ordered_extent(bbio, dio_data->ordered); if (ret) { btrfs_finish_ordered_extent(dio_data->ordered, NULL, file_offset, dip->bytes, !ret); bio->bi_status = errno_to_blk_status(ret); iomap_dio_bio_end_io(bio); return; } } btrfs_submit_bbio(bbio, 0); } static const struct iomap_ops btrfs_dio_iomap_ops = { .iomap_begin = btrfs_dio_iomap_begin, .iomap_end = btrfs_dio_iomap_end, }; static const struct iomap_dio_ops btrfs_dio_ops = { .submit_io = btrfs_dio_submit_io, .bio_set = &btrfs_dio_bioset, }; static ssize_t btrfs_dio_read(struct kiocb *iocb, struct iov_iter *iter, size_t done_before) { struct btrfs_dio_data data = { 0 }; return iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops, IOMAP_DIO_PARTIAL, &data, done_before); } static struct iomap_dio *btrfs_dio_write(struct kiocb *iocb, struct iov_iter *iter, size_t done_before) { struct btrfs_dio_data data = { 0 }; return __iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dio_ops, IOMAP_DIO_PARTIAL, &data, done_before); } static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info, const struct iov_iter *iter, loff_t offset) { const u32 blocksize_mask = fs_info->sectorsize - 1; if (offset & blocksize_mask) return -EINVAL; if (iov_iter_alignment(iter) & blocksize_mask) return -EINVAL; return 0; } ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); loff_t pos; ssize_t written = 0; ssize_t written_buffered; size_t prev_left = 0; loff_t endbyte; ssize_t ret; unsigned int ilock_flags = 0; struct iomap_dio *dio; if (iocb->ki_flags & IOCB_NOWAIT) ilock_flags |= BTRFS_ILOCK_TRY; /* * If the write DIO is within EOF, use a shared lock and also only if * security bits will likely not be dropped by file_remove_privs() called * from btrfs_write_check(). Either will need to be rechecked after the * lock was acquired. */ if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode) && IS_NOSEC(inode)) ilock_flags |= BTRFS_ILOCK_SHARED; relock: ret = btrfs_inode_lock(BTRFS_I(inode), ilock_flags); if (ret < 0) return ret; /* Shared lock cannot be used with security bits set. 
*/ if ((ilock_flags & BTRFS_ILOCK_SHARED) && !IS_NOSEC(inode)) { btrfs_inode_unlock(BTRFS_I(inode), ilock_flags); ilock_flags &= ~BTRFS_ILOCK_SHARED; goto relock; } ret = generic_write_checks(iocb, from); if (ret <= 0) { btrfs_inode_unlock(BTRFS_I(inode), ilock_flags); return ret; } ret = btrfs_write_check(iocb, ret); if (ret < 0) { btrfs_inode_unlock(BTRFS_I(inode), ilock_flags); goto out; } pos = iocb->ki_pos; /* * Re-check since file size may have changed just before taking the * lock or pos may have changed because of O_APPEND in generic_write_check() */ if ((ilock_flags & BTRFS_ILOCK_SHARED) && pos + iov_iter_count(from) > i_size_read(inode)) { btrfs_inode_unlock(BTRFS_I(inode), ilock_flags); ilock_flags &= ~BTRFS_ILOCK_SHARED; goto relock; } if (check_direct_IO(fs_info, from, pos)) { btrfs_inode_unlock(BTRFS_I(inode), ilock_flags); goto buffered; } /* * We can't control the folios being passed in, applications can write * to them while a direct IO write is in progress. This means the * content might change after we calculated the data checksum. * Therefore we can end up storing a checksum that doesn't match the * persisted data. * * To be extra safe and avoid false data checksum mismatch, if the * inode requires data checksum, just fallback to buffered IO. * For buffered IO we have full control of page cache and can ensure * no one is modifying the content during writeback. */ if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { btrfs_inode_unlock(BTRFS_I(inode), ilock_flags); goto buffered; } /* * The iov_iter can be mapped to the same file range we are writing to. * If that's the case, then we will deadlock in the iomap code, because * it first calls our callback btrfs_dio_iomap_begin(), which will create * an ordered extent, and after that it will fault in the pages that the * iov_iter refers to. During the fault in we end up in the readahead * pages code (starting at btrfs_readahead()), which will lock the range, * find that ordered extent and then wait for it to complete (at * btrfs_lock_and_flush_ordered_range()), resulting in a deadlock since * obviously the ordered extent can never complete as we didn't submit * yet the respective bio(s). This always happens when the buffer is * memory mapped to the same file range, since the iomap DIO code always * invalidates pages in the target file range (after starting and waiting * for any writeback). * * So here we disable page faults in the iov_iter and then retry if we * got -EFAULT, faulting in the pages before the retry. */ again: from->nofault = true; dio = btrfs_dio_write(iocb, from, written); from->nofault = false; if (IS_ERR_OR_NULL(dio)) { ret = PTR_ERR_OR_ZERO(dio); } else { /* * If we have a synchronous write, we must make sure the fsync * triggered by the iomap_dio_complete() call below doesn't * deadlock on the inode lock - we are already holding it and we * can't call it after unlocking because we may need to complete * partial writes due to the input buffer (or parts of it) not * being already faulted in. */ ASSERT(current->journal_info == NULL); current->journal_info = BTRFS_TRANS_DIO_WRITE_STUB; ret = iomap_dio_complete(dio); current->journal_info = NULL; } /* No increment (+=) because iomap returns a cumulative value. */ if (ret > 0) written = ret; if (iov_iter_count(from) > 0 && (ret == -EFAULT || ret > 0)) { const size_t left = iov_iter_count(from); /* * We have more data left to write. Try to fault in as many as * possible of the remainder pages and retry. 
We do this without * releasing and locking again the inode, to prevent races with * truncate. * * Also, in case the iov refers to pages in the file range of the * file we want to write to (due to a mmap), we could enter an * infinite loop if we retry after faulting the pages in, since * iomap will invalidate any pages in the range early on, before * it tries to fault in the pages of the iov. So we keep track of * how much was left of iov in the previous EFAULT and fallback * to buffered IO in case we haven't made any progress. */ if (left == prev_left) { ret = -ENOTBLK; } else { fault_in_iov_iter_readable(from, left); prev_left = left; goto again; } } btrfs_inode_unlock(BTRFS_I(inode), ilock_flags); /* * If 'ret' is -ENOTBLK or we have not written all data, then it means * we must fallback to buffered IO. */ if ((ret < 0 && ret != -ENOTBLK) || !iov_iter_count(from)) goto out; buffered: /* * If we are in a NOWAIT context, then return -EAGAIN to signal the caller * it must retry the operation in a context where blocking is acceptable, * because even if we end up not blocking during the buffered IO attempt * below, we will block when flushing and waiting for the IO. */ if (iocb->ki_flags & IOCB_NOWAIT) { ret = -EAGAIN; goto out; } pos = iocb->ki_pos; written_buffered = btrfs_buffered_write(iocb, from); if (written_buffered < 0) { ret = written_buffered; goto out; } /* * Ensure all data is persisted. We want the next direct IO read to be * able to read what was just written. */ endbyte = pos + written_buffered - 1; ret = btrfs_fdatawrite_range(BTRFS_I(inode), pos, endbyte); if (ret) goto out; ret = filemap_fdatawait_range(inode->i_mapping, pos, endbyte); if (ret) goto out; written += written_buffered; iocb->ki_pos = pos + written_buffered; invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT, endbyte >> PAGE_SHIFT); out: return ret < 0 ? ret : written; } static int check_direct_read(struct btrfs_fs_info *fs_info, const struct iov_iter *iter, loff_t offset) { int ret; int i, seg; ret = check_direct_IO(fs_info, iter, offset); if (ret < 0) return ret; if (!iter_is_iovec(iter)) return 0; for (seg = 0; seg < iter->nr_segs; seg++) { for (i = seg + 1; i < iter->nr_segs; i++) { const struct iovec *iov1 = iter_iov(iter) + seg; const struct iovec *iov2 = iter_iov(iter) + i; if (iov1->iov_base == iov2->iov_base) return -EINVAL; } } return 0; } ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to) { struct inode *inode = file_inode(iocb->ki_filp); size_t prev_left = 0; ssize_t read = 0; ssize_t ret; if (fsverity_active(inode)) return 0; if (check_direct_read(inode_to_fs_info(inode), to, iocb->ki_pos)) return 0; btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_SHARED); again: /* * This is similar to what we do for direct IO writes, see the comment * at btrfs_direct_write(), but we also disable page faults in addition * to disabling them only at the iov_iter level. This is because when * reading from a hole or prealloc extent, iomap calls iov_iter_zero(), * which can still trigger page fault ins despite having set ->nofault * to true of our 'to' iov_iter. * * The difference to direct IO writes is that we deadlock when trying * to lock the extent range in the inode's tree during he page reads * triggered by the fault in (while for writes it is due to waiting for * our own ordered extent). This is because for direct IO reads, * btrfs_dio_iomap_begin() returns with the extent range locked, which * is only unlocked in the endio callback (end_bio_extent_readpage()). 
*/ pagefault_disable(); to->nofault = true; ret = btrfs_dio_read(iocb, to, read); to->nofault = false; pagefault_enable(); /* No increment (+=) because iomap returns a cumulative value. */ if (ret > 0) read = ret; if (iov_iter_count(to) > 0 && (ret == -EFAULT || ret > 0)) { const size_t left = iov_iter_count(to); if (left == prev_left) { /* * We didn't make any progress since the last attempt, * fallback to a buffered read for the remainder of the * range. This is just to avoid any possibility of looping * for too long. */ ret = read; } else { /* * We made some progress since the last retry or this is * the first time we are retrying. Fault in as many pages * as possible and retry. */ fault_in_iov_iter_writeable(to, left); prev_left = left; goto again; } } btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED); return ret < 0 ? ret : read; } int __init btrfs_init_dio(void) { if (bioset_init(&btrfs_dio_bioset, BIO_POOL_SIZE, offsetof(struct btrfs_dio_private, bbio.bio), BIOSET_NEED_BVECS)) return -ENOMEM; return 0; } void __cold btrfs_destroy_dio(void) { bioset_exit(&btrfs_dio_bioset); }
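/*
 * Illustrative userspace counterpart to check_direct_IO() above, added as a
 * sketch only: an O_DIRECT request stays on the direct path when the file
 * offset, the transfer length and the buffer address are all block-aligned,
 * which is what check_direct_IO() tests before btrfs decides whether to fall
 * back to buffered IO.  The 4096-byte block size and the "testfile" path are
 * assumptions for the example, not taken from the code above.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define BLOCK 4096	/* assumed sector size; fs_info->sectorsize in-kernel */

int main(void)
{
	void *buf;
	int fd;

	/* Buffer address must be block-aligned for O_DIRECT. */
	if (posix_memalign(&buf, BLOCK, BLOCK))
		return 1;
	memset(buf, 'x', BLOCK);

	fd = open("testfile", O_CREAT | O_WRONLY | O_DIRECT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Offset 0 and length BLOCK are block-aligned, so the kernel can use
	 * the direct path; a misaligned offset or length would be caught by
	 * the same kind of mask test as check_direct_IO(). */
	if (pwrite(fd, buf, BLOCK, 0) != BLOCK)
		perror("pwrite");

	close(fd);
	free(buf);
	return 0;
}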
7 3 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_LINUX_BYTEORDER_LITTLE_ENDIAN_H #define _UAPI_LINUX_BYTEORDER_LITTLE_ENDIAN_H #ifndef __LITTLE_ENDIAN #define __LITTLE_ENDIAN 1234 #endif #ifndef __LITTLE_ENDIAN_BITFIELD #define __LITTLE_ENDIAN_BITFIELD #endif #include <linux/stddef.h> #include <linux/types.h> #include <linux/swab.h> #define __constant_htonl(x) ((__force __be32)___constant_swab32((x))) #define __constant_ntohl(x) ___constant_swab32((__force __be32)(x)) #define __constant_htons(x) ((__force __be16)___constant_swab16((x))) #define __constant_ntohs(x) ___constant_swab16((__force __be16)(x)) #define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x)) #define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x)) #define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x)) #define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x)) #define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x)) #define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x)) #define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x))) #define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x)) #define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x))) #define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x)) #define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x))) #define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x)) #define __cpu_to_le64(x) ((__force __le64)(__u64)(x)) #define __le64_to_cpu(x) ((__force __u64)(__le64)(x)) #define __cpu_to_le32(x) ((__force __le32)(__u32)(x)) #define __le32_to_cpu(x) ((__force __u32)(__le32)(x)) #define __cpu_to_le16(x) ((__force __le16)(__u16)(x)) #define __le16_to_cpu(x) ((__force __u16)(__le16)(x)) #define __cpu_to_be64(x) ((__force __be64)__swab64((x))) #define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x)) #define __cpu_to_be32(x) ((__force __be32)__swab32((x))) #define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x)) #define __cpu_to_be16(x) ((__force __be16)__swab16((x))) #define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x)) static __always_inline __le64 __cpu_to_le64p(const __u64 *p) { return (__force __le64)*p; } static __always_inline __u64 __le64_to_cpup(const __le64 *p) { return (__force __u64)*p; } static __always_inline __le32 __cpu_to_le32p(const __u32 *p) { return (__force __le32)*p; } static __always_inline __u32 __le32_to_cpup(const __le32 *p) { return (__force __u32)*p; } static __always_inline __le16 __cpu_to_le16p(const __u16 *p) { return (__force __le16)*p; } static __always_inline __u16 __le16_to_cpup(const __le16 *p) { return (__force __u16)*p; } static __always_inline __be64 __cpu_to_be64p(const __u64 *p) { return (__force __be64)__swab64p(p); } static __always_inline __u64 __be64_to_cpup(const __be64 *p) { return __swab64p((__u64 *)p); } static __always_inline __be32 __cpu_to_be32p(const __u32 *p) { return (__force __be32)__swab32p(p); } static __always_inline __u32 __be32_to_cpup(const __be32 *p) { return __swab32p((__u32 *)p); } static __always_inline __be16 __cpu_to_be16p(const __u16 *p) { return (__force __be16)__swab16p(p); } static 
__always_inline __u16 __be16_to_cpup(const __be16 *p)
{
	return (__force __u16)__swab16p((__u16 *)p);
}

#define __cpu_to_le64s(x) do { (void)(x); } while (0)
#define __le64_to_cpus(x) do { (void)(x); } while (0)
#define __cpu_to_le32s(x) do { (void)(x); } while (0)
#define __le32_to_cpus(x) do { (void)(x); } while (0)
#define __cpu_to_le16s(x) do { (void)(x); } while (0)
#define __le16_to_cpus(x) do { (void)(x); } while (0)
#define __cpu_to_be64s(x) __swab64s((x))
#define __be64_to_cpus(x) __swab64s((x))
#define __cpu_to_be32s(x) __swab32s((x))
#define __be32_to_cpus(x) __swab32s((x))
#define __cpu_to_be16s(x) __swab16s((x))
#define __be16_to_cpus(x) __swab16s((x))

#endif /* _UAPI_LINUX_BYTEORDER_LITTLE_ENDIAN_H */
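/*
 * Sketch of what the definitions above mean at runtime on a little-endian
 * CPU: the cpu_to_le*() family is a plain cast (no work) while the
 * cpu_to_be*() family byte-swaps.  swab32() below open-codes the effect of
 * __swab32(); the constant 0x12345678 is just an example value.
 */
#include <stdint.h>
#include <stdio.h>

/* Reverse the four bytes of a 32-bit word, like ___constant_swab32(). */
static uint32_t swab32(uint32_t x)
{
	return ((x & 0x000000ffu) << 24) |
	       ((x & 0x0000ff00u) <<  8) |
	       ((x & 0x00ff0000u) >>  8) |
	       ((x & 0xff000000u) >> 24);
}

int main(void)
{
	uint32_t cpu = 0x12345678;

	printf("cpu_to_le32(0x%08x) = 0x%08x (identity)\n",
	       (unsigned int)cpu, (unsigned int)cpu);
	printf("cpu_to_be32(0x%08x) = 0x%08x (byte swap)\n",
	       (unsigned int)cpu, (unsigned int)swab32(cpu));
	return 0;
}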
3 3 3 3 3 3 3 1 2 2 2 2 2 2 13 7 6 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 /* * IPv4 specific functions of netfilter core * * Rusty Russell (C) 2000 -- This code is GPL. * Patrick McHardy (C) 2006-2012 */ #include <linux/kernel.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/gfp.h> #include <linux/export.h> #include <net/route.h> #include <net/xfrm.h> #include <net/ip.h> #include <net/inet_dscp.h> #include <net/netfilter/nf_queue.h> /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned int addr_type) { const struct iphdr *iph = ip_hdr(skb); struct rtable *rt; struct flowi4 fl4 = {}; __be32 saddr = iph->saddr; __u8 flags; struct net_device *dev = skb_dst(skb)->dev; struct flow_keys flkeys; unsigned int hh_len; sk = sk_to_full_sk(sk); flags = sk ? inet_sk_flowi_flags(sk) : 0; if (addr_type == RTN_UNSPEC) addr_type = inet_addr_type_dev_table(net, dev, saddr); if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST) flags |= FLOWI_FLAG_ANYSRC; else saddr = 0; /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook. */ fl4.daddr = iph->daddr; fl4.saddr = saddr; fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(iph)); fl4.flowi4_oif = sk ? sk->sk_bound_dev_if : 0; fl4.flowi4_l3mdev = l3mdev_master_ifindex(dev); fl4.flowi4_mark = skb->mark; fl4.flowi4_flags = flags; fib4_rules_early_flow_dissect(net, skb, &fl4, &flkeys); rt = ip_route_output_key(net, &fl4); if (IS_ERR(rt)) return PTR_ERR(rt); /* Drop old route. */ skb_dst_drop(skb); skb_dst_set(skb, &rt->dst); if (skb_dst(skb)->error) return skb_dst(skb)->error; #ifdef CONFIG_XFRM if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && xfrm_decode_session(net, skb, flowi4_to_flowi(&fl4), AF_INET) == 0) { struct dst_entry *dst = skb_dst(skb); skb_dst_set(skb, NULL); dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), sk, 0); if (IS_ERR(dst)) return PTR_ERR(dst); skb_dst_set(skb, dst); } #endif /* Change in oif may mean change in hh_len. */ hh_len = skb_dst(skb)->dev->hard_header_len; if (skb_headroom(skb) < hh_len && pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)), 0, GFP_ATOMIC)) return -ENOMEM; return 0; } EXPORT_SYMBOL(ip_route_me_harder); int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict __always_unused) { struct rtable *rt = ip_route_output_key(net, &fl->u.ip4); if (IS_ERR(rt)) return PTR_ERR(rt); *dst = &rt->dst; return 0; } EXPORT_SYMBOL_GPL(nf_ip_route);
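/*
 * Standalone sketch of the headroom fix-up at the end of
 * ip_route_me_harder(): after re-routing, the new output device may need a
 * larger link-layer header, so the buffer is regrown by an aligned amount
 * (HH_DATA_ALIGN() in the kernel, with pskb_expand_head() doing the copy).
 * The struct pkt type, ensure_headroom() helper and HH_ALIGN value are
 * illustrative stand-ins, not kernel API.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define HH_ALIGN 16	/* assumed alignment, in the spirit of HH_DATA_ALIGN() */

struct pkt {
	unsigned char *head;	/* start of the allocation */
	unsigned char *data;	/* start of packet data */
	size_t len;		/* bytes of packet data */
};

/* Grow the gap in front of ->data to at least hh_len bytes. */
static int ensure_headroom(struct pkt *p, size_t hh_len)
{
	size_t have = (size_t)(p->data - p->head);

	if (have < hh_len) {
		size_t extra = ((hh_len - have) + HH_ALIGN - 1) & ~(size_t)(HH_ALIGN - 1);
		unsigned char *nhead = malloc(have + extra + p->len);

		if (!nhead)
			return -1;	/* -ENOMEM in the kernel version */
		memcpy(nhead + have + extra, p->data, p->len);
		free(p->head);
		p->head = nhead;
		p->data = nhead + have + extra;
	}
	return 0;
}

int main(void)
{
	struct pkt p = { .len = 5 };

	p.head = p.data = malloc(p.len);	/* no headroom at all */
	if (!p.head)
		return 1;
	memcpy(p.data, "hello", p.len);

	if (ensure_headroom(&p, 14) == 0)	/* e.g. an Ethernet header */
		printf("headroom is now %zu bytes\n", (size_t)(p.data - p.head));
	free(p.head);
	return 0;
}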
8 8 1 4 6 5 1 7 2 3 3 2 2 1 3 4 7 7 5 2 3 3 3 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 // SPDX-License-Identifier: GPL-2.0-or-later /* * Glue Code for assembler optimized version of 3DES * * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> * * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> */ #include <crypto/algapi.h> #include <crypto/des.h> #include <crypto/internal/skcipher.h> #include <linux/crypto.h> #include <linux/init.h> #include <linux/module.h> #include <linux/types.h> struct des3_ede_x86_ctx { struct des3_ede_ctx enc; struct des3_ede_ctx dec; }; /* regular block cipher functions */ asmlinkage void des3_ede_x86_64_crypt_blk(const u32 *expkey, u8 *dst, const u8 *src); /* 3-way parallel cipher functions */ asmlinkage void des3_ede_x86_64_crypt_blk_3way(const u32 *expkey, u8 *dst, const u8 *src); static inline void des3_ede_enc_blk(struct des3_ede_x86_ctx *ctx, u8 *dst, const u8 *src) { u32 *enc_ctx = ctx->enc.expkey; des3_ede_x86_64_crypt_blk(enc_ctx, dst, src); } static inline void des3_ede_dec_blk(struct des3_ede_x86_ctx *ctx, u8 *dst, const u8 *src) { u32 *dec_ctx = ctx->dec.expkey; des3_ede_x86_64_crypt_blk(dec_ctx, dst, src); } static inline void des3_ede_dec_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst, const u8 *src) { u32 *dec_ctx = ctx->dec.expkey; des3_ede_x86_64_crypt_blk_3way(dec_ctx, dst, src); } static void des3_ede_x86_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { des3_ede_enc_blk(crypto_tfm_ctx(tfm), dst, src); } static void des3_ede_x86_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { des3_ede_dec_blk(crypto_tfm_ctx(tfm), dst, src); } static int ecb_crypt(struct skcipher_request *req, const u32 *expkey) { const unsigned int bsize = DES3_EDE_BLOCK_SIZE; struct skcipher_walk walk; unsigned int nbytes; int err; err = skcipher_walk_virt(&walk, req, false); while ((nbytes = walk.nbytes)) { const u8 *wsrc = walk.src.virt.addr; u8 *wdst = walk.dst.virt.addr; /* Process four block batch */ if (nbytes >= bsize * 3) { do { des3_ede_x86_64_crypt_blk_3way(expkey, wdst, wsrc); wsrc += 
bsize * 3; wdst += bsize * 3; nbytes -= bsize * 3; } while (nbytes >= bsize * 3); if (nbytes < bsize) goto done; } /* Handle leftovers */ do { des3_ede_x86_64_crypt_blk(expkey, wdst, wsrc); wsrc += bsize; wdst += bsize; nbytes -= bsize; } while (nbytes >= bsize); done: err = skcipher_walk_done(&walk, nbytes); } return err; } static int ecb_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm); return ecb_crypt(req, ctx->enc.expkey); } static int ecb_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm); return ecb_crypt(req, ctx->dec.expkey); } static unsigned int __cbc_encrypt(struct des3_ede_x86_ctx *ctx, struct skcipher_walk *walk) { unsigned int bsize = DES3_EDE_BLOCK_SIZE; unsigned int nbytes = walk->nbytes; u64 *src = (u64 *)walk->src.virt.addr; u64 *dst = (u64 *)walk->dst.virt.addr; u64 *iv = (u64 *)walk->iv; do { *dst = *src ^ *iv; des3_ede_enc_blk(ctx, (u8 *)dst, (u8 *)dst); iv = dst; src += 1; dst += 1; nbytes -= bsize; } while (nbytes >= bsize); *(u64 *)walk->iv = *iv; return nbytes; } static int cbc_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; unsigned int nbytes; int err; err = skcipher_walk_virt(&walk, req, false); while (walk.nbytes) { nbytes = __cbc_encrypt(ctx, &walk); err = skcipher_walk_done(&walk, nbytes); } return err; } static unsigned int __cbc_decrypt(struct des3_ede_x86_ctx *ctx, struct skcipher_walk *walk) { unsigned int bsize = DES3_EDE_BLOCK_SIZE; unsigned int nbytes = walk->nbytes; u64 *src = (u64 *)walk->src.virt.addr; u64 *dst = (u64 *)walk->dst.virt.addr; u64 ivs[3 - 1]; u64 last_iv; /* Start of the last block. */ src += nbytes / bsize - 1; dst += nbytes / bsize - 1; last_iv = *src; /* Process four block batch */ if (nbytes >= bsize * 3) { do { nbytes -= bsize * 3 - bsize; src -= 3 - 1; dst -= 3 - 1; ivs[0] = src[0]; ivs[1] = src[1]; des3_ede_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src); dst[1] ^= ivs[0]; dst[2] ^= ivs[1]; nbytes -= bsize; if (nbytes < bsize) goto done; *dst ^= *(src - 1); src -= 1; dst -= 1; } while (nbytes >= bsize * 3); } /* Handle leftovers */ for (;;) { des3_ede_dec_blk(ctx, (u8 *)dst, (u8 *)src); nbytes -= bsize; if (nbytes < bsize) break; *dst ^= *(src - 1); src -= 1; dst -= 1; } done: *dst ^= *(u64 *)walk->iv; *(u64 *)walk->iv = last_iv; return nbytes; } static int cbc_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct des3_ede_x86_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; unsigned int nbytes; int err; err = skcipher_walk_virt(&walk, req, false); while (walk.nbytes) { nbytes = __cbc_decrypt(ctx, &walk); err = skcipher_walk_done(&walk, nbytes); } return err; } static int des3_ede_x86_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { struct des3_ede_x86_ctx *ctx = crypto_tfm_ctx(tfm); u32 i, j, tmp; int err; err = des3_ede_expand_key(&ctx->enc, key, keylen); if (err == -ENOKEY) { if (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) err = -EINVAL; else err = 0; } if (err) { memset(ctx, 0, sizeof(*ctx)); return err; } /* Fix encryption context for this implementation and form decryption * context. 
*/ j = DES3_EDE_EXPKEY_WORDS - 2; for (i = 0; i < DES3_EDE_EXPKEY_WORDS; i += 2, j -= 2) { tmp = ror32(ctx->enc.expkey[i + 1], 4); ctx->enc.expkey[i + 1] = tmp; ctx->dec.expkey[j + 0] = ctx->enc.expkey[i + 0]; ctx->dec.expkey[j + 1] = tmp; } return 0; } static int des3_ede_x86_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { return des3_ede_x86_setkey(&tfm->base, key, keylen); } static struct crypto_alg des3_ede_cipher = { .cra_name = "des3_ede", .cra_driver_name = "des3_ede-asm", .cra_priority = 200, .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct des3_ede_x86_ctx), .cra_module = THIS_MODULE, .cra_u = { .cipher = { .cia_min_keysize = DES3_EDE_KEY_SIZE, .cia_max_keysize = DES3_EDE_KEY_SIZE, .cia_setkey = des3_ede_x86_setkey, .cia_encrypt = des3_ede_x86_encrypt, .cia_decrypt = des3_ede_x86_decrypt, } } }; static struct skcipher_alg des3_ede_skciphers[] = { { .base.cra_name = "ecb(des3_ede)", .base.cra_driver_name = "ecb-des3_ede-asm", .base.cra_priority = 300, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx), .base.cra_module = THIS_MODULE, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .setkey = des3_ede_x86_setkey_skcipher, .encrypt = ecb_encrypt, .decrypt = ecb_decrypt, }, { .base.cra_name = "cbc(des3_ede)", .base.cra_driver_name = "cbc-des3_ede-asm", .base.cra_priority = 300, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct des3_ede_x86_ctx), .base.cra_module = THIS_MODULE, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, .setkey = des3_ede_x86_setkey_skcipher, .encrypt = cbc_encrypt, .decrypt = cbc_decrypt, } }; static bool is_blacklisted_cpu(void) { if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) return false; if (boot_cpu_data.x86 == 0x0f) { /* * On Pentium 4, des3_ede-x86_64 is slower than generic C * implementation because use of 64bit rotates (which are really * slow on P4). Therefore blacklist P4s. */ return true; } return false; } static int force; module_param(force, int, 0); MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist"); static int __init des3_ede_x86_init(void) { int err; if (!force && is_blacklisted_cpu()) { pr_info("des3_ede-x86_64: performance on this CPU would be suboptimal: disabling des3_ede-x86_64.\n"); return -ENODEV; } err = crypto_register_alg(&des3_ede_cipher); if (err) return err; err = crypto_register_skciphers(des3_ede_skciphers, ARRAY_SIZE(des3_ede_skciphers)); if (err) crypto_unregister_alg(&des3_ede_cipher); return err; } static void __exit des3_ede_x86_fini(void) { crypto_unregister_alg(&des3_ede_cipher); crypto_unregister_skciphers(des3_ede_skciphers, ARRAY_SIZE(des3_ede_skciphers)); } module_init(des3_ede_x86_init); module_exit(des3_ede_x86_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized"); MODULE_ALIAS_CRYPTO("des3_ede"); MODULE_ALIAS_CRYPTO("des3_ede-asm"); MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>");
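/*
 * The chaining step done by __cbc_encrypt() above, reduced to a standalone
 * sketch: each 8-byte plaintext block is XORed with the previous ciphertext
 * block (or the IV for the first block) before being encrypted, and the last
 * ciphertext block becomes the next IV.  toy_encrypt() is a placeholder
 * permutation used only so the loop can run; it is not DES/3DES.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t toy_encrypt(uint64_t block)
{
	return block ^ 0xdeadbeefcafef00dULL;	/* stand-in for des3_ede_enc_blk() */
}

static void cbc_encrypt_blocks(uint64_t *dst, const uint64_t *src,
			       size_t nblocks, uint64_t *iv)
{
	for (size_t i = 0; i < nblocks; i++) {
		dst[i] = toy_encrypt(src[i] ^ *iv);	/* *dst = *src ^ *iv; encrypt */
		*iv = dst[i];				/* iv = dst */
	}
}

int main(void)
{
	uint64_t pt[3] = { 1, 2, 3 }, ct[3];
	uint64_t iv = 0x0123456789abcdefULL;

	cbc_encrypt_blocks(ct, pt, 3, &iv);
	for (int i = 0; i < 3; i++)
		printf("block %d: %016llx\n", i, (unsigned long long)ct[i]);
	return 0;
}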
814 1322 1322 1317 1324 1325 1325 1321 2 1324 942 811 1325 6 1323 1325 817 818 819 817 819 818 816 814 819 1324 819 817 819 1323 1321 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 // SPDX-License-Identifier: GPL-2.0 /* * FPU signal frame handling routines. */ #include <linux/compat.h> #include <linux/cpu.h> #include <linux/pagemap.h> #include <asm/fpu/signal.h> #include <asm/fpu/regset.h> #include <asm/fpu/xstate.h> #include <asm/sigframe.h> #include <asm/trapnr.h> #include <asm/trace/fpu.h> #include "context.h" #include "internal.h" #include "legacy.h" #include "xstate.h" /* * Check for the presence of extended state information in the * user fpstate pointer in the sigcontext. */ static inline bool check_xstate_in_sigframe(struct fxregs_state __user *fxbuf, struct _fpx_sw_bytes *fx_sw) { void __user *fpstate = fxbuf; unsigned int magic2; if (__copy_from_user(fx_sw, &fxbuf->sw_reserved[0], sizeof(*fx_sw))) return false; /* Check for the first magic field */ if (fx_sw->magic1 != FP_XSTATE_MAGIC1) goto setfx; /* * Check for the presence of second magic word at the end of memory * layout. This detects the case where the user just copied the legacy * fpstate layout with out copying the extended state information * in the memory layout. 
*/ if (__get_user(magic2, (__u32 __user *)(fpstate + x86_task_fpu(current)->fpstate->user_size))) return false; if (likely(magic2 == FP_XSTATE_MAGIC2)) return true; setfx: trace_x86_fpu_xstate_check_failed(x86_task_fpu(current)); /* Set the parameters for fx only state */ fx_sw->magic1 = 0; fx_sw->xstate_size = sizeof(struct fxregs_state); fx_sw->xfeatures = XFEATURE_MASK_FPSSE; return true; } /* * Signal frame handlers. */ static inline bool save_fsave_header(struct task_struct *tsk, void __user *buf) { if (use_fxsr()) { struct xregs_state *xsave = &x86_task_fpu(tsk)->fpstate->regs.xsave; struct user_i387_ia32_struct env; struct _fpstate_32 __user *fp = buf; fpregs_lock(); if (!test_thread_flag(TIF_NEED_FPU_LOAD)) fxsave(&x86_task_fpu(tsk)->fpstate->regs.fxsave); fpregs_unlock(); convert_from_fxsr(&env, tsk); if (__copy_to_user(buf, &env, sizeof(env)) || __put_user(xsave->i387.swd, &fp->status) || __put_user(X86_FXSR_MAGIC, &fp->magic)) return false; } else { struct fregs_state __user *fp = buf; u32 swd; if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status)) return false; } return true; } /* * Prepare the SW reserved portion of the fxsave memory layout, indicating * the presence of the extended state information in the memory layout * pointed to by the fpstate pointer in the sigcontext. * This is saved when ever the FP and extended state context is * saved on the user stack during the signal handler delivery to the user. */ static inline void save_sw_bytes(struct _fpx_sw_bytes *sw_bytes, bool ia32_frame, struct fpstate *fpstate) { sw_bytes->magic1 = FP_XSTATE_MAGIC1; sw_bytes->extended_size = fpstate->user_size + FP_XSTATE_MAGIC2_SIZE; sw_bytes->xfeatures = fpstate->user_xfeatures; sw_bytes->xstate_size = fpstate->user_size; if (ia32_frame) sw_bytes->extended_size += sizeof(struct fregs_state); } static inline bool save_xstate_epilog(void __user *buf, int ia32_frame, struct fpstate *fpstate) { struct xregs_state __user *x = buf; struct _fpx_sw_bytes sw_bytes = {}; int err; /* Setup the bytes not touched by the [f]xsave and reserved for SW. */ save_sw_bytes(&sw_bytes, ia32_frame, fpstate); err = __copy_to_user(&x->i387.sw_reserved, &sw_bytes, sizeof(sw_bytes)); if (!use_xsave()) return !err; err |= __put_user(FP_XSTATE_MAGIC2, (__u32 __user *)(buf + fpstate->user_size)); /* * For legacy compatible, we always set FP/SSE bits in the bit * vector while saving the state to the user context. This will * enable us capturing any changes(during sigreturn) to * the FP/SSE bits by the legacy applications which don't touch * xfeatures in the xsave header. * * xsave aware apps can change the xfeatures in the xsave * header as well as change any contents in the memory layout. * xrestore as part of sigreturn will capture all the changes. */ err |= set_xfeature_in_sigframe(x, XFEATURE_MASK_FPSSE); return !err; } static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf, u32 pkru) { if (use_xsave()) return xsave_to_user_sigframe(buf, pkru); if (use_fxsr()) return fxsave_to_user_sigframe((struct fxregs_state __user *) buf); else return fnsave_to_user_sigframe((struct fregs_state __user *) buf); } /* * Save the fpu, extended register state to the user signal frame. * * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save * state is copied. * 'buf' points to the 'buf_fx' or to the fsave header followed by 'buf_fx'. * * buf == buf_fx for 64-bit frames and 32-bit fsave frame. * buf != buf_fx for 32-bit frames with fxstate. 
* * Save it directly to the user frame with disabled page fault handler. If * that faults, try to clear the frame which handles the page fault. * * If this is a 32-bit frame with fxstate, put a fsave header before * the aligned state at 'buf_fx'. * * For [f]xsave state, update the SW reserved fields in the [f]xsave frame * indicating the absence/presence of the extended state to the user. */ bool copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size, u32 pkru) { struct task_struct *tsk = current; struct fpstate *fpstate = x86_task_fpu(tsk)->fpstate; bool ia32_fxstate = (buf != buf_fx); int ret; ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) || IS_ENABLED(CONFIG_IA32_EMULATION)); if (!static_cpu_has(X86_FEATURE_FPU)) { struct user_i387_ia32_struct fp; fpregs_soft_get(current, NULL, (struct membuf){.p = &fp, .left = sizeof(fp)}); return !copy_to_user(buf, &fp, sizeof(fp)); } if (!access_ok(buf, size)) return false; if (use_xsave()) { struct xregs_state __user *xbuf = buf_fx; /* * Clear the xsave header first, so that reserved fields are * initialized to zero. */ if (__clear_user(&xbuf->header, sizeof(xbuf->header))) return false; } retry: /* * Load the FPU registers if they are not valid for the current task. * With a valid FPU state we can attempt to save the state directly to * userland's stack frame which will likely succeed. If it does not, * resolve the fault in the user memory and try again. */ fpregs_lock(); if (test_thread_flag(TIF_NEED_FPU_LOAD)) fpregs_restore_userregs(); pagefault_disable(); ret = copy_fpregs_to_sigframe(buf_fx, pkru); pagefault_enable(); fpregs_unlock(); if (ret) { if (!__clear_user(buf_fx, fpstate->user_size)) goto retry; return false; } /* Save the fsave header for the 32-bit frames. */ if ((ia32_fxstate || !use_fxsr()) && !save_fsave_header(tsk, buf)) return false; if (use_fxsr() && !save_xstate_epilog(buf_fx, ia32_fxstate, fpstate)) return false; return true; } static int __restore_fpregs_from_user(void __user *buf, u64 ufeatures, u64 xrestore, bool fx_only) { if (use_xsave()) { u64 init_bv = ufeatures & ~xrestore; int ret; if (likely(!fx_only)) ret = xrstor_from_user_sigframe(buf, xrestore); else ret = fxrstor_from_user_sigframe(buf); if (!ret && unlikely(init_bv)) os_xrstor(&init_fpstate, init_bv); return ret; } else if (use_fxsr()) { return fxrstor_from_user_sigframe(buf); } else { return frstor_from_user_sigframe(buf); } } /* * Attempt to restore the FPU registers directly from user memory. * Pagefaults are handled and any errors returned are fatal. */ static bool restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only) { struct fpu *fpu = x86_task_fpu(current); int ret; /* Restore enabled features only. */ xrestore &= fpu->fpstate->user_xfeatures; retry: fpregs_lock(); /* Ensure that XFD is up to date */ xfd_update_state(fpu->fpstate); pagefault_disable(); ret = __restore_fpregs_from_user(buf, fpu->fpstate->user_xfeatures, xrestore, fx_only); pagefault_enable(); if (unlikely(ret)) { /* * The above did an FPU restore operation, restricted to * the user portion of the registers, and failed, but the * microcode might have modified the FPU registers * nevertheless. * * If the FPU registers do not belong to current, then * invalidate the FPU register state otherwise the task * might preempt current and return to user space with * corrupted FPU registers. */ if (test_thread_flag(TIF_NEED_FPU_LOAD)) __cpu_invalidate_fpregs_state(); fpregs_unlock(); /* Try to handle #PF, but anything else is fatal. 
*/ if (ret != X86_TRAP_PF) return false; if (!fault_in_readable(buf, fpu->fpstate->user_size)) goto retry; return false; } /* * Restore supervisor states: previous context switch etc has done * XSAVES and saved the supervisor states in the kernel buffer from * which they can be restored now. * * It would be optimal to handle this with a single XRSTORS, but * this does not work because the rest of the FPU registers have * been restored from a user buffer directly. */ if (test_thread_flag(TIF_NEED_FPU_LOAD) && xfeatures_mask_supervisor()) os_xrstor_supervisor(fpu->fpstate); fpregs_mark_activate(); fpregs_unlock(); return true; } static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx, bool ia32_fxstate) { struct task_struct *tsk = current; struct fpu *fpu = x86_task_fpu(tsk); struct user_i387_ia32_struct env; bool success, fx_only = false; union fpregs_state *fpregs; u64 user_xfeatures = 0; if (use_xsave()) { struct _fpx_sw_bytes fx_sw_user; if (!check_xstate_in_sigframe(buf_fx, &fx_sw_user)) return false; fx_only = !fx_sw_user.magic1; user_xfeatures = fx_sw_user.xfeatures; } else { user_xfeatures = XFEATURE_MASK_FPSSE; } if (likely(!ia32_fxstate)) { /* Restore the FPU registers directly from user memory. */ return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only); } /* * Copy the legacy state because the FP portion of the FX frame has * to be ignored for histerical raisins. The legacy state is folded * in once the larger state has been copied. */ if (__copy_from_user(&env, buf, sizeof(env))) return false; /* * By setting TIF_NEED_FPU_LOAD it is ensured that our xstate is * not modified on context switch and that the xstate is considered * to be loaded again on return to userland (overriding last_cpu avoids * the optimisation). */ fpregs_lock(); if (!test_thread_flag(TIF_NEED_FPU_LOAD)) { /* * If supervisor states are available then save the * hardware state in current's fpstate so that the * supervisor state is preserved. Save the full state for * simplicity. There is no point in optimizing this by only * saving the supervisor states and then shuffle them to * the right place in memory. It's ia32 mode. Shrug. */ if (xfeatures_mask_supervisor()) os_xsave(fpu->fpstate); set_thread_flag(TIF_NEED_FPU_LOAD); } __fpu_invalidate_fpregs_state(fpu); __cpu_invalidate_fpregs_state(); fpregs_unlock(); fpregs = &fpu->fpstate->regs; if (use_xsave() && !fx_only) { if (copy_sigframe_from_user_to_xstate(tsk, buf_fx)) return false; } else { if (__copy_from_user(&fpregs->fxsave, buf_fx, sizeof(fpregs->fxsave))) return false; if (IS_ENABLED(CONFIG_X86_64)) { /* Reject invalid MXCSR values. */ if (fpregs->fxsave.mxcsr & ~mxcsr_feature_mask) return false; } else { /* Mask invalid bits out for historical reasons (broken hardware). */ fpregs->fxsave.mxcsr &= mxcsr_feature_mask; } /* Enforce XFEATURE_MASK_FPSSE when XSAVE is enabled */ if (use_xsave()) fpregs->xsave.header.xfeatures |= XFEATURE_MASK_FPSSE; } /* Fold the legacy FP storage */ convert_to_fxsr(&fpregs->fxsave, &env); fpregs_lock(); if (use_xsave()) { /* * Remove all UABI feature bits not set in user_xfeatures * from the memory xstate header which makes the full * restore below bring them into init state. This works for * fx_only mode as well because that has only FP and SSE * set in user_xfeatures. * * Preserve supervisor states! 
*/ u64 mask = user_xfeatures | xfeatures_mask_supervisor(); fpregs->xsave.header.xfeatures &= mask; success = !os_xrstor_safe(fpu->fpstate, fpu_kernel_cfg.max_features); } else { success = !fxrstor_safe(&fpregs->fxsave); } if (likely(success)) fpregs_mark_activate(); fpregs_unlock(); return success; } static inline unsigned int xstate_sigframe_size(struct fpstate *fpstate) { unsigned int size = fpstate->user_size; return use_xsave() ? size + FP_XSTATE_MAGIC2_SIZE : size; } /* * Restore FPU state from a sigframe: */ bool fpu__restore_sig(void __user *buf, int ia32_frame) { struct fpu *fpu = x86_task_fpu(current); void __user *buf_fx = buf; bool ia32_fxstate = false; bool success = false; unsigned int size; if (unlikely(!buf)) { fpu__clear_user_states(fpu); return true; } size = xstate_sigframe_size(fpu->fpstate); ia32_frame &= (IS_ENABLED(CONFIG_X86_32) || IS_ENABLED(CONFIG_IA32_EMULATION)); /* * Only FXSR enabled systems need the FX state quirk. * FRSTOR does not need it and can use the fast path. */ if (ia32_frame && use_fxsr()) { buf_fx = buf + sizeof(struct fregs_state); size += sizeof(struct fregs_state); ia32_fxstate = true; } if (!access_ok(buf, size)) goto out; if (!IS_ENABLED(CONFIG_X86_64) && !cpu_feature_enabled(X86_FEATURE_FPU)) { success = !fpregs_soft_set(current, NULL, 0, sizeof(struct user_i387_ia32_struct), NULL, buf); } else { success = __fpu_restore_sig(buf, buf_fx, ia32_fxstate); } out: if (unlikely(!success)) fpu__clear_user_states(fpu); return success; } unsigned long fpu__alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx, unsigned long *size) { unsigned long frame_size = xstate_sigframe_size(x86_task_fpu(current)->fpstate); *buf_fx = sp = round_down(sp - frame_size, 64); if (ia32_frame && use_fxsr()) { frame_size += sizeof(struct fregs_state); sp -= sizeof(struct fregs_state); } *size = frame_size; return sp; } unsigned long __init fpu__get_fpstate_size(void) { unsigned long ret = fpu_user_cfg.max_size; if (use_xsave()) ret += FP_XSTATE_MAGIC2_SIZE; /* * This space is needed on (most) 32-bit kernels, or when a 32-bit * app is running on a 64-bit kernel. To keep things simple, just * assume the worst case and always include space for 'freg_state', * even for 64-bit apps on 64-bit kernels. This wastes a bit of * space, but keeps the code simple. */ if ((IS_ENABLED(CONFIG_IA32_EMULATION) || IS_ENABLED(CONFIG_X86_32)) && use_fxsr()) ret += sizeof(struct fregs_state); return ret; }
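/*
 * Standalone sketch of the sigframe layout math in fpu__alloc_mathframe()
 * above: the [f|fx|x]save area is carved out of the signal stack with
 * 64-byte alignment, and 32-bit frames additionally reserve a legacy fsave
 * header in front of it.  The user_size argument and FREGS_SIZE value here
 * are illustrative assumptions, not the real structure sizes.
 */
#include <stdio.h>

#define MAGIC2_SIZE 4		/* FP_XSTATE_MAGIC2_SIZE */
#define FREGS_SIZE 112		/* assumed sizeof(struct fregs_state) */

static unsigned long alloc_mathframe(unsigned long sp, int ia32_frame,
				     unsigned long user_size,
				     unsigned long *buf_fx, unsigned long *size)
{
	unsigned long frame_size = user_size + MAGIC2_SIZE;	/* xsave case */

	sp = (sp - frame_size) & ~63UL;		/* round_down(sp - frame_size, 64) */
	*buf_fx = sp;

	if (ia32_frame) {			/* assumes use_fxsr() */
		frame_size += FREGS_SIZE;
		sp -= FREGS_SIZE;
	}
	*size = frame_size;
	return sp;
}

int main(void)
{
	unsigned long buf_fx, size;
	unsigned long sp = alloc_mathframe(0x7ffd0000UL, 1, 2688, &buf_fx, &size);

	printf("sp=%#lx buf_fx=%#lx size=%lu\n", sp, buf_fx, size);
	printf("buf_fx 64-byte aligned: %s\n", (buf_fx & 63) ? "no" : "yes");
	return 0;
}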
7 4 3 3 5 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 // SPDX-License-Identifier: GPL-2.0-only /* * (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> * (C) 2011 Patrick McHardy <kaber@trash.net> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/skbuff.h> #include <linux/netfilter.h> #include <linux/netfilter/x_tables.h> #include <net/netfilter/nf_nat.h> static int xt_nat_checkentry_v0(const struct xt_tgchk_param *par) { const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; if (mr->rangesize != 1) { pr_info_ratelimited("multiple ranges no longer supported\n"); return -EINVAL; } return nf_ct_netns_get(par->net, par->family); } static int xt_nat_checkentry(const struct xt_tgchk_param *par) { return nf_ct_netns_get(par->net, par->family); } static void xt_nat_destroy(const struct xt_tgdtor_param *par) { nf_ct_netns_put(par->net, par->family); } static void xt_nat_convert_range(struct nf_nat_range2 *dst, const struct nf_nat_ipv4_range *src) { memset(&dst->min_addr, 0, sizeof(dst->min_addr)); memset(&dst->max_addr, 0, sizeof(dst->max_addr)); memset(&dst->base_proto, 0, sizeof(dst->base_proto)); dst->flags = src->flags; dst->min_addr.ip = src->min_ip; dst->max_addr.ip = src->max_ip; dst->min_proto = src->min; dst->max_proto = src->max; } static unsigned int xt_snat_target_v0(struct sk_buff *skb, const struct xt_action_param *par) { const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; struct nf_nat_range2 range; enum ip_conntrack_info ctinfo; struct nf_conn *ct; ct = nf_ct_get(skb, &ctinfo); WARN_ON(!(ct != NULL && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY))); xt_nat_convert_range(&range, &mr->range[0]); return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC); } static unsigned int xt_dnat_target_v0(struct sk_buff *skb, const struct xt_action_param *par) { const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo; struct nf_nat_range2 range; enum ip_conntrack_info ctinfo; struct nf_conn *ct; ct = nf_ct_get(skb, &ctinfo); WARN_ON(!(ct != NULL && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED))); xt_nat_convert_range(&range, &mr->range[0]); return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST); } static unsigned int xt_snat_target_v1(struct sk_buff *skb, const struct xt_action_param *par) { const struct nf_nat_range *range_v1 = par->targinfo; struct nf_nat_range2 range; enum ip_conntrack_info ctinfo; struct nf_conn *ct; ct = nf_ct_get(skb, &ctinfo); WARN_ON(!(ct != NULL && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY))); memcpy(&range, range_v1, sizeof(*range_v1)); memset(&range.base_proto, 0, 
sizeof(range.base_proto)); return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC); } static unsigned int xt_dnat_target_v1(struct sk_buff *skb, const struct xt_action_param *par) { const struct nf_nat_range *range_v1 = par->targinfo; struct nf_nat_range2 range; enum ip_conntrack_info ctinfo; struct nf_conn *ct; ct = nf_ct_get(skb, &ctinfo); WARN_ON(!(ct != NULL && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED))); memcpy(&range, range_v1, sizeof(*range_v1)); memset(&range.base_proto, 0, sizeof(range.base_proto)); return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST); } static unsigned int xt_snat_target_v2(struct sk_buff *skb, const struct xt_action_param *par) { const struct nf_nat_range2 *range = par->targinfo; enum ip_conntrack_info ctinfo; struct nf_conn *ct; ct = nf_ct_get(skb, &ctinfo); WARN_ON(!(ct != NULL && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY))); return nf_nat_setup_info(ct, range, NF_NAT_MANIP_SRC); } static unsigned int xt_dnat_target_v2(struct sk_buff *skb, const struct xt_action_param *par) { const struct nf_nat_range2 *range = par->targinfo; enum ip_conntrack_info ctinfo; struct nf_conn *ct; ct = nf_ct_get(skb, &ctinfo); WARN_ON(!(ct != NULL && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED))); return nf_nat_setup_info(ct, range, NF_NAT_MANIP_DST); } static struct xt_target xt_nat_target_reg[] __read_mostly = { { .name = "SNAT", .revision = 0, .checkentry = xt_nat_checkentry_v0, .destroy = xt_nat_destroy, .target = xt_snat_target_v0, .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat), .family = NFPROTO_IPV4, .table = "nat", .hooks = (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_IN), .me = THIS_MODULE, }, { .name = "DNAT", .revision = 0, .checkentry = xt_nat_checkentry_v0, .destroy = xt_nat_destroy, .target = xt_dnat_target_v0, .targetsize = sizeof(struct nf_nat_ipv4_multi_range_compat), .family = NFPROTO_IPV4, .table = "nat", .hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT), .me = THIS_MODULE, }, { .name = "SNAT", .revision = 1, .checkentry = xt_nat_checkentry, .destroy = xt_nat_destroy, .target = xt_snat_target_v1, .targetsize = sizeof(struct nf_nat_range), .table = "nat", .hooks = (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_IN), .me = THIS_MODULE, }, { .name = "DNAT", .revision = 1, .checkentry = xt_nat_checkentry, .destroy = xt_nat_destroy, .target = xt_dnat_target_v1, .targetsize = sizeof(struct nf_nat_range), .table = "nat", .hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT), .me = THIS_MODULE, }, { .name = "SNAT", .revision = 2, .checkentry = xt_nat_checkentry, .destroy = xt_nat_destroy, .target = xt_snat_target_v2, .targetsize = sizeof(struct nf_nat_range2), .table = "nat", .hooks = (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_IN), .me = THIS_MODULE, }, { .name = "DNAT", .revision = 2, .checkentry = xt_nat_checkentry, .destroy = xt_nat_destroy, .target = xt_dnat_target_v2, .targetsize = sizeof(struct nf_nat_range2), .table = "nat", .hooks = (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT), .me = THIS_MODULE, }, }; static int __init xt_nat_init(void) { return xt_register_targets(xt_nat_target_reg, ARRAY_SIZE(xt_nat_target_reg)); } static void __exit xt_nat_exit(void) { xt_unregister_targets(xt_nat_target_reg, ARRAY_SIZE(xt_nat_target_reg)); } module_init(xt_nat_init); module_exit(xt_nat_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); MODULE_ALIAS("ipt_SNAT"); MODULE_ALIAS("ipt_DNAT"); MODULE_ALIAS("ip6t_SNAT"); 
MODULE_ALIAS("ip6t_DNAT"); MODULE_DESCRIPTION("SNAT and DNAT targets support");
1 2 2 8 8 8 4 8 10 2 10 2 10 10 3 7 10 18 1 17 1 10 2 4 4 2 4 1 18 1 1 1 1 1 1 1 24 25 25 23 1 1 18 3 15 4 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 // SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> #include <linux/errno.h> #include <linux/file.h> #include <linux/slab.h> #include <linux/nospec.h> #include <linux/io_uring.h> #include <uapi/linux/io_uring.h> #include "io_uring.h" #include "rsrc.h" #include "filetable.h" #include "alloc_cache.h" #include "msg_ring.h" /* All valid masks for MSG_RING */ #define IORING_MSG_RING_MASK (IORING_MSG_RING_CQE_SKIP | \ IORING_MSG_RING_FLAGS_PASS) struct io_msg { struct file *file; struct file *src_file; struct callback_head tw; u64 user_data; u32 len; u32 cmd; u32 src_fd; union { u32 dst_fd; u32 cqe_flags; }; u32 flags; }; static void io_double_unlock_ctx(struct io_ring_ctx *octx) { mutex_unlock(&octx->uring_lock); } static int io_lock_external_ctx(struct io_ring_ctx *octx, unsigned int issue_flags) { /* * To ensure proper ordering between the two ctxs, we can only * attempt a trylock on the target. If that fails and we already have * the source ctx lock, punt to io-wq. 
*/ if (!(issue_flags & IO_URING_F_UNLOCKED)) { if (!mutex_trylock(&octx->uring_lock)) return -EAGAIN; return 0; } mutex_lock(&octx->uring_lock); return 0; } void io_msg_ring_cleanup(struct io_kiocb *req) { struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); if (WARN_ON_ONCE(!msg->src_file)) return; fput(msg->src_file); msg->src_file = NULL; } static inline bool io_msg_need_remote(struct io_ring_ctx *target_ctx) { return target_ctx->task_complete; } static void io_msg_tw_complete(struct io_kiocb *req, io_tw_token_t tw) { struct io_ring_ctx *ctx = req->ctx; io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags); if (spin_trylock(&ctx->msg_lock)) { if (io_alloc_cache_put(&ctx->msg_cache, req)) req = NULL; spin_unlock(&ctx->msg_lock); } if (req) kmem_cache_free(req_cachep, req); percpu_ref_put(&ctx->refs); } static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req, int res, u32 cflags, u64 user_data) { if (!READ_ONCE(ctx->submitter_task)) { kmem_cache_free(req_cachep, req); return -EOWNERDEAD; } req->opcode = IORING_OP_NOP; req->cqe.user_data = user_data; io_req_set_res(req, res, cflags); percpu_ref_get(&ctx->refs); req->ctx = ctx; req->tctx = NULL; req->io_task_work.func = io_msg_tw_complete; io_req_task_work_add_remote(req, IOU_F_TWQ_LAZY_WAKE); return 0; } static struct io_kiocb *io_msg_get_kiocb(struct io_ring_ctx *ctx) { struct io_kiocb *req = NULL; if (spin_trylock(&ctx->msg_lock)) { req = io_alloc_cache_get(&ctx->msg_cache); spin_unlock(&ctx->msg_lock); if (req) return req; } return kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); } static int io_msg_data_remote(struct io_ring_ctx *target_ctx, struct io_msg *msg) { struct io_kiocb *target; u32 flags = 0; target = io_msg_get_kiocb(target_ctx); if (unlikely(!target)) return -ENOMEM; if (msg->flags & IORING_MSG_RING_FLAGS_PASS) flags = msg->cqe_flags; return io_msg_remote_post(target_ctx, target, msg->len, flags, msg->user_data); } static int __io_msg_ring_data(struct io_ring_ctx *target_ctx, struct io_msg *msg, unsigned int issue_flags) { u32 flags = 0; int ret; if (msg->src_fd || msg->flags & ~IORING_MSG_RING_FLAGS_PASS) return -EINVAL; if (!(msg->flags & IORING_MSG_RING_FLAGS_PASS) && msg->dst_fd) return -EINVAL; if (target_ctx->flags & IORING_SETUP_R_DISABLED) return -EBADFD; if (io_msg_need_remote(target_ctx)) return io_msg_data_remote(target_ctx, msg); if (msg->flags & IORING_MSG_RING_FLAGS_PASS) flags = msg->cqe_flags; ret = -EOVERFLOW; if (target_ctx->flags & IORING_SETUP_IOPOLL) { if (unlikely(io_lock_external_ctx(target_ctx, issue_flags))) return -EAGAIN; } if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, flags)) ret = 0; if (target_ctx->flags & IORING_SETUP_IOPOLL) io_double_unlock_ctx(target_ctx); return ret; } static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *target_ctx = req->file->private_data; struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); return __io_msg_ring_data(target_ctx, msg, issue_flags); } static int io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags) { struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); struct io_ring_ctx *ctx = req->ctx; struct io_rsrc_node *node; int ret = -EBADF; io_ring_submit_lock(ctx, issue_flags); node = io_rsrc_node_lookup(&ctx->file_table.data, msg->src_fd); if (node) { msg->src_file = io_slot_file(node); if (msg->src_file) get_file(msg->src_file); req->flags |= REQ_F_NEED_CLEANUP; ret = 0; } io_ring_submit_unlock(ctx, issue_flags); return ret; 
} static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *target_ctx = req->file->private_data; struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); struct file *src_file = msg->src_file; int ret; if (unlikely(io_lock_external_ctx(target_ctx, issue_flags))) return -EAGAIN; ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd); if (ret < 0) goto out_unlock; msg->src_file = NULL; req->flags &= ~REQ_F_NEED_CLEANUP; if (msg->flags & IORING_MSG_RING_CQE_SKIP) goto out_unlock; /* * If this fails, the target still received the file descriptor but * wasn't notified of the fact. This means that if this request * completes with -EOVERFLOW, then the sender must ensure that a * later IORING_OP_MSG_RING delivers the message. */ if (!io_post_aux_cqe(target_ctx, msg->user_data, ret, 0)) ret = -EOVERFLOW; out_unlock: io_double_unlock_ctx(target_ctx); return ret; } static void io_msg_tw_fd_complete(struct callback_head *head) { struct io_msg *msg = container_of(head, struct io_msg, tw); struct io_kiocb *req = cmd_to_io_kiocb(msg); int ret = -EOWNERDEAD; if (!(current->flags & PF_EXITING)) ret = io_msg_install_complete(req, IO_URING_F_UNLOCKED); if (ret < 0) req_set_fail(req); io_req_queue_tw_complete(req, ret); } static int io_msg_fd_remote(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->file->private_data; struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); struct task_struct *task = READ_ONCE(ctx->submitter_task); if (unlikely(!task)) return -EOWNERDEAD; init_task_work(&msg->tw, io_msg_tw_fd_complete); if (task_work_add(task, &msg->tw, TWA_SIGNAL)) return -EOWNERDEAD; return IOU_ISSUE_SKIP_COMPLETE; } static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *target_ctx = req->file->private_data; struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); struct io_ring_ctx *ctx = req->ctx; if (msg->len) return -EINVAL; if (target_ctx == ctx) return -EINVAL; if (target_ctx->flags & IORING_SETUP_R_DISABLED) return -EBADFD; if (!msg->src_file) { int ret = io_msg_grab_file(req, issue_flags); if (unlikely(ret)) return ret; } if (io_msg_need_remote(target_ctx)) return io_msg_fd_remote(req); return io_msg_install_complete(req, issue_flags); } static int __io_msg_ring_prep(struct io_msg *msg, const struct io_uring_sqe *sqe) { if (unlikely(sqe->buf_index || sqe->personality)) return -EINVAL; msg->src_file = NULL; msg->user_data = READ_ONCE(sqe->off); msg->len = READ_ONCE(sqe->len); msg->cmd = READ_ONCE(sqe->addr); msg->src_fd = READ_ONCE(sqe->addr3); msg->dst_fd = READ_ONCE(sqe->file_index); msg->flags = READ_ONCE(sqe->msg_ring_flags); if (msg->flags & ~IORING_MSG_RING_MASK) return -EINVAL; return 0; } int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { return __io_msg_ring_prep(io_kiocb_to_cmd(req, struct io_msg), sqe); } int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags) { struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); int ret; ret = -EBADFD; if (!io_is_uring_fops(req->file)) goto done; switch (msg->cmd) { case IORING_MSG_DATA: ret = io_msg_ring_data(req, issue_flags); break; case IORING_MSG_SEND_FD: ret = io_msg_send_fd(req, issue_flags); break; default: ret = -EINVAL; break; } done: if (ret < 0) { if (ret == -EAGAIN || ret == IOU_ISSUE_SKIP_COMPLETE) return ret; req_set_fail(req); } io_req_set_res(req, ret, 0); return IOU_COMPLETE; } int io_uring_sync_msg_ring(struct io_uring_sqe *sqe) { struct io_msg io_msg = { }; int ret; ret = 
__io_msg_ring_prep(&io_msg, sqe);
	if (unlikely(ret))
		return ret;

	/*
	 * Only data sending supported, not IORING_MSG_SEND_FD as that one
	 * doesn't make sense without a source ring to send files from.
	 */
	if (io_msg.cmd != IORING_MSG_DATA)
		return -EINVAL;

	CLASS(fd, f)(sqe->fd);
	if (fd_empty(f))
		return -EBADF;
	if (!io_is_uring_fops(fd_file(f)))
		return -EBADFD;

	return __io_msg_ring_data(fd_file(f)->private_data, &io_msg,
				  IO_URING_F_UNLOCKED);
}
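/*
 * Standalone sketch of the lock-ordering rule in io_lock_external_ctx()
 * above: when the submitting ring's lock is already held, the target ring's
 * lock may only be trylocked; on failure the operation reports -EAGAIN and
 * is retried from a context where blocking is safe (io-wq), avoiding an
 * ABBA deadlock between two rings messaging each other.  The pthread
 * mutexes and the lock_other_ctx() helper are illustrative stand-ins.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static int lock_other_ctx(pthread_mutex_t *other, int own_lock_held)
{
	if (own_lock_held) {
		if (pthread_mutex_trylock(other) != 0)
			return -EAGAIN;		/* caller punts and retries */
		return 0;
	}
	pthread_mutex_lock(other);		/* blocking is fine here */
	return 0;
}

int main(void)
{
	pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;	/* "source" ring */
	pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;	/* "target" ring */

	pthread_mutex_lock(&a);
	if (lock_other_ctx(&b, 1) == 0) {
		puts("got both locks without blocking");
		pthread_mutex_unlock(&b);
	} else {
		puts("would punt to a blocking context and retry");
	}
	pthread_mutex_unlock(&a);
	return 0;
}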
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include "device.h"
#include "peer.h"
#include "timers.h"
#include "messages.h"
#include "cookie.h"
#include "socket.h"
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <net/ip_tunnels.h>

/* Must be called with bh disabled.
*/ static void update_rx_stats(struct wg_peer *peer, size_t len) { dev_sw_netstats_rx_add(peer->device->dev, len); peer->rx_bytes += len; } #define SKB_TYPE_LE32(skb) (((struct message_header *)(skb)->data)->type) static size_t validate_header_len(struct sk_buff *skb) { if (unlikely(skb->len < sizeof(struct message_header))) return 0; if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_DATA) && skb->len >= MESSAGE_MINIMUM_LENGTH) return sizeof(struct message_data); if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION) && skb->len == sizeof(struct message_handshake_initiation)) return sizeof(struct message_handshake_initiation); if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE) && skb->len == sizeof(struct message_handshake_response)) return sizeof(struct message_handshake_response); if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE) && skb->len == sizeof(struct message_handshake_cookie)) return sizeof(struct message_handshake_cookie); return 0; } static int prepare_skb_header(struct sk_buff *skb, struct wg_device *wg) { size_t data_offset, data_len, header_len; struct udphdr *udp; if (unlikely(!wg_check_packet_protocol(skb) || skb_transport_header(skb) < skb->head || (skb_transport_header(skb) + sizeof(struct udphdr)) > skb_tail_pointer(skb))) return -EINVAL; /* Bogus IP header */ udp = udp_hdr(skb); data_offset = (u8 *)udp - skb->data; if (unlikely(data_offset > U16_MAX || data_offset + sizeof(struct udphdr) > skb->len)) /* Packet has offset at impossible location or isn't big enough * to have UDP fields. */ return -EINVAL; data_len = ntohs(udp->len); if (unlikely(data_len < sizeof(struct udphdr) || data_len > skb->len - data_offset)) /* UDP packet is reporting too small of a size or lying about * its size. */ return -EINVAL; data_len -= sizeof(struct udphdr); data_offset = (u8 *)udp + sizeof(struct udphdr) - skb->data; if (unlikely(!pskb_may_pull(skb, data_offset + sizeof(struct message_header)) || pskb_trim(skb, data_len + data_offset) < 0)) return -EINVAL; skb_pull(skb, data_offset); if (unlikely(skb->len != data_len)) /* Final len does not agree with calculated len */ return -EINVAL; header_len = validate_header_len(skb); if (unlikely(!header_len)) return -EINVAL; __skb_push(skb, data_offset); if (unlikely(!pskb_may_pull(skb, data_offset + header_len))) return -EINVAL; __skb_pull(skb, data_offset); return 0; } static void wg_receive_handshake_packet(struct wg_device *wg, struct sk_buff *skb) { enum cookie_mac_state mac_state; struct wg_peer *peer = NULL; /* This is global, so that our load calculation applies to the whole * system. We don't care about races with it at all. 
*/ static u64 last_under_load; bool packet_needs_cookie; bool under_load; if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE)) { net_dbg_skb_ratelimited("%s: Receiving cookie response from %pISpfsc\n", wg->dev->name, skb); wg_cookie_message_consume( (struct message_handshake_cookie *)skb->data, wg); return; } under_load = atomic_read(&wg->handshake_queue_len) >= MAX_QUEUED_INCOMING_HANDSHAKES / 8; if (under_load) { last_under_load = ktime_get_coarse_boottime_ns(); } else if (last_under_load) { under_load = !wg_birthdate_has_expired(last_under_load, 1); if (!under_load) last_under_load = 0; } mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb, under_load); if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) || (!under_load && mac_state == VALID_MAC_BUT_NO_COOKIE)) { packet_needs_cookie = false; } else if (under_load && mac_state == VALID_MAC_BUT_NO_COOKIE) { packet_needs_cookie = true; } else { net_dbg_skb_ratelimited("%s: Invalid MAC of handshake, dropping packet from %pISpfsc\n", wg->dev->name, skb); return; } switch (SKB_TYPE_LE32(skb)) { case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): { struct message_handshake_initiation *message = (struct message_handshake_initiation *)skb->data; if (packet_needs_cookie) { wg_packet_send_handshake_cookie(wg, skb, message->sender_index); return; } peer = wg_noise_handshake_consume_initiation(message, wg); if (unlikely(!peer)) { net_dbg_skb_ratelimited("%s: Invalid handshake initiation from %pISpfsc\n", wg->dev->name, skb); return; } wg_socket_set_peer_endpoint_from_skb(peer, skb); net_dbg_ratelimited("%s: Receiving handshake initiation from peer %llu (%pISpfsc)\n", wg->dev->name, peer->internal_id, &peer->endpoint.addr); wg_packet_send_handshake_response(peer); break; } case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): { struct message_handshake_response *message = (struct message_handshake_response *)skb->data; if (packet_needs_cookie) { wg_packet_send_handshake_cookie(wg, skb, message->sender_index); return; } peer = wg_noise_handshake_consume_response(message, wg); if (unlikely(!peer)) { net_dbg_skb_ratelimited("%s: Invalid handshake response from %pISpfsc\n", wg->dev->name, skb); return; } wg_socket_set_peer_endpoint_from_skb(peer, skb); net_dbg_ratelimited("%s: Receiving handshake response from peer %llu (%pISpfsc)\n", wg->dev->name, peer->internal_id, &peer->endpoint.addr); if (wg_noise_handshake_begin_session(&peer->handshake, &peer->keypairs)) { wg_timers_session_derived(peer); wg_timers_handshake_complete(peer); /* Calling this function will either send any existing * packets in the queue and not send a keepalive, which * is the best case, Or, if there's nothing in the * queue, it will send a keepalive, in order to give * immediate confirmation of the session. 
*/ wg_packet_send_keepalive(peer); } break; } } if (unlikely(!peer)) { WARN(1, "Somehow a wrong type of packet wound up in the handshake queue!\n"); return; } local_bh_disable(); update_rx_stats(peer, skb->len); local_bh_enable(); wg_timers_any_authenticated_packet_received(peer); wg_timers_any_authenticated_packet_traversal(peer); wg_peer_put(peer); } void wg_packet_handshake_receive_worker(struct work_struct *work) { struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr; struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue); struct sk_buff *skb; while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) { wg_receive_handshake_packet(wg, skb); dev_kfree_skb(skb); atomic_dec(&wg->handshake_queue_len); cond_resched(); } } static void keep_key_fresh(struct wg_peer *peer) { struct noise_keypair *keypair; bool send; if (peer->sent_lastminute_handshake) return; rcu_read_lock_bh(); keypair = rcu_dereference_bh(peer->keypairs.current_keypair); send = keypair && READ_ONCE(keypair->sending.is_valid) && keypair->i_am_the_initiator && wg_birthdate_has_expired(keypair->sending.birthdate, REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT); rcu_read_unlock_bh(); if (unlikely(send)) { peer->sent_lastminute_handshake = true; wg_packet_send_queued_handshake_initiation(peer, false); } } static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair) { struct scatterlist sg[MAX_SKB_FRAGS + 8]; struct sk_buff *trailer; unsigned int offset; int num_frags; if (unlikely(!keypair)) return false; if (unlikely(!READ_ONCE(keypair->receiving.is_valid) || wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) || READ_ONCE(keypair->receiving_counter.counter) >= REJECT_AFTER_MESSAGES)) { WRITE_ONCE(keypair->receiving.is_valid, false); return false; } PACKET_CB(skb)->nonce = le64_to_cpu(((struct message_data *)skb->data)->counter); /* We ensure that the network header is part of the packet before we * call skb_cow_data, so that there's no chance that data is removed * from the skb, so that later we can extract the original endpoint. */ offset = -skb_network_offset(skb); skb_push(skb, offset); num_frags = skb_cow_data(skb, 0, &trailer); offset += sizeof(struct message_data); skb_pull(skb, offset); if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg))) return false; sg_init_table(sg, num_frags); if (skb_to_sgvec(skb, sg, 0, skb->len) <= 0) return false; if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0, PACKET_CB(skb)->nonce, keypair->receiving.key)) return false; /* Another ugly situation of pushing and pulling the header so as to * keep endpoint information intact. 
*/ skb_push(skb, offset); if (pskb_trim(skb, skb->len - noise_encrypted_len(0))) return false; skb_pull(skb, offset); return true; } /* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */ static bool counter_validate(struct noise_replay_counter *counter, u64 their_counter) { unsigned long index, index_current, top, i; bool ret = false; spin_lock_bh(&counter->lock); if (unlikely(counter->counter >= REJECT_AFTER_MESSAGES + 1 || their_counter >= REJECT_AFTER_MESSAGES)) goto out; ++their_counter; if (unlikely((COUNTER_WINDOW_SIZE + their_counter) < counter->counter)) goto out; index = their_counter >> ilog2(BITS_PER_LONG); if (likely(their_counter > counter->counter)) { index_current = counter->counter >> ilog2(BITS_PER_LONG); top = min_t(unsigned long, index - index_current, COUNTER_BITS_TOTAL / BITS_PER_LONG); for (i = 1; i <= top; ++i) counter->backtrack[(i + index_current) & ((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0; WRITE_ONCE(counter->counter, their_counter); } index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1; ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1), &counter->backtrack[index]); out: spin_unlock_bh(&counter->lock); return ret; } #include "selftest/counter.c" static void wg_packet_consume_data_done(struct wg_peer *peer, struct sk_buff *skb, struct endpoint *endpoint) { struct net_device *dev = peer->device->dev; unsigned int len, len_before_trim; struct wg_peer *routed_peer; wg_socket_set_peer_endpoint(peer, endpoint); if (unlikely(wg_noise_received_with_keypair(&peer->keypairs, PACKET_CB(skb)->keypair))) { wg_timers_handshake_complete(peer); wg_packet_send_staged_packets(peer); } keep_key_fresh(peer); wg_timers_any_authenticated_packet_received(peer); wg_timers_any_authenticated_packet_traversal(peer); /* A packet with length 0 is a keepalive packet */ if (unlikely(!skb->len)) { update_rx_stats(peer, message_data_len(0)); net_dbg_ratelimited("%s: Receiving keepalive packet from peer %llu (%pISpfsc)\n", dev->name, peer->internal_id, &peer->endpoint.addr); goto packet_processed; } wg_timers_data_received(peer); if (unlikely(skb_network_header(skb) < skb->head)) goto dishonest_packet_size; if (unlikely(!(pskb_network_may_pull(skb, sizeof(struct iphdr)) && (ip_hdr(skb)->version == 4 || (ip_hdr(skb)->version == 6 && pskb_network_may_pull(skb, sizeof(struct ipv6hdr))))))) goto dishonest_packet_type; skb->dev = dev; /* We've already verified the Poly1305 auth tag, which means this packet * was not modified in transit. We can therefore tell the networking * stack that all checksums of every layer of encapsulation have already * been checked "by the hardware" and therefore is unnecessary to check * again in software. 
*/ skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum_level = ~0; /* All levels */ skb->protocol = ip_tunnel_parse_protocol(skb); if (skb->protocol == htons(ETH_P_IP)) { len = ntohs(ip_hdr(skb)->tot_len); if (unlikely(len < sizeof(struct iphdr))) goto dishonest_packet_size; INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ip_hdr(skb)->tos); } else if (skb->protocol == htons(ETH_P_IPV6)) { len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr); INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ipv6_get_dsfield(ipv6_hdr(skb))); } else { goto dishonest_packet_type; } if (unlikely(len > skb->len)) goto dishonest_packet_size; len_before_trim = skb->len; if (unlikely(pskb_trim(skb, len))) goto packet_processed; routed_peer = wg_allowedips_lookup_src(&peer->device->peer_allowedips, skb); wg_peer_put(routed_peer); /* We don't need the extra reference. */ if (unlikely(routed_peer != peer)) goto dishonest_packet_peer; napi_gro_receive(&peer->napi, skb); update_rx_stats(peer, message_data_len(len_before_trim)); return; dishonest_packet_peer: net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n", dev->name, skb, peer->internal_id, &peer->endpoint.addr); DEV_STATS_INC(dev, rx_errors); DEV_STATS_INC(dev, rx_frame_errors); goto packet_processed; dishonest_packet_type: net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n", dev->name, peer->internal_id, &peer->endpoint.addr); DEV_STATS_INC(dev, rx_errors); DEV_STATS_INC(dev, rx_frame_errors); goto packet_processed; dishonest_packet_size: net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n", dev->name, peer->internal_id, &peer->endpoint.addr); DEV_STATS_INC(dev, rx_errors); DEV_STATS_INC(dev, rx_length_errors); goto packet_processed; packet_processed: dev_kfree_skb(skb); } int wg_packet_rx_poll(struct napi_struct *napi, int budget) { struct wg_peer *peer = container_of(napi, struct wg_peer, napi); struct noise_keypair *keypair; struct endpoint endpoint; enum packet_state state; struct sk_buff *skb; int work_done = 0; bool free; if (unlikely(budget <= 0)) return 0; while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL && (state = atomic_read_acquire(&PACKET_CB(skb)->state)) != PACKET_STATE_UNCRYPTED) { wg_prev_queue_drop_peeked(&peer->rx_queue); keypair = PACKET_CB(skb)->keypair; free = true; if (unlikely(state != PACKET_STATE_CRYPTED)) goto next; if (unlikely(!counter_validate(&keypair->receiving_counter, PACKET_CB(skb)->nonce))) { net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n", peer->device->dev->name, PACKET_CB(skb)->nonce, READ_ONCE(keypair->receiving_counter.counter)); goto next; } if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb))) goto next; wg_reset_packet(skb, false); wg_packet_consume_data_done(peer, skb, &endpoint); free = false; next: wg_noise_keypair_put(keypair, false); wg_peer_put(peer); if (unlikely(free)) dev_kfree_skb(skb); if (++work_done >= budget) break; } if (work_done < budget) napi_complete_done(napi, work_done); return work_done; } void wg_packet_decrypt_worker(struct work_struct *work) { struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr; struct sk_buff *skb; while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) { enum packet_state state = likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ? 
PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; wg_queue_enqueue_per_peer_rx(skb, state); if (need_resched()) cond_resched(); } } static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb) { __le32 idx = ((struct message_data *)skb->data)->key_idx; struct wg_peer *peer = NULL; int ret; rcu_read_lock_bh(); PACKET_CB(skb)->keypair = (struct noise_keypair *)wg_index_hashtable_lookup( wg->index_hashtable, INDEX_HASHTABLE_KEYPAIR, idx, &peer); if (unlikely(!wg_noise_keypair_get(PACKET_CB(skb)->keypair))) goto err_keypair; if (unlikely(READ_ONCE(peer->is_dead))) goto err; ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb, wg->packet_crypt_wq); if (unlikely(ret == -EPIPE)) wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD); if (likely(!ret || ret == -EPIPE)) { rcu_read_unlock_bh(); return; } err: wg_noise_keypair_put(PACKET_CB(skb)->keypair, false); err_keypair: rcu_read_unlock_bh(); wg_peer_put(peer); dev_kfree_skb(skb); } void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb) { if (unlikely(prepare_skb_header(skb, wg) < 0)) goto err; switch (SKB_TYPE_LE32(skb)) { case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): { int cpu, ret = -EBUSY; if (unlikely(!rng_is_initialized())) goto drop; if (atomic_read(&wg->handshake_queue_len) > MAX_QUEUED_INCOMING_HANDSHAKES / 2) { if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) { ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb); spin_unlock_bh(&wg->handshake_queue.ring.producer_lock); } } else ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb); if (ret) { drop: net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n", wg->dev->name, skb); goto err; } atomic_inc(&wg->handshake_queue_len); cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu); /* Queues up a call to packet_process_queued_handshake_packets(skb): */ queue_work_on(cpu, wg->handshake_receive_wq, &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work); break; } case cpu_to_le32(MESSAGE_DATA): PACKET_CB(skb)->ds = ip_tunnel_get_dsfield(ip_hdr(skb), skb); wg_packet_consume_data(wg, skb); break; default: WARN(1, "Non-exhaustive parsing of packet header lead to unknown packet type!\n"); goto err; } return; err: dev_kfree_skb(skb); }
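/*
 * Illustrative userspace sketch (not part of receive.c): a simplified model
 * of the RFC6479-style sliding-window replay check that counter_validate()
 * implements above.  The type and macro names here are made up for the
 * example, the window is demo-sized (WireGuard's bitmap is much larger),
 * and there is no locking, so this only shows the bookkeeping idea.
 */
#include <stdbool.h>
#include <stdint.h>

#define WINDOW_BITS  128u	/* assumption: demo-sized window */
#define WORD_BITS    64u
#define WINDOW_WORDS (WINDOW_BITS / WORD_BITS)

struct replay_window {
	uint64_t greatest;		/* highest counter accepted so far */
	uint64_t bitmap[WINDOW_WORDS];
};

bool replay_accept(struct replay_window *w, uint64_t ctr)
{
	uint64_t word, bit, i;

	if (ctr > w->greatest) {
		/* Counter moved forward: zero the words we are rolling into. */
		uint64_t adv = ctr / WORD_BITS - w->greatest / WORD_BITS;

		if (adv > WINDOW_WORDS)
			adv = WINDOW_WORDS;
		for (i = 1; i <= adv; i++)
			w->bitmap[(w->greatest / WORD_BITS + i) % WINDOW_WORDS] = 0;
		w->greatest = ctr;
	} else if (w->greatest - ctr > WINDOW_BITS - WORD_BITS) {
		return false;		/* too old: outside the usable window */
	}

	word = (ctr / WORD_BITS) % WINDOW_WORDS;
	bit = 1ULL << (ctr % WORD_BITS);
	if (w->bitmap[word] & bit)
		return false;		/* already seen: replay */
	w->bitmap[word] |= bit;
	return true;
}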
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2019 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trace.h"
#include "xfs_health.h"
#include "xfs_ag.h"
#include "xfs_btree.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_quota_defs.h"
#include "xfs_rtgroup.h"

static void
xfs_health_unmount_group(
	struct xfs_group	*xg,
	bool			*warn)
{
	unsigned int		sick = 0;
	unsigned int		checked = 0;

	xfs_group_measure_sickness(xg, &sick, &checked);
	if (sick) {
		trace_xfs_group_unfixed_corruption(xg, sick);
		*warn = true;
	}
}

/*
 * Warn about metadata corruption that we detected but haven't fixed, and
 * make sure we're not sitting on anything that would get in the way of
 * recovery.
 */
void
xfs_health_unmount(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag = NULL;
	struct xfs_rtgroup	*rtg = NULL;
	unsigned int		sick = 0;
	unsigned int		checked = 0;
	bool			warn = false;

	if (xfs_is_shutdown(mp))
		return;

	/* Measure AG corruption levels.
*/ while ((pag = xfs_perag_next(mp, pag))) xfs_health_unmount_group(pag_group(pag), &warn); /* Measure realtime group corruption levels. */ while ((rtg = xfs_rtgroup_next(mp, rtg))) xfs_health_unmount_group(rtg_group(rtg), &warn); /* * Measure fs corruption and keep the sample around for the warning. * See the note below for why we exempt FS_COUNTERS. */ xfs_fs_measure_sickness(mp, &sick, &checked); if (sick & ~XFS_SICK_FS_COUNTERS) { trace_xfs_fs_unfixed_corruption(mp, sick); warn = true; } if (warn) { xfs_warn(mp, "Uncorrected metadata errors detected; please run xfs_repair."); /* * We discovered uncorrected metadata problems at some point * during this filesystem mount and have advised the * administrator to run repair once the unmount completes. * * However, we must be careful -- when FSCOUNTERS are flagged * unhealthy, the unmount procedure omits writing the clean * unmount record to the log so that the next mount will run * recovery and recompute the summary counters. In other * words, we leave a dirty log to get the counters fixed. * * Unfortunately, xfs_repair cannot recover dirty logs, so if * there were filesystem problems, FSCOUNTERS was flagged, and * the administrator takes our advice to run xfs_repair, * they'll have to zap the log before repairing structures. * We don't really want to encourage this, so we mark the * FSCOUNTERS healthy so that a subsequent repair run won't see * a dirty log. */ if (sick & XFS_SICK_FS_COUNTERS) xfs_fs_mark_healthy(mp, XFS_SICK_FS_COUNTERS); } } /* Mark unhealthy per-fs metadata. */ void xfs_fs_mark_sick( struct xfs_mount *mp, unsigned int mask) { ASSERT(!(mask & ~XFS_SICK_FS_ALL)); trace_xfs_fs_mark_sick(mp, mask); spin_lock(&mp->m_sb_lock); mp->m_fs_sick |= mask; spin_unlock(&mp->m_sb_lock); } /* Mark per-fs metadata as having been checked and found unhealthy by fsck. */ void xfs_fs_mark_corrupt( struct xfs_mount *mp, unsigned int mask) { ASSERT(!(mask & ~XFS_SICK_FS_ALL)); trace_xfs_fs_mark_corrupt(mp, mask); spin_lock(&mp->m_sb_lock); mp->m_fs_sick |= mask; mp->m_fs_checked |= mask; spin_unlock(&mp->m_sb_lock); } /* Mark a per-fs metadata healed. */ void xfs_fs_mark_healthy( struct xfs_mount *mp, unsigned int mask) { ASSERT(!(mask & ~XFS_SICK_FS_ALL)); trace_xfs_fs_mark_healthy(mp, mask); spin_lock(&mp->m_sb_lock); mp->m_fs_sick &= ~mask; if (!(mp->m_fs_sick & XFS_SICK_FS_PRIMARY)) mp->m_fs_sick &= ~XFS_SICK_FS_SECONDARY; mp->m_fs_checked |= mask; spin_unlock(&mp->m_sb_lock); } /* Sample which per-fs metadata are unhealthy. */ void xfs_fs_measure_sickness( struct xfs_mount *mp, unsigned int *sick, unsigned int *checked) { spin_lock(&mp->m_sb_lock); *sick = mp->m_fs_sick; *checked = mp->m_fs_checked; spin_unlock(&mp->m_sb_lock); } /* Mark unhealthy per-ag metadata given a raw AG number. */ void xfs_agno_mark_sick( struct xfs_mount *mp, xfs_agnumber_t agno, unsigned int mask) { struct xfs_perag *pag = xfs_perag_get(mp, agno); /* per-ag structure not set up yet? */ if (!pag) return; xfs_ag_mark_sick(pag, mask); xfs_perag_put(pag); } static inline void xfs_group_check_mask( struct xfs_group *xg, unsigned int mask) { if (xg->xg_type == XG_TYPE_AG) ASSERT(!(mask & ~XFS_SICK_AG_ALL)); else ASSERT(!(mask & ~XFS_SICK_RG_ALL)); } /* Mark unhealthy per-ag metadata. 
*/ void xfs_group_mark_sick( struct xfs_group *xg, unsigned int mask) { xfs_group_check_mask(xg, mask); trace_xfs_group_mark_sick(xg, mask); spin_lock(&xg->xg_state_lock); xg->xg_sick |= mask; spin_unlock(&xg->xg_state_lock); } /* * Mark per-group metadata as having been checked and found unhealthy by fsck. */ void xfs_group_mark_corrupt( struct xfs_group *xg, unsigned int mask) { xfs_group_check_mask(xg, mask); trace_xfs_group_mark_corrupt(xg, mask); spin_lock(&xg->xg_state_lock); xg->xg_sick |= mask; xg->xg_checked |= mask; spin_unlock(&xg->xg_state_lock); } /* * Mark per-group metadata ok. */ void xfs_group_mark_healthy( struct xfs_group *xg, unsigned int mask) { xfs_group_check_mask(xg, mask); trace_xfs_group_mark_healthy(xg, mask); spin_lock(&xg->xg_state_lock); xg->xg_sick &= ~mask; if (!(xg->xg_sick & XFS_SICK_AG_PRIMARY)) xg->xg_sick &= ~XFS_SICK_AG_SECONDARY; xg->xg_checked |= mask; spin_unlock(&xg->xg_state_lock); } /* Sample which per-ag metadata are unhealthy. */ void xfs_group_measure_sickness( struct xfs_group *xg, unsigned int *sick, unsigned int *checked) { spin_lock(&xg->xg_state_lock); *sick = xg->xg_sick; *checked = xg->xg_checked; spin_unlock(&xg->xg_state_lock); } /* Mark unhealthy per-rtgroup metadata given a raw rt group number. */ void xfs_rgno_mark_sick( struct xfs_mount *mp, xfs_rgnumber_t rgno, unsigned int mask) { struct xfs_rtgroup *rtg = xfs_rtgroup_get(mp, rgno); /* per-rtgroup structure not set up yet? */ if (!rtg) return; xfs_group_mark_sick(rtg_group(rtg), mask); xfs_rtgroup_put(rtg); } /* Mark the unhealthy parts of an inode. */ void xfs_inode_mark_sick( struct xfs_inode *ip, unsigned int mask) { ASSERT(!(mask & ~XFS_SICK_INO_ALL)); trace_xfs_inode_mark_sick(ip, mask); spin_lock(&ip->i_flags_lock); ip->i_sick |= mask; spin_unlock(&ip->i_flags_lock); /* * Keep this inode around so we don't lose the sickness report. Scrub * grabs inodes with DONTCACHE assuming that most inode are ok, which * is not the case here. */ spin_lock(&VFS_I(ip)->i_lock); VFS_I(ip)->i_state &= ~I_DONTCACHE; spin_unlock(&VFS_I(ip)->i_lock); } /* Mark inode metadata as having been checked and found unhealthy by fsck. */ void xfs_inode_mark_corrupt( struct xfs_inode *ip, unsigned int mask) { ASSERT(!(mask & ~XFS_SICK_INO_ALL)); trace_xfs_inode_mark_corrupt(ip, mask); spin_lock(&ip->i_flags_lock); ip->i_sick |= mask; ip->i_checked |= mask; spin_unlock(&ip->i_flags_lock); /* * Keep this inode around so we don't lose the sickness report. Scrub * grabs inodes with DONTCACHE assuming that most inode are ok, which * is not the case here. */ spin_lock(&VFS_I(ip)->i_lock); VFS_I(ip)->i_state &= ~I_DONTCACHE; spin_unlock(&VFS_I(ip)->i_lock); } /* Mark parts of an inode healed. */ void xfs_inode_mark_healthy( struct xfs_inode *ip, unsigned int mask) { ASSERT(!(mask & ~XFS_SICK_INO_ALL)); trace_xfs_inode_mark_healthy(ip, mask); spin_lock(&ip->i_flags_lock); ip->i_sick &= ~mask; if (!(ip->i_sick & XFS_SICK_INO_PRIMARY)) ip->i_sick &= ~XFS_SICK_INO_SECONDARY; ip->i_checked |= mask; spin_unlock(&ip->i_flags_lock); } /* Sample which parts of an inode are unhealthy. */ void xfs_inode_measure_sickness( struct xfs_inode *ip, unsigned int *sick, unsigned int *checked) { spin_lock(&ip->i_flags_lock); *sick = ip->i_sick; *checked = ip->i_checked; spin_unlock(&ip->i_flags_lock); } /* Mappings between internal sick masks and ioctl sick masks. 
*/ struct ioctl_sick_map { unsigned int sick_mask; unsigned int ioctl_mask; }; #define for_each_sick_map(map, m) \ for ((m) = (map); (m) < (map) + ARRAY_SIZE(map); (m)++) static const struct ioctl_sick_map fs_map[] = { { XFS_SICK_FS_COUNTERS, XFS_FSOP_GEOM_SICK_COUNTERS}, { XFS_SICK_FS_UQUOTA, XFS_FSOP_GEOM_SICK_UQUOTA }, { XFS_SICK_FS_GQUOTA, XFS_FSOP_GEOM_SICK_GQUOTA }, { XFS_SICK_FS_PQUOTA, XFS_FSOP_GEOM_SICK_PQUOTA }, { XFS_SICK_FS_QUOTACHECK, XFS_FSOP_GEOM_SICK_QUOTACHECK }, { XFS_SICK_FS_NLINKS, XFS_FSOP_GEOM_SICK_NLINKS }, { XFS_SICK_FS_METADIR, XFS_FSOP_GEOM_SICK_METADIR }, { XFS_SICK_FS_METAPATH, XFS_FSOP_GEOM_SICK_METAPATH }, }; static const struct ioctl_sick_map rt_map[] = { { XFS_SICK_RG_BITMAP, XFS_FSOP_GEOM_SICK_RT_BITMAP }, { XFS_SICK_RG_SUMMARY, XFS_FSOP_GEOM_SICK_RT_SUMMARY }, }; static inline void xfgeo_health_tick( struct xfs_fsop_geom *geo, unsigned int sick, unsigned int checked, const struct ioctl_sick_map *m) { if (checked & m->sick_mask) geo->checked |= m->ioctl_mask; if (sick & m->sick_mask) geo->sick |= m->ioctl_mask; } /* Fill out fs geometry health info. */ void xfs_fsop_geom_health( struct xfs_mount *mp, struct xfs_fsop_geom *geo) { struct xfs_rtgroup *rtg = NULL; const struct ioctl_sick_map *m; unsigned int sick; unsigned int checked; geo->sick = 0; geo->checked = 0; xfs_fs_measure_sickness(mp, &sick, &checked); for_each_sick_map(fs_map, m) xfgeo_health_tick(geo, sick, checked, m); while ((rtg = xfs_rtgroup_next(mp, rtg))) { xfs_group_measure_sickness(rtg_group(rtg), &sick, &checked); for_each_sick_map(rt_map, m) xfgeo_health_tick(geo, sick, checked, m); } } static const struct ioctl_sick_map ag_map[] = { { XFS_SICK_AG_SB, XFS_AG_GEOM_SICK_SB }, { XFS_SICK_AG_AGF, XFS_AG_GEOM_SICK_AGF }, { XFS_SICK_AG_AGFL, XFS_AG_GEOM_SICK_AGFL }, { XFS_SICK_AG_AGI, XFS_AG_GEOM_SICK_AGI }, { XFS_SICK_AG_BNOBT, XFS_AG_GEOM_SICK_BNOBT }, { XFS_SICK_AG_CNTBT, XFS_AG_GEOM_SICK_CNTBT }, { XFS_SICK_AG_INOBT, XFS_AG_GEOM_SICK_INOBT }, { XFS_SICK_AG_FINOBT, XFS_AG_GEOM_SICK_FINOBT }, { XFS_SICK_AG_RMAPBT, XFS_AG_GEOM_SICK_RMAPBT }, { XFS_SICK_AG_REFCNTBT, XFS_AG_GEOM_SICK_REFCNTBT }, { XFS_SICK_AG_INODES, XFS_AG_GEOM_SICK_INODES }, }; /* Fill out ag geometry health info. */ void xfs_ag_geom_health( struct xfs_perag *pag, struct xfs_ag_geometry *ageo) { const struct ioctl_sick_map *m; unsigned int sick; unsigned int checked; ageo->ag_sick = 0; ageo->ag_checked = 0; xfs_group_measure_sickness(pag_group(pag), &sick, &checked); for_each_sick_map(ag_map, m) { if (checked & m->sick_mask) ageo->ag_checked |= m->ioctl_mask; if (sick & m->sick_mask) ageo->ag_sick |= m->ioctl_mask; } } static const struct ioctl_sick_map rtgroup_map[] = { { XFS_SICK_RG_SUPER, XFS_RTGROUP_GEOM_SICK_SUPER }, { XFS_SICK_RG_BITMAP, XFS_RTGROUP_GEOM_SICK_BITMAP }, { XFS_SICK_RG_SUMMARY, XFS_RTGROUP_GEOM_SICK_SUMMARY }, { XFS_SICK_RG_RMAPBT, XFS_RTGROUP_GEOM_SICK_RMAPBT }, { XFS_SICK_RG_REFCNTBT, XFS_RTGROUP_GEOM_SICK_REFCNTBT }, }; /* Fill out rtgroup geometry health info. 
*/ void xfs_rtgroup_geom_health( struct xfs_rtgroup *rtg, struct xfs_rtgroup_geometry *rgeo) { const struct ioctl_sick_map *m; unsigned int sick; unsigned int checked; rgeo->rg_sick = 0; rgeo->rg_checked = 0; xfs_group_measure_sickness(rtg_group(rtg), &sick, &checked); for_each_sick_map(rtgroup_map, m) { if (checked & m->sick_mask) rgeo->rg_checked |= m->ioctl_mask; if (sick & m->sick_mask) rgeo->rg_sick |= m->ioctl_mask; } } static const struct ioctl_sick_map ino_map[] = { { XFS_SICK_INO_CORE, XFS_BS_SICK_INODE }, { XFS_SICK_INO_BMBTD, XFS_BS_SICK_BMBTD }, { XFS_SICK_INO_BMBTA, XFS_BS_SICK_BMBTA }, { XFS_SICK_INO_BMBTC, XFS_BS_SICK_BMBTC }, { XFS_SICK_INO_DIR, XFS_BS_SICK_DIR }, { XFS_SICK_INO_XATTR, XFS_BS_SICK_XATTR }, { XFS_SICK_INO_SYMLINK, XFS_BS_SICK_SYMLINK }, { XFS_SICK_INO_PARENT, XFS_BS_SICK_PARENT }, { XFS_SICK_INO_BMBTD_ZAPPED, XFS_BS_SICK_BMBTD }, { XFS_SICK_INO_BMBTA_ZAPPED, XFS_BS_SICK_BMBTA }, { XFS_SICK_INO_DIR_ZAPPED, XFS_BS_SICK_DIR }, { XFS_SICK_INO_SYMLINK_ZAPPED, XFS_BS_SICK_SYMLINK }, { XFS_SICK_INO_DIRTREE, XFS_BS_SICK_DIRTREE }, }; /* Fill out bulkstat health info. */ void xfs_bulkstat_health( struct xfs_inode *ip, struct xfs_bulkstat *bs) { const struct ioctl_sick_map *m; unsigned int sick; unsigned int checked; bs->bs_sick = 0; bs->bs_checked = 0; xfs_inode_measure_sickness(ip, &sick, &checked); for_each_sick_map(ino_map, m) { if (checked & m->sick_mask) bs->bs_checked |= m->ioctl_mask; if (sick & m->sick_mask) bs->bs_sick |= m->ioctl_mask; } } /* Mark a block mapping sick. */ void xfs_bmap_mark_sick( struct xfs_inode *ip, int whichfork) { unsigned int mask; switch (whichfork) { case XFS_DATA_FORK: mask = XFS_SICK_INO_BMBTD; break; case XFS_ATTR_FORK: mask = XFS_SICK_INO_BMBTA; break; case XFS_COW_FORK: mask = XFS_SICK_INO_BMBTC; break; default: ASSERT(0); return; } xfs_inode_mark_sick(ip, mask); } /* Record observations of btree corruption with the health tracking system. */ void xfs_btree_mark_sick( struct xfs_btree_cur *cur) { if (xfs_btree_is_bmap(cur->bc_ops)) { xfs_bmap_mark_sick(cur->bc_ino.ip, cur->bc_ino.whichfork); /* no health state tracking for ephemeral btrees */ } else if (cur->bc_ops->type != XFS_BTREE_TYPE_MEM) { ASSERT(cur->bc_group); ASSERT(cur->bc_ops->sick_mask); xfs_group_mark_sick(cur->bc_group, cur->bc_ops->sick_mask); } } /* * Record observations of dir/attr btree corruption with the health tracking * system. */ void xfs_dirattr_mark_sick( struct xfs_inode *ip, int whichfork) { unsigned int mask; switch (whichfork) { case XFS_DATA_FORK: mask = XFS_SICK_INO_DIR; break; case XFS_ATTR_FORK: mask = XFS_SICK_INO_XATTR; break; default: ASSERT(0); return; } xfs_inode_mark_sick(ip, mask); } /* * Record observations of dir/attr btree corruption with the health tracking * system. */ void xfs_da_mark_sick( struct xfs_da_args *args) { xfs_dirattr_mark_sick(args->dp, args->whichfork); }
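/*
 * Illustrative userspace sketch (not part of xfs_health.c): reading back the
 * per-fs health bits that xfs_fsop_geom_health() above fills in.  This
 * assumes xfsprogs' <xfs/xfs.h> header and the XFS_IOC_FSGEOMETRY ioctl;
 * the flag name comes from the fs_map[] table above, and the path argument
 * is any file or directory on the XFS filesystem of interest.
 */
#include <xfs/xfs.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct xfs_fsop_geom geo = { 0 };
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || ioctl(fd, XFS_IOC_FSGEOMETRY, &geo) < 0) {
		perror("XFS_IOC_FSGEOMETRY");
		return 1;
	}

	/* "checked" means the metadata was examined; "sick" means it failed. */
	if (geo.checked & XFS_FSOP_GEOM_SICK_COUNTERS)
		printf("summary counters: %s\n",
		       (geo.sick & XFS_FSOP_GEOM_SICK_COUNTERS) ? "sick" : "ok");
	close(fd);
	return 0;
}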
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/fcntl.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched/task.h>
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/file.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pipe_fs_i.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
#include <linux/user_namespace.h>
#include <linux/memfd.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/rw_hint.h>
#include <linux/poll.h>

#include <asm/siginfo.h>
#include <linux/uaccess.h>

#include "internal.h"

#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)

static int setfl(int fd, struct file * filp, unsigned int arg)
{
	struct inode * inode = file_inode(filp);
	int error = 0;

	/*
	 * O_APPEND cannot be cleared if the file is marked as append-only
	 * and the file is open for write.
	 */
	if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
		return -EPERM;

	/* O_NOATIME can only be set by the owner or superuser */
	if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
		if (!inode_owner_or_capable(file_mnt_idmap(filp), inode))
			return -EPERM;

	/* required for strict SunOS emulation */
	if (O_NONBLOCK != O_NDELAY)
		if (arg & O_NDELAY)
			arg |= O_NONBLOCK;

	/* Pipe packetized mode is controlled by O_DIRECT flag */
	if (!S_ISFIFO(inode->i_mode) && (arg & O_DIRECT) &&
	    !(filp->f_mode & FMODE_CAN_ODIRECT))
		return -EINVAL;

	if (filp->f_op->check_flags)
		error = filp->f_op->check_flags(arg);
	if (error)
		return error;

	/*
	 * ->fasync() is responsible for setting the FASYNC bit.
*/ if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op->fasync) { error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0); if (error < 0) goto out; if (error > 0) error = 0; } spin_lock(&filp->f_lock); filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK); filp->f_iocb_flags = iocb_flags(filp); spin_unlock(&filp->f_lock); out: return error; } /* * Allocate an file->f_owner struct if it doesn't exist, handling racing * allocations correctly. */ int file_f_owner_allocate(struct file *file) { struct fown_struct *f_owner; f_owner = file_f_owner(file); if (f_owner) return 0; f_owner = kzalloc(sizeof(struct fown_struct), GFP_KERNEL); if (!f_owner) return -ENOMEM; rwlock_init(&f_owner->lock); f_owner->file = file; /* If someone else raced us, drop our allocation. */ if (unlikely(cmpxchg(&file->f_owner, NULL, f_owner))) kfree(f_owner); return 0; } EXPORT_SYMBOL(file_f_owner_allocate); void file_f_owner_release(struct file *file) { struct fown_struct *f_owner; f_owner = file_f_owner(file); if (f_owner) { put_pid(f_owner->pid); kfree(f_owner); } } void __f_setown(struct file *filp, struct pid *pid, enum pid_type type, int force) { struct fown_struct *f_owner; f_owner = file_f_owner(filp); if (WARN_ON_ONCE(!f_owner)) return; write_lock_irq(&f_owner->lock); if (force || !f_owner->pid) { put_pid(f_owner->pid); f_owner->pid = get_pid(pid); f_owner->pid_type = type; if (pid) { const struct cred *cred = current_cred(); security_file_set_fowner(filp); f_owner->uid = cred->uid; f_owner->euid = cred->euid; } } write_unlock_irq(&f_owner->lock); } EXPORT_SYMBOL(__f_setown); int f_setown(struct file *filp, int who, int force) { enum pid_type type; struct pid *pid = NULL; int ret = 0; might_sleep(); type = PIDTYPE_TGID; if (who < 0) { /* avoid overflow below */ if (who == INT_MIN) return -EINVAL; type = PIDTYPE_PGID; who = -who; } ret = file_f_owner_allocate(filp); if (ret) return ret; rcu_read_lock(); if (who) { pid = find_vpid(who); if (!pid) ret = -ESRCH; } if (!ret) __f_setown(filp, pid, type, force); rcu_read_unlock(); return ret; } EXPORT_SYMBOL(f_setown); void f_delown(struct file *filp) { __f_setown(filp, NULL, PIDTYPE_TGID, 1); } pid_t f_getown(struct file *filp) { pid_t pid = 0; struct fown_struct *f_owner; f_owner = file_f_owner(filp); if (!f_owner) return pid; read_lock_irq(&f_owner->lock); rcu_read_lock(); if (pid_task(f_owner->pid, f_owner->pid_type)) { pid = pid_vnr(f_owner->pid); if (f_owner->pid_type == PIDTYPE_PGID) pid = -pid; } rcu_read_unlock(); read_unlock_irq(&f_owner->lock); return pid; } static int f_setown_ex(struct file *filp, unsigned long arg) { struct f_owner_ex __user *owner_p = (void __user *)arg; struct f_owner_ex owner; struct pid *pid; int type; int ret; ret = copy_from_user(&owner, owner_p, sizeof(owner)); if (ret) return -EFAULT; switch (owner.type) { case F_OWNER_TID: type = PIDTYPE_PID; break; case F_OWNER_PID: type = PIDTYPE_TGID; break; case F_OWNER_PGRP: type = PIDTYPE_PGID; break; default: return -EINVAL; } ret = file_f_owner_allocate(filp); if (ret) return ret; rcu_read_lock(); pid = find_vpid(owner.pid); if (owner.pid && !pid) ret = -ESRCH; else __f_setown(filp, pid, type, 1); rcu_read_unlock(); return ret; } static int f_getown_ex(struct file *filp, unsigned long arg) { struct f_owner_ex __user *owner_p = (void __user *)arg; struct f_owner_ex owner = {}; int ret = 0; struct fown_struct *f_owner; enum pid_type pid_type = PIDTYPE_PID; f_owner = file_f_owner(filp); if (f_owner) { read_lock_irq(&f_owner->lock); rcu_read_lock(); if (pid_task(f_owner->pid, 
f_owner->pid_type)) owner.pid = pid_vnr(f_owner->pid); rcu_read_unlock(); pid_type = f_owner->pid_type; } switch (pid_type) { case PIDTYPE_PID: owner.type = F_OWNER_TID; break; case PIDTYPE_TGID: owner.type = F_OWNER_PID; break; case PIDTYPE_PGID: owner.type = F_OWNER_PGRP; break; default: WARN_ON(1); ret = -EINVAL; break; } if (f_owner) read_unlock_irq(&f_owner->lock); if (!ret) { ret = copy_to_user(owner_p, &owner, sizeof(owner)); if (ret) ret = -EFAULT; } return ret; } #ifdef CONFIG_CHECKPOINT_RESTORE static int f_getowner_uids(struct file *filp, unsigned long arg) { struct user_namespace *user_ns = current_user_ns(); struct fown_struct *f_owner; uid_t __user *dst = (void __user *)arg; uid_t src[2] = {0, 0}; int err; f_owner = file_f_owner(filp); if (f_owner) { read_lock_irq(&f_owner->lock); src[0] = from_kuid(user_ns, f_owner->uid); src[1] = from_kuid(user_ns, f_owner->euid); read_unlock_irq(&f_owner->lock); } err = put_user(src[0], &dst[0]); err |= put_user(src[1], &dst[1]); return err; } #else static int f_getowner_uids(struct file *filp, unsigned long arg) { return -EINVAL; } #endif static bool rw_hint_valid(u64 hint) { BUILD_BUG_ON(WRITE_LIFE_NOT_SET != RWH_WRITE_LIFE_NOT_SET); BUILD_BUG_ON(WRITE_LIFE_NONE != RWH_WRITE_LIFE_NONE); BUILD_BUG_ON(WRITE_LIFE_SHORT != RWH_WRITE_LIFE_SHORT); BUILD_BUG_ON(WRITE_LIFE_MEDIUM != RWH_WRITE_LIFE_MEDIUM); BUILD_BUG_ON(WRITE_LIFE_LONG != RWH_WRITE_LIFE_LONG); BUILD_BUG_ON(WRITE_LIFE_EXTREME != RWH_WRITE_LIFE_EXTREME); switch (hint) { case RWH_WRITE_LIFE_NOT_SET: case RWH_WRITE_LIFE_NONE: case RWH_WRITE_LIFE_SHORT: case RWH_WRITE_LIFE_MEDIUM: case RWH_WRITE_LIFE_LONG: case RWH_WRITE_LIFE_EXTREME: return true; default: return false; } } static long fcntl_get_rw_hint(struct file *file, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(file); u64 __user *argp = (u64 __user *)arg; u64 hint = READ_ONCE(inode->i_write_hint); if (copy_to_user(argp, &hint, sizeof(*argp))) return -EFAULT; return 0; } static long fcntl_set_rw_hint(struct file *file, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(file); u64 __user *argp = (u64 __user *)arg; u64 hint; if (!inode_owner_or_capable(file_mnt_idmap(file), inode)) return -EPERM; if (copy_from_user(&hint, argp, sizeof(hint))) return -EFAULT; if (!rw_hint_valid(hint)) return -EINVAL; WRITE_ONCE(inode->i_write_hint, hint); /* * file->f_mapping->host may differ from inode. As an example, * blkdev_open() modifies file->f_mapping. */ if (file->f_mapping->host != inode) WRITE_ONCE(file->f_mapping->host->i_write_hint, hint); return 0; } /* Is the file descriptor a dup of the file? */ static long f_dupfd_query(int fd, struct file *filp) { CLASS(fd_raw, f)(fd); if (fd_empty(f)) return -EBADF; /* * We can do the 'fdput()' immediately, as the only thing that * matters is the pointer value which isn't changed by the fdput. * * Technically we didn't need a ref at all, and 'fdget()' was * overkill, but given our lockless file pointer lookup, the * alternatives are complicated. */ return fd_file(f) == filp; } /* Let the caller figure out whether a given file was just created. 
*/ static long f_created_query(const struct file *filp) { return !!(filp->f_mode & FMODE_CREATED); } static int f_owner_sig(struct file *filp, int signum, bool setsig) { int ret = 0; struct fown_struct *f_owner; might_sleep(); if (setsig) { if (!valid_signal(signum)) return -EINVAL; ret = file_f_owner_allocate(filp); if (ret) return ret; } f_owner = file_f_owner(filp); if (setsig) f_owner->signum = signum; else if (f_owner) ret = f_owner->signum; return ret; } static long do_fcntl(int fd, unsigned int cmd, unsigned long arg, struct file *filp) { void __user *argp = (void __user *)arg; int argi = (int)arg; struct flock flock; long err = -EINVAL; switch (cmd) { case F_CREATED_QUERY: err = f_created_query(filp); break; case F_DUPFD: err = f_dupfd(argi, filp, 0); break; case F_DUPFD_CLOEXEC: err = f_dupfd(argi, filp, O_CLOEXEC); break; case F_DUPFD_QUERY: err = f_dupfd_query(argi, filp); break; case F_GETFD: err = get_close_on_exec(fd) ? FD_CLOEXEC : 0; break; case F_SETFD: err = 0; set_close_on_exec(fd, argi & FD_CLOEXEC); break; case F_GETFL: err = filp->f_flags; break; case F_SETFL: err = setfl(fd, filp, argi); break; #if BITS_PER_LONG != 32 /* 32-bit arches must use fcntl64() */ case F_OFD_GETLK: #endif case F_GETLK: if (copy_from_user(&flock, argp, sizeof(flock))) return -EFAULT; err = fcntl_getlk(filp, cmd, &flock); if (!err && copy_to_user(argp, &flock, sizeof(flock))) return -EFAULT; break; #if BITS_PER_LONG != 32 /* 32-bit arches must use fcntl64() */ case F_OFD_SETLK: case F_OFD_SETLKW: fallthrough; #endif case F_SETLK: case F_SETLKW: if (copy_from_user(&flock, argp, sizeof(flock))) return -EFAULT; err = fcntl_setlk(fd, filp, cmd, &flock); break; case F_GETOWN: /* * XXX If f_owner is a process group, the * negative return value will get converted * into an error. Oops. If we keep the * current syscall conventions, the only way * to fix this will be in libc. 
*/ err = f_getown(filp); force_successful_syscall_return(); break; case F_SETOWN: err = f_setown(filp, argi, 1); break; case F_GETOWN_EX: err = f_getown_ex(filp, arg); break; case F_SETOWN_EX: err = f_setown_ex(filp, arg); break; case F_GETOWNER_UIDS: err = f_getowner_uids(filp, arg); break; case F_GETSIG: err = f_owner_sig(filp, 0, false); break; case F_SETSIG: err = f_owner_sig(filp, argi, true); break; case F_GETLEASE: err = fcntl_getlease(filp); break; case F_SETLEASE: err = fcntl_setlease(fd, filp, argi); break; case F_NOTIFY: err = fcntl_dirnotify(fd, filp, argi); break; case F_SETPIPE_SZ: case F_GETPIPE_SZ: err = pipe_fcntl(filp, cmd, argi); break; case F_ADD_SEALS: case F_GET_SEALS: err = memfd_fcntl(filp, cmd, argi); break; case F_GET_RW_HINT: err = fcntl_get_rw_hint(filp, cmd, arg); break; case F_SET_RW_HINT: err = fcntl_set_rw_hint(filp, cmd, arg); break; default: break; } return err; } static int check_fcntl_cmd(unsigned cmd) { switch (cmd) { case F_CREATED_QUERY: case F_DUPFD: case F_DUPFD_CLOEXEC: case F_DUPFD_QUERY: case F_GETFD: case F_SETFD: case F_GETFL: return 1; } return 0; } SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg) { CLASS(fd_raw, f)(fd); long err; if (fd_empty(f)) return -EBADF; if (unlikely(fd_file(f)->f_mode & FMODE_PATH)) { if (!check_fcntl_cmd(cmd)) return -EBADF; } err = security_file_fcntl(fd_file(f), cmd, arg); if (!err) err = do_fcntl(fd, cmd, arg, fd_file(f)); return err; } #if BITS_PER_LONG == 32 SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd, unsigned long, arg) { void __user *argp = (void __user *)arg; CLASS(fd_raw, f)(fd); struct flock64 flock; long err; if (fd_empty(f)) return -EBADF; if (unlikely(fd_file(f)->f_mode & FMODE_PATH)) { if (!check_fcntl_cmd(cmd)) return -EBADF; } err = security_file_fcntl(fd_file(f), cmd, arg); if (err) return err; switch (cmd) { case F_GETLK64: case F_OFD_GETLK: err = -EFAULT; if (copy_from_user(&flock, argp, sizeof(flock))) break; err = fcntl_getlk64(fd_file(f), cmd, &flock); if (!err && copy_to_user(argp, &flock, sizeof(flock))) err = -EFAULT; break; case F_SETLK64: case F_SETLKW64: case F_OFD_SETLK: case F_OFD_SETLKW: err = -EFAULT; if (copy_from_user(&flock, argp, sizeof(flock))) break; err = fcntl_setlk64(fd, fd_file(f), cmd, &flock); break; default: err = do_fcntl(fd, cmd, arg, fd_file(f)); break; } return err; } #endif #ifdef CONFIG_COMPAT /* careful - don't use anywhere else */ #define copy_flock_fields(dst, src) \ (dst)->l_type = (src)->l_type; \ (dst)->l_whence = (src)->l_whence; \ (dst)->l_start = (src)->l_start; \ (dst)->l_len = (src)->l_len; \ (dst)->l_pid = (src)->l_pid; static int get_compat_flock(struct flock *kfl, const struct compat_flock __user *ufl) { struct compat_flock fl; if (copy_from_user(&fl, ufl, sizeof(struct compat_flock))) return -EFAULT; copy_flock_fields(kfl, &fl); return 0; } static int get_compat_flock64(struct flock *kfl, const struct compat_flock64 __user *ufl) { struct compat_flock64 fl; if (copy_from_user(&fl, ufl, sizeof(struct compat_flock64))) return -EFAULT; copy_flock_fields(kfl, &fl); return 0; } static int put_compat_flock(const struct flock *kfl, struct compat_flock __user *ufl) { struct compat_flock fl; memset(&fl, 0, sizeof(struct compat_flock)); copy_flock_fields(&fl, kfl); if (copy_to_user(ufl, &fl, sizeof(struct compat_flock))) return -EFAULT; return 0; } static int put_compat_flock64(const struct flock *kfl, struct compat_flock64 __user *ufl) { struct compat_flock64 fl; BUILD_BUG_ON(sizeof(kfl->l_start) > 
sizeof(ufl->l_start)); BUILD_BUG_ON(sizeof(kfl->l_len) > sizeof(ufl->l_len)); memset(&fl, 0, sizeof(struct compat_flock64)); copy_flock_fields(&fl, kfl); if (copy_to_user(ufl, &fl, sizeof(struct compat_flock64))) return -EFAULT; return 0; } #undef copy_flock_fields static unsigned int convert_fcntl_cmd(unsigned int cmd) { switch (cmd) { case F_GETLK64: return F_GETLK; case F_SETLK64: return F_SETLK; case F_SETLKW64: return F_SETLKW; } return cmd; } /* * GETLK was successful and we need to return the data, but it needs to fit in * the compat structure. * l_start shouldn't be too big, unless the original start + end is greater than * COMPAT_OFF_T_MAX, in which case the app was asking for trouble, so we return * -EOVERFLOW in that case. l_len could be too big, in which case we just * truncate it, and only allow the app to see that part of the conflicting lock * that might make sense to it anyway */ static int fixup_compat_flock(struct flock *flock) { if (flock->l_start > COMPAT_OFF_T_MAX) return -EOVERFLOW; if (flock->l_len > COMPAT_OFF_T_MAX) flock->l_len = COMPAT_OFF_T_MAX; return 0; } static long do_compat_fcntl64(unsigned int fd, unsigned int cmd, compat_ulong_t arg) { CLASS(fd_raw, f)(fd); struct flock flock; long err; if (fd_empty(f)) return -EBADF; if (unlikely(fd_file(f)->f_mode & FMODE_PATH)) { if (!check_fcntl_cmd(cmd)) return -EBADF; } err = security_file_fcntl(fd_file(f), cmd, arg); if (err) return err; switch (cmd) { case F_GETLK: err = get_compat_flock(&flock, compat_ptr(arg)); if (err) break; err = fcntl_getlk(fd_file(f), convert_fcntl_cmd(cmd), &flock); if (err) break; err = fixup_compat_flock(&flock); if (!err) err = put_compat_flock(&flock, compat_ptr(arg)); break; case F_GETLK64: case F_OFD_GETLK: err = get_compat_flock64(&flock, compat_ptr(arg)); if (err) break; err = fcntl_getlk(fd_file(f), convert_fcntl_cmd(cmd), &flock); if (!err) err = put_compat_flock64(&flock, compat_ptr(arg)); break; case F_SETLK: case F_SETLKW: err = get_compat_flock(&flock, compat_ptr(arg)); if (err) break; err = fcntl_setlk(fd, fd_file(f), convert_fcntl_cmd(cmd), &flock); break; case F_SETLK64: case F_SETLKW64: case F_OFD_SETLK: case F_OFD_SETLKW: err = get_compat_flock64(&flock, compat_ptr(arg)); if (err) break; err = fcntl_setlk(fd, fd_file(f), convert_fcntl_cmd(cmd), &flock); break; default: err = do_fcntl(fd, cmd, arg, fd_file(f)); break; } return err; } COMPAT_SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd, compat_ulong_t, arg) { return do_compat_fcntl64(fd, cmd, arg); } COMPAT_SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, compat_ulong_t, arg) { switch (cmd) { case F_GETLK64: case F_SETLK64: case F_SETLKW64: case F_OFD_GETLK: case F_OFD_SETLK: case F_OFD_SETLKW: return -EINVAL; } return do_compat_fcntl64(fd, cmd, arg); } #endif /* Table to convert sigio signal codes into poll band bitmaps */ static const __poll_t band_table[NSIGPOLL] = { EPOLLIN | EPOLLRDNORM, /* POLL_IN */ EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND, /* POLL_OUT */ EPOLLIN | EPOLLRDNORM | EPOLLMSG, /* POLL_MSG */ EPOLLERR, /* POLL_ERR */ EPOLLPRI | EPOLLRDBAND, /* POLL_PRI */ EPOLLHUP | EPOLLERR /* POLL_HUP */ }; static inline int sigio_perm(struct task_struct *p, struct fown_struct *fown, int sig) { const struct cred *cred; int ret; rcu_read_lock(); cred = __task_cred(p); ret = ((uid_eq(fown->euid, GLOBAL_ROOT_UID) || uid_eq(fown->euid, cred->suid) || uid_eq(fown->euid, cred->uid) || uid_eq(fown->uid, cred->suid) || uid_eq(fown->uid, cred->uid)) && !security_file_send_sigiotask(p, fown, sig)); 
rcu_read_unlock(); return ret; } static void send_sigio_to_task(struct task_struct *p, struct fown_struct *fown, int fd, int reason, enum pid_type type) { /* * F_SETSIG can change ->signum lockless in parallel, make * sure we read it once and use the same value throughout. */ int signum = READ_ONCE(fown->signum); if (!sigio_perm(p, fown, signum)) return; switch (signum) { default: { kernel_siginfo_t si; /* Queue a rt signal with the appropriate fd as its value. We use SI_SIGIO as the source, not SI_KERNEL, since kernel signals always get delivered even if we can't queue. Failure to queue in this case _should_ be reported; we fall back to SIGIO in that case. --sct */ clear_siginfo(&si); si.si_signo = signum; si.si_errno = 0; si.si_code = reason; /* * Posix definies POLL_IN and friends to be signal * specific si_codes for SIG_POLL. Linux extended * these si_codes to other signals in a way that is * ambiguous if other signals also have signal * specific si_codes. In that case use SI_SIGIO instead * to remove the ambiguity. */ if ((signum != SIGPOLL) && sig_specific_sicodes(signum)) si.si_code = SI_SIGIO; /* Make sure we are called with one of the POLL_* reasons, otherwise we could leak kernel stack into userspace. */ BUG_ON((reason < POLL_IN) || ((reason - POLL_IN) >= NSIGPOLL)); if (reason - POLL_IN >= NSIGPOLL) si.si_band = ~0L; else si.si_band = mangle_poll(band_table[reason - POLL_IN]); si.si_fd = fd; if (!do_send_sig_info(signum, &si, p, type)) break; } fallthrough; /* fall back on the old plain SIGIO signal */ case 0: do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, type); } } void send_sigio(struct fown_struct *fown, int fd, int band) { struct task_struct *p; enum pid_type type; unsigned long flags; struct pid *pid; read_lock_irqsave(&fown->lock, flags); type = fown->pid_type; pid = fown->pid; if (!pid) goto out_unlock_fown; if (type <= PIDTYPE_TGID) { rcu_read_lock(); p = pid_task(pid, PIDTYPE_PID); if (p) send_sigio_to_task(p, fown, fd, band, type); rcu_read_unlock(); } else { read_lock(&tasklist_lock); do_each_pid_task(pid, type, p) { send_sigio_to_task(p, fown, fd, band, type); } while_each_pid_task(pid, type, p); read_unlock(&tasklist_lock); } out_unlock_fown: read_unlock_irqrestore(&fown->lock, flags); } static void send_sigurg_to_task(struct task_struct *p, struct fown_struct *fown, enum pid_type type) { if (sigio_perm(p, fown, SIGURG)) do_send_sig_info(SIGURG, SEND_SIG_PRIV, p, type); } int send_sigurg(struct file *file) { struct fown_struct *fown; struct task_struct *p; enum pid_type type; struct pid *pid; unsigned long flags; int ret = 0; fown = file_f_owner(file); if (!fown) return 0; read_lock_irqsave(&fown->lock, flags); type = fown->pid_type; pid = fown->pid; if (!pid) goto out_unlock_fown; ret = 1; if (type <= PIDTYPE_TGID) { rcu_read_lock(); p = pid_task(pid, PIDTYPE_PID); if (p) send_sigurg_to_task(p, fown, type); rcu_read_unlock(); } else { read_lock(&tasklist_lock); do_each_pid_task(pid, type, p) { send_sigurg_to_task(p, fown, type); } while_each_pid_task(pid, type, p); read_unlock(&tasklist_lock); } out_unlock_fown: read_unlock_irqrestore(&fown->lock, flags); return ret; } static DEFINE_SPINLOCK(fasync_lock); static struct kmem_cache *fasync_cache __ro_after_init; /* * Remove a fasync entry. If successfully removed, return * positive and clear the FASYNC flag. If no entry exists, * do nothing and return 0. * * NOTE! It is very important that the FASYNC flag always * match the state "is the filp on a fasync list". 
* */ int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp) { struct fasync_struct *fa, **fp; int result = 0; spin_lock(&filp->f_lock); spin_lock(&fasync_lock); for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) { if (fa->fa_file != filp) continue; write_lock_irq(&fa->fa_lock); fa->fa_file = NULL; write_unlock_irq(&fa->fa_lock); *fp = fa->fa_next; kfree_rcu(fa, fa_rcu); filp->f_flags &= ~FASYNC; result = 1; break; } spin_unlock(&fasync_lock); spin_unlock(&filp->f_lock); return result; } struct fasync_struct *fasync_alloc(void) { return kmem_cache_alloc(fasync_cache, GFP_KERNEL); } /* * NOTE! This can be used only for unused fasync entries: * entries that actually got inserted on the fasync list * need to be released by rcu - see fasync_remove_entry. */ void fasync_free(struct fasync_struct *new) { kmem_cache_free(fasync_cache, new); } /* * Insert a new entry into the fasync list. Return the pointer to the * old one if we didn't use the new one. * * NOTE! It is very important that the FASYNC flag always * match the state "is the filp on a fasync list". */ struct fasync_struct *fasync_insert_entry(int fd, struct file *filp, struct fasync_struct **fapp, struct fasync_struct *new) { struct fasync_struct *fa, **fp; spin_lock(&filp->f_lock); spin_lock(&fasync_lock); for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) { if (fa->fa_file != filp) continue; write_lock_irq(&fa->fa_lock); fa->fa_fd = fd; write_unlock_irq(&fa->fa_lock); goto out; } rwlock_init(&new->fa_lock); new->magic = FASYNC_MAGIC; new->fa_file = filp; new->fa_fd = fd; new->fa_next = *fapp; rcu_assign_pointer(*fapp, new); filp->f_flags |= FASYNC; out: spin_unlock(&fasync_lock); spin_unlock(&filp->f_lock); return fa; } /* * Add a fasync entry. Return negative on error, positive if * added, and zero if did nothing but change an existing one. */ static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp) { struct fasync_struct *new; new = fasync_alloc(); if (!new) return -ENOMEM; /* * fasync_insert_entry() returns the old (update) entry if * it existed. * * So free the (unused) new entry and return 0 to let the * caller know that we didn't add any new fasync entries. */ if (fasync_insert_entry(fd, filp, fapp, new)) { fasync_free(new); return 0; } return 1; } /* * fasync_helper() is used by almost all character device drivers * to set up the fasync queue, and for regular files by the file * lease code. It returns negative on error, 0 if it did no changes * and positive if it added/deleted the entry. */ int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp) { if (!on) return fasync_remove_entry(filp, fapp); return fasync_add_entry(fd, filp, fapp); } EXPORT_SYMBOL(fasync_helper); /* * rcu_read_lock() is held */ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band) { while (fa) { struct fown_struct *fown; unsigned long flags; if (fa->magic != FASYNC_MAGIC) { printk(KERN_ERR "kill_fasync: bad magic number in " "fasync_struct!\n"); return; } read_lock_irqsave(&fa->fa_lock, flags); if (fa->fa_file) { fown = file_f_owner(fa->fa_file); if (!fown) goto next; /* Don't send SIGURG to processes which have not set a queued signum: SIGURG has its own default signalling mechanism. 
*/ if (!(sig == SIGURG && fown->signum == 0)) send_sigio(fown, fa->fa_fd, band); } next: read_unlock_irqrestore(&fa->fa_lock, flags); fa = rcu_dereference(fa->fa_next); } } void kill_fasync(struct fasync_struct **fp, int sig, int band) { /* First a quick test without locking: usually * the list is empty. */ if (*fp) { rcu_read_lock(); kill_fasync_rcu(rcu_dereference(*fp), sig, band); rcu_read_unlock(); } } EXPORT_SYMBOL(kill_fasync); static int __init fcntl_init(void) { /* * Please add new bits here to ensure allocation uniqueness. * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY * is defined as O_NONBLOCK on some platforms and not on others. */ BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( (VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) | __FMODE_EXEC)); fasync_cache = kmem_cache_create("fasync_cache", sizeof(struct fasync_struct), 0, SLAB_PANIC | SLAB_ACCOUNT, NULL); return 0; } module_init(fcntl_init)
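For reference, the SIGIO/fasync paths above are normally driven from userspace through fcntl(). Below is a minimal, illustrative userspace sketch (not part of the kernel sources; the handler name and pipe-based setup are made up for the example, and error handling is trimmed). It arms O_ASYNC with F_SETOWN/F_SETSIG on a pipe, which exercises fasync_helper(), kill_fasync() and send_sigio() shown above.

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t got_fd = -1;
static volatile long got_band;

static void rt_handler(int sig, siginfo_t *si, void *ctx)
{
        (void)sig;
        (void)ctx;
        /* si_fd and si_band were filled in by send_sigio_to_task() */
        got_fd = si->si_fd;
        got_band = si->si_band;
}

int main(void)
{
        struct sigaction sa;
        int fds[2];

        if (pipe(fds))
                return 1;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = rt_handler;
        sa.sa_flags = SA_SIGINFO;
        sigaction(SIGRTMIN, &sa, NULL);

        /* deliver I/O notifications for the read end to this process */
        fcntl(fds[0], F_SETOWN, getpid());
        /* request a queued RT signal instead of plain SIGIO */
        fcntl(fds[0], F_SETSIG, SIGRTMIN);
        /* O_ASYNC arms the fasync list handled by fasync_helper() */
        fcntl(fds[0], F_SETFL, fcntl(fds[0], F_GETFL) | O_ASYNC);

        write(fds[1], "x", 1);  /* pipe wakeup -> kill_fasync() -> send_sigio() */
        sleep(1);               /* give the signal time to arrive */

        printf("fd=%d band=%#lx\n", (int)got_fd, got_band);
        return 0;
}

With F_SETSIG set to a real-time signal, si_fd and si_band arrive via the siginfo filled in by send_sigio_to_task(); with F_SETSIG left at 0, the plain-SIGIO fallback path in that function is taken instead.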
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ALSA timer back-end using hrtimer
 * Copyright (C) 2008 Takashi Iwai
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/hrtimer.h>
#include <sound/core.h>
#include <sound/timer.h>

MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
MODULE_DESCRIPTION("ALSA hrtimer backend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("snd-timer-" __stringify(SNDRV_TIMER_GLOBAL_HRTIMER));

#define NANO_SEC 1000000000UL	/* 10^9 in sec */
static unsigned int resolution;

struct snd_hrtimer {
	struct snd_timer *timer;
	struct hrtimer hrt;
	bool in_callback;
};

static enum hrtimer_restart snd_hrtimer_callback(struct hrtimer *hrt)
{
	struct snd_hrtimer *stime = container_of(hrt, struct snd_hrtimer, hrt);
	struct snd_timer *t = stime->timer;
	ktime_t delta;
	unsigned long ticks;
	enum hrtimer_restart ret = HRTIMER_NORESTART;

	scoped_guard(spinlock, &t->lock) {
		if (!t->running)
			return HRTIMER_NORESTART; /* fast path */
		stime->in_callback = true;
		ticks = t->sticks;
	}

	/* calculate the drift */
	delta = ktime_sub(hrt->base->get_time(), hrtimer_get_expires(hrt));
	if (delta > 0)
		ticks += ktime_divns(delta, ticks * resolution);

	snd_timer_interrupt(stime->timer, ticks);

	guard(spinlock)(&t->lock);
	if (t->running) {
		hrtimer_add_expires_ns(hrt, t->sticks * resolution);
		ret = HRTIMER_RESTART;
	}

	stime->in_callback = false;
	return ret;
}

static int snd_hrtimer_open(struct snd_timer *t)
{
	struct snd_hrtimer *stime;

	stime = kzalloc(sizeof(*stime), GFP_KERNEL);
	if (!stime)
		return -ENOMEM;
	stime->timer = t;
	hrtimer_setup(&stime->hrt, snd_hrtimer_callback, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
	t->private_data = stime;
	return 0;
}

static int snd_hrtimer_close(struct snd_timer *t)
{
	struct snd_hrtimer *stime = t->private_data;

	if (stime) {
		scoped_guard(spinlock_irq, &t->lock) {
			t->running = 0; /* just to be sure */
			stime->in_callback = 1; /* skip start/stop */
		}

		hrtimer_cancel(&stime->hrt);
		kfree(stime);
		t->private_data = NULL;
	}
	return 0;
}

static int snd_hrtimer_start(struct snd_timer *t)
{
	struct snd_hrtimer *stime = t->private_data;

	if (stime->in_callback)
		return 0;
	hrtimer_start(&stime->hrt, ns_to_ktime(t->sticks * resolution),
		      HRTIMER_MODE_REL);
	return 0;
}

static int snd_hrtimer_stop(struct snd_timer *t)
{
	struct snd_hrtimer *stime = t->private_data;

	if (stime->in_callback)
		return 0;
	hrtimer_try_to_cancel(&stime->hrt);
	return 0;
}

static const struct snd_timer_hardware hrtimer_hw __initconst = {
	.flags = SNDRV_TIMER_HW_AUTO | SNDRV_TIMER_HW_WORK,
	.open = snd_hrtimer_open,
	.close = snd_hrtimer_close,
	.start = snd_hrtimer_start,
	.stop = snd_hrtimer_stop,
};

/*
 * entry functions
 */

static struct snd_timer *mytimer;

static int __init snd_hrtimer_init(void)
{
	struct snd_timer *timer;
	int err;

	resolution = hrtimer_resolution;

	/* Create a new timer and set up the fields */
	err = snd_timer_global_new("hrtimer", SNDRV_TIMER_GLOBAL_HRTIMER,
				   &timer);
	if (err < 0)
return err; timer->module = THIS_MODULE; strcpy(timer->name, "HR timer"); timer->hw = hrtimer_hw; timer->hw.resolution = resolution; timer->hw.ticks = NANO_SEC / resolution; timer->max_instances = 100; /* lower the limit */ err = snd_timer_global_register(timer); if (err < 0) { snd_timer_global_free(timer); return err; } mytimer = timer; /* remember this */ return 0; } static void __exit snd_hrtimer_exit(void) { if (mytimer) { snd_timer_global_free(mytimer); mytimer = NULL; } } module_init(snd_hrtimer_init); module_exit(snd_hrtimer_exit);
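The driver above is also a compact illustration of the general periodic-hrtimer pattern: set up the timer with hrtimer_setup(), start it relative to now, and re-arm it from the callback by pushing the expiry forward by whole periods so that callback latency does not accumulate as drift. A stripped-down sketch of that pattern follows (hypothetical demo_* names, not part of the ALSA code; it uses hrtimer_forward_now() instead of the manual hrtimer_add_expires_ns() drift handling in snd_hrtimer_callback()).

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/module.h>

static struct hrtimer demo_hrt;		/* hypothetical demo timer */
static u64 demo_period_ns = 1000000;	/* 1 ms, arbitrary period */

static enum hrtimer_restart demo_cb(struct hrtimer *hrt)
{
	/*
	 * Advance the expiry in whole periods until it lies in the
	 * future, so the timer stays phase-locked to its period even
	 * if the callback ran late.
	 */
	hrtimer_forward_now(hrt, ns_to_ktime(demo_period_ns));
	return HRTIMER_RESTART;
}

static int __init demo_init(void)
{
	hrtimer_setup(&demo_hrt, demo_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_start(&demo_hrt, ns_to_ktime(demo_period_ns), HRTIMER_MODE_REL);
	return 0;
}

static void __exit demo_exit(void)
{
	hrtimer_cancel(&demo_hrt);	/* wait for a running callback to finish */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");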
// SPDX-License-Identifier: (GPL-2.0-only OR Apache-2.0)
/*
 * Generic implementation of the BLAKE2b digest algorithm. Based on the BLAKE2b
 * reference implementation, but it has been heavily modified for use in the
 * kernel. The reference implementation was:
 *
 *	Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under
 *	the terms of the CC0, the OpenSSL Licence, or the Apache Public License
 *	2.0, at your option. The terms of these licenses can be found at:
 *
 *	- CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
 *	- OpenSSL license   : https://www.openssl.org/source/license.html
 *	- Apache 2.0        : https://www.apache.org/licenses/LICENSE-2.0
 *
 * More information about BLAKE2 can be found at https://blake2.net.
 */

#include <crypto/internal/blake2b.h>
#include <crypto/internal/hash.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/unaligned.h>

static const u8 blake2b_sigma[12][16] = {
	{  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15 },
	{ 14, 10,  4,  8,  9, 15, 13,  6,  1, 12,  0,  2, 11,  7,  5,  3 },
	{ 11,  8, 12,  0,  5,  2, 15, 13, 10, 14,  3,  6,  7,  1,  9,  4 },
	{  7,  9,  3,  1, 13, 12, 11, 14,  2,  6,  5, 10,  4,  0, 15,  8 },
	{  9,  0,  5,  7,  2,  4, 10, 15, 14,  1, 11, 12,  6,  8,  3, 13 },
	{  2, 12,  6, 10,  0, 11,  8,  3,  4, 13,  7,  5, 15, 14,  1,  9 },
	{ 12,  5,  1, 15, 14, 13,  4, 10,  0,  7,  6,  3,  9,  2,  8, 11 },
	{ 13, 11,  7, 14, 12,  1,  3,  9,  5,  0, 15,  4,  8,  6,  2, 10 },
	{  6, 15, 14,  9, 11,  3,  0,  8, 12,  2, 13,  7,  1,  4, 10,  5 },
	{ 10,  2,  8,  4,  7,  6,  1,  5, 15, 11,  9, 14,  3, 12, 13,  0 },
	{  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15 },
	{ 14, 10,  4,  8,  9, 15, 13,  6,  1, 12,  0,  2, 11,  7,  5,  3 }
};

static void blake2b_increment_counter(struct blake2b_state *S, const u64 inc)
{
	S->t[0] += inc;
	S->t[1] += (S->t[0] < inc);
}

#define G(r,i,a,b,c,d)					\
	do {						\
		a = a + b + m[blake2b_sigma[r][2*i+0]];	\
		d = ror64(d ^ a, 32);			\
		c = c + d;				\
		b = ror64(b ^ c, 24);			\
		a = a + b + m[blake2b_sigma[r][2*i+1]];	\
		d = ror64(d ^ a, 16);			\
		c = c + d;				\
		b = ror64(b ^ c, 63);			\
	} while (0)

#define ROUND(r)				\
	do {					\
		G(r,0,v[ 0],v[ 4],v[ 8],v[12]);	\
		G(r,1,v[ 1],v[ 5],v[ 9],v[13]);	\
		G(r,2,v[ 2],v[ 6],v[10],v[14]);	\
		G(r,3,v[ 3],v[ 7],v[11],v[15]);	\
		G(r,4,v[ 0],v[ 5],v[10],v[15]);	\
		G(r,5,v[ 1],v[ 6],v[11],v[12]);	\
		G(r,6,v[ 2],v[ 7],v[ 8],v[13]);	\
		G(r,7,v[ 3],v[ 4],v[ 9],v[14]);	\
	} while (0)

static void blake2b_compress_one_generic(struct blake2b_state *S,
					 const u8 block[BLAKE2B_BLOCK_SIZE])
{
	u64 m[16];
	u64 v[16];
	size_t i;

	for (i = 0; i < 16; ++i)
		m[i] = get_unaligned_le64(block + i * sizeof(m[i]));

	for (i = 0; i < 8; ++i)
		v[i] = S->h[i];

	v[ 8] = BLAKE2B_IV0;
	v[ 9] = BLAKE2B_IV1;
	v[10] = BLAKE2B_IV2;
	v[11] = BLAKE2B_IV3;
	v[12] = BLAKE2B_IV4 ^ S->t[0];
	v[13] = BLAKE2B_IV5 ^ S->t[1];
	v[14] = BLAKE2B_IV6 ^ S->f[0];
	v[15] = BLAKE2B_IV7 ^ S->f[1];

	ROUND(0);
	ROUND(1);
	ROUND(2);
	ROUND(3);
	ROUND(4);
ROUND(5); ROUND(6); ROUND(7); ROUND(8); ROUND(9); ROUND(10); ROUND(11); #ifdef CONFIG_CC_IS_CLANG #pragma nounroll /* https://llvm.org/pr45803 */ #endif for (i = 0; i < 8; ++i) S->h[i] = S->h[i] ^ v[i] ^ v[i + 8]; } #undef G #undef ROUND static void blake2b_compress_generic(struct blake2b_state *state, const u8 *block, size_t nblocks, u32 inc) { do { blake2b_increment_counter(state, inc); blake2b_compress_one_generic(state, block); block += BLAKE2B_BLOCK_SIZE; } while (--nblocks); } static int crypto_blake2b_update_generic(struct shash_desc *desc, const u8 *in, unsigned int inlen) { return crypto_blake2b_update_bo(desc, in, inlen, blake2b_compress_generic); } static int crypto_blake2b_finup_generic(struct shash_desc *desc, const u8 *in, unsigned int inlen, u8 *out) { return crypto_blake2b_finup(desc, in, inlen, out, blake2b_compress_generic); } #define BLAKE2B_ALG(name, driver_name, digest_size) \ { \ .base.cra_name = name, \ .base.cra_driver_name = driver_name, \ .base.cra_priority = 100, \ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY | \ CRYPTO_AHASH_ALG_BLOCK_ONLY | \ CRYPTO_AHASH_ALG_FINAL_NONZERO, \ .base.cra_blocksize = BLAKE2B_BLOCK_SIZE, \ .base.cra_ctxsize = sizeof(struct blake2b_tfm_ctx), \ .base.cra_module = THIS_MODULE, \ .digestsize = digest_size, \ .setkey = crypto_blake2b_setkey, \ .init = crypto_blake2b_init, \ .update = crypto_blake2b_update_generic, \ .finup = crypto_blake2b_finup_generic, \ .descsize = BLAKE2B_DESC_SIZE, \ .statesize = BLAKE2B_STATE_SIZE, \ } static struct shash_alg blake2b_algs[] = { BLAKE2B_ALG("blake2b-160", "blake2b-160-generic", BLAKE2B_160_HASH_SIZE), BLAKE2B_ALG("blake2b-256", "blake2b-256-generic", BLAKE2B_256_HASH_SIZE), BLAKE2B_ALG("blake2b-384", "blake2b-384-generic", BLAKE2B_384_HASH_SIZE), BLAKE2B_ALG("blake2b-512", "blake2b-512-generic", BLAKE2B_512_HASH_SIZE), }; static int __init blake2b_mod_init(void) { return crypto_register_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs)); } static void __exit blake2b_mod_fini(void) { crypto_unregister_shashes(blake2b_algs, ARRAY_SIZE(blake2b_algs)); } module_init(blake2b_mod_init); module_exit(blake2b_mod_fini); MODULE_AUTHOR("David Sterba <kdave@kernel.org>"); MODULE_DESCRIPTION("BLAKE2b generic implementation"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("blake2b-160"); MODULE_ALIAS_CRYPTO("blake2b-160-generic"); MODULE_ALIAS_CRYPTO("blake2b-256"); MODULE_ALIAS_CRYPTO("blake2b-256-generic"); MODULE_ALIAS_CRYPTO("blake2b-384"); MODULE_ALIAS_CRYPTO("blake2b-384-generic"); MODULE_ALIAS_CRYPTO("blake2b-512"); MODULE_ALIAS_CRYPTO("blake2b-512-generic");
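The blake2b-*-generic shash algorithms registered above can be exercised from userspace through the kernel crypto API's AF_ALG socket interface, assuming CONFIG_CRYPTO_USER_API_HASH is enabled in the running kernel. A minimal, illustrative sketch (error handling trimmed) that hashes the string "abc" with blake2b-256:

#include <linux/if_alg.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "blake2b-256",	/* one of the algorithms above */
	};
	unsigned char digest[32];		/* BLAKE2B_256_HASH_SIZE */
	int tfm, req, i;

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfm < 0 || bind(tfm, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return 1;
	req = accept(tfm, NULL, 0);		/* per-request operation socket */
	if (req < 0)
		return 1;

	send(req, "abc", 3, 0);			/* message to hash */
	if (read(req, digest, sizeof(digest)) != (ssize_t)sizeof(digest))
		return 1;

	for (i = 0; i < (int)sizeof(digest); i++)
		printf("%02x", digest[i]);
	printf("\n");

	close(req);
	close(tfm);
	return 0;
}

Larger messages can be streamed by passing MSG_MORE to send() for all but the final chunk; the digest is read back once the last chunk has been submitted.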
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_IO_H
#define _ASM_X86_IO_H

/*
 * This file contains the definitions for the x86 IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated
 * to (a) handle it all in a way that makes gcc able to optimize it
 * as well as possible and (b) trying to avoid writing the same thing
 * over and over again with slight variations and possibly making a
 * mistake somewhere.
 */

/*
 * Thanks to James van Artsdalen for a better timing-fix than
 * the two short jumps: using outb's to a nonexistent port seems
 * to guarantee better timings even on fast machines.
 *
 * On the other hand, I'd like to be sure of a non-existent port:
 * I feel a bit unsafe about using 0x80 (should be safe, though)
 *
 *		Linus
 */

/*
 * Bit simplified and optimized by Jan Hubicka
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
* * isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added, * isa_read[wl] and isa_write[wl] fixed * - Arnaldo Carvalho de Melo <acme@conectiva.com.br> */ #include <linux/string.h> #include <linux/compiler.h> #include <linux/cc_platform.h> #include <asm/page.h> #include <asm/early_ioremap.h> #include <asm/pgtable_types.h> #include <asm/shared/io.h> #include <asm/special_insns.h> #define build_mmio_read(name, size, type, reg, barrier) \ static inline type name(const volatile void __iomem *addr) \ { type ret; asm volatile("mov" size " %1,%0":reg (ret) \ :"m" (*(volatile type __force *)addr) barrier); return ret; } #define build_mmio_write(name, size, type, reg, barrier) \ static inline void name(type val, volatile void __iomem *addr) \ { asm volatile("mov" size " %0,%1": :reg (val), \ "m" (*(volatile type __force *)addr) barrier); } build_mmio_read(readb, "b", unsigned char, "=q", :"memory") build_mmio_read(readw, "w", unsigned short, "=r", :"memory") build_mmio_read(readl, "l", unsigned int, "=r", :"memory") build_mmio_read(__readb, "b", unsigned char, "=q", ) build_mmio_read(__readw, "w", unsigned short, "=r", ) build_mmio_read(__readl, "l", unsigned int, "=r", ) build_mmio_write(writeb, "b", unsigned char, "q", :"memory") build_mmio_write(writew, "w", unsigned short, "r", :"memory") build_mmio_write(writel, "l", unsigned int, "r", :"memory") build_mmio_write(__writeb, "b", unsigned char, "q", ) build_mmio_write(__writew, "w", unsigned short, "r", ) build_mmio_write(__writel, "l", unsigned int, "r", ) #define readb readb #define readw readw #define readl readl #define readb_relaxed(a) __readb(a) #define readw_relaxed(a) __readw(a) #define readl_relaxed(a) __readl(a) #define __raw_readb __readb #define __raw_readw __readw #define __raw_readl __readl #define writeb writeb #define writew writew #define writel writel #define writeb_relaxed(v, a) __writeb(v, a) #define writew_relaxed(v, a) __writew(v, a) #define writel_relaxed(v, a) __writel(v, a) #define __raw_writeb __writeb #define __raw_writew __writew #define __raw_writel __writel #ifdef CONFIG_X86_64 build_mmio_read(readq, "q", u64, "=r", :"memory") build_mmio_read(__readq, "q", u64, "=r", ) build_mmio_write(writeq, "q", u64, "r", :"memory") build_mmio_write(__writeq, "q", u64, "r", ) #define readq_relaxed(a) __readq(a) #define writeq_relaxed(v, a) __writeq(v, a) #define __raw_readq __readq #define __raw_writeq __writeq /* Let people know that we have them */ #define readq readq #define writeq writeq #endif #define ARCH_HAS_VALID_PHYS_ADDR_RANGE extern int valid_phys_addr_range(phys_addr_t addr, size_t size); extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); /** * virt_to_phys - map virtual addresses to physical * @address: address to remap * * The returned physical address is the physical (CPU) mapping for * the memory address given. It is only valid to use this function on * addresses directly mapped or allocated via kmalloc. * * This function does not give bus mappings for DMA transfers. In * almost all conceivable cases a device driver should not be using * this function */ static inline phys_addr_t virt_to_phys(volatile void *address) { return __pa(address); } #define virt_to_phys virt_to_phys /** * phys_to_virt - map physical address to virtual * @address: address to remap * * The returned virtual address is a current CPU mapping for * the memory address given. It is only valid to use this function on * addresses that have a kernel mapping * * This function does not handle bus mappings for DMA transfers. 
In * almost all conceivable cases a device driver should not be using * this function */ static inline void *phys_to_virt(phys_addr_t address) { return __va(address); } #define phys_to_virt phys_to_virt /* * ISA I/O bus memory addresses are 1:1 with the physical address. * However, we truncate the address to unsigned int to avoid undesirable * promotions in legacy drivers. */ static inline unsigned int isa_virt_to_bus(volatile void *address) { return (unsigned int)virt_to_phys(address); } #define isa_bus_to_virt phys_to_virt /* * The default ioremap() behavior is non-cached; if you need something * else, you probably want one of the following. */ extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size); #define ioremap_uc ioremap_uc extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size); #define ioremap_cache ioremap_cache extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, pgprot_t prot); #define ioremap_prot ioremap_prot extern void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size); #define ioremap_encrypted ioremap_encrypted void *arch_memremap_wb(phys_addr_t phys_addr, size_t size, unsigned long flags); #define arch_memremap_wb arch_memremap_wb /** * ioremap - map bus memory into CPU space * @offset: bus address of the memory * @size: size of the resource to map * * ioremap performs a platform specific sequence of operations to * make bus memory CPU accessible via the readb/readw/readl/writeb/ * writew/writel functions and the other mmio helpers. The returned * address is not guaranteed to be usable directly as a virtual * address. * * If the area you are trying to map is a PCI BAR you should have a * look at pci_iomap(). */ void __iomem *ioremap(resource_size_t offset, unsigned long size); #define ioremap ioremap extern void iounmap(volatile void __iomem *addr); #define iounmap iounmap #ifdef __KERNEL__ void memcpy_fromio(void *, const volatile void __iomem *, size_t); void memcpy_toio(volatile void __iomem *, const void *, size_t); void memset_io(volatile void __iomem *, int, size_t); #define memcpy_fromio memcpy_fromio #define memcpy_toio memcpy_toio #define memset_io memset_io #ifdef CONFIG_X86_64 /* * Commit 0f07496144c2 ("[PATCH] Add faster __iowrite32_copy routine for * x86_64") says that circa 2006 rep movsl is noticeably faster than a copy * loop. */ static inline void __iowrite32_copy(void __iomem *to, const void *from, size_t count) { asm volatile("rep movsl" : "=&c"(count), "=&D"(to), "=&S"(from) : "0"(count), "1"(to), "2"(from) : "memory"); } #define __iowrite32_copy __iowrite32_copy #endif /* * ISA space is 'always mapped' on a typical x86 system, no need to * explicitly ioremap() it. The fact that the ISA IO space is mapped * to PAGE_OFFSET is pure coincidence - it does not mean ISA values * are physical addresses. 
The following constant pointer can be * used as the IO-area pointer (it can be iounmapped as well, so the * analogy with PCI is quite large): */ #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET)) #endif /* __KERNEL__ */ extern void native_io_delay(void); extern int io_delay_type; extern void io_delay_init(void); #if defined(CONFIG_PARAVIRT) #include <asm/paravirt.h> #else static inline void slow_down_io(void) { native_io_delay(); #ifdef REALLY_SLOW_IO native_io_delay(); native_io_delay(); native_io_delay(); #endif } #endif #define BUILDIO(bwl, type) \ static inline void out##bwl##_p(type value, u16 port) \ { \ out##bwl(value, port); \ slow_down_io(); \ } \ \ static inline type in##bwl##_p(u16 port) \ { \ type value = in##bwl(port); \ slow_down_io(); \ return value; \ } \ \ static inline void outs##bwl(u16 port, const void *addr, unsigned long count) \ { \ if (cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO)) { \ type *value = (type *)addr; \ while (count) { \ out##bwl(*value, port); \ value++; \ count--; \ } \ } else { \ asm volatile("rep outs" #bwl \ : "+S"(addr), "+c"(count) \ : "d"(port) : "memory"); \ } \ } \ \ static inline void ins##bwl(u16 port, void *addr, unsigned long count) \ { \ if (cc_platform_has(CC_ATTR_GUEST_UNROLL_STRING_IO)) { \ type *value = (type *)addr; \ while (count) { \ *value = in##bwl(port); \ value++; \ count--; \ } \ } else { \ asm volatile("rep ins" #bwl \ : "+D"(addr), "+c"(count) \ : "d"(port) : "memory"); \ } \ } BUILDIO(b, u8) BUILDIO(w, u16) BUILDIO(l, u32) #undef BUILDIO #define inb_p inb_p #define inw_p inw_p #define inl_p inl_p #define insb insb #define insw insw #define insl insl #define outb_p outb_p #define outw_p outw_p #define outl_p outl_p #define outsb outsb #define outsw outsw #define outsl outsl extern void *xlate_dev_mem_ptr(phys_addr_t phys); extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr); #define xlate_dev_mem_ptr xlate_dev_mem_ptr #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr extern int ioremap_change_attr(unsigned long vaddr, unsigned long size, enum page_cache_mode pcm); extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size); #define ioremap_wc ioremap_wc extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size); #define ioremap_wt ioremap_wt extern bool is_early_ioremap_ptep(pte_t *ptep); #define IO_SPACE_LIMIT 0xffff #include <asm-generic/io.h> #undef PCI_IOBASE #ifdef CONFIG_MTRR extern int __must_check arch_phys_wc_index(int handle); #define arch_phys_wc_index arch_phys_wc_index extern int __must_check arch_phys_wc_add(unsigned long base, unsigned long size); extern void arch_phys_wc_del(int handle); #define arch_phys_wc_add arch_phys_wc_add #endif #ifdef CONFIG_X86_PAT extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size); extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size); #define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc #endif #ifdef CONFIG_AMD_MEM_ENCRYPT extern bool arch_memremap_can_ram_remap(resource_size_t offset, unsigned long size, unsigned long flags); #define arch_memremap_can_ram_remap arch_memremap_can_ram_remap extern bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size); #else static inline bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size) { return true; } #endif /** * iosubmit_cmds512 - copy data to single MMIO location, in 512-bit units * @dst: destination, in MMIO space (must be 512-bit aligned) * @src: source * @count: number of 512 bits 
quantities to submit * * Submit data from kernel space to MMIO space, in units of 512 bits at a * time. Order of access is not guaranteed, nor is a memory barrier * performed afterwards. * * Warning: Do not use this helper unless your driver has checked that the CPU * instruction is supported on the platform. */ static inline void iosubmit_cmds512(void __iomem *dst, const void *src, size_t count) { const u8 *from = src; const u8 *end = from + count * 64; while (from < end) { movdir64b_io(dst, from); from += 64; } } #endif /* _ASM_X86_IO_H */
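As a usage illustration of the MMIO helpers declared above, a driver typically maps a device's register window with ioremap() and accesses it with readl()/writel(). The sketch below is hypothetical (the DEMO_* base address and register offsets are made up for the example, not taken from any real device):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical register window, for illustration only */
#define DEMO_MMIO_BASE	0xfed00000UL	/* made-up physical address */
#define DEMO_MMIO_SIZE	0x100
#define DEMO_REG_CTRL	0x00
#define DEMO_REG_STATUS	0x04

static int demo_mmio_poke(void)
{
	void __iomem *regs;
	u32 status;

	/* non-cached MMIO mapping, as described in the ioremap() comment above */
	regs = ioremap(DEMO_MMIO_BASE, DEMO_MMIO_SIZE);
	if (!regs)
		return -ENOMEM;

	/* readl()/writel() are the ordered accessors built by build_mmio_*() */
	writel(0x1, regs + DEMO_REG_CTRL);
	status = readl(regs + DEMO_REG_STATUS);

	iounmap(regs);
	return status ? 0 : -EIO;
}

For memory behind a PCI BAR, pci_iomap() is usually preferred over a raw ioremap(), as the ioremap() kernel-doc above notes.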
// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for USB Mass Storage compliant devices
 *
 * Current development and maintenance by:
 *   (c) 1999-2003 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
 *
 * Developed with the assistance of:
 *   (c) 2000 David L. Brown, Jr. (usb-storage@davidb.org)
 *   (c) 2003-2009 Alan Stern (stern@rowland.harvard.edu)
 *
 * Initial work by:
 *   (c) 1999 Michael Gee (michael@linuxspecific.com)
 *
 * usb_device_id support by Adam J. Richter (adam@yggdrasil.com):
 *   (c) 2000 Yggdrasil Computing, Inc.
 *
 * This driver is based on the 'USB Mass Storage Class' document. This
 * describes in detail the protocol used to communicate with such
 * devices. Clearly, the designers had SCSI and ATAPI commands in
 * mind when they created this document. The commands are all very
 * similar to commands in the SCSI-II and ATAPI specifications.
 *
 * It is important to note that in a number of cases this class
 * exhibits class-specific exemptions from the USB specification.
 * Notably the usage of NAK, STALL and ACK differs from the norm, in
 * that they are used to communicate wait, failed and OK on commands.
 *
 * Also, for certain devices, the interrupt endpoint is used to convey
 * status of a command.
*/ #ifdef CONFIG_USB_STORAGE_DEBUG #define DEBUG #endif #include <linux/sched.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/kthread.h> #include <linux/mutex.h> #include <linux/utsname.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include "usb.h" #include <linux/usb/hcd.h> #include "scsiglue.h" #include "transport.h" #include "protocol.h" #include "debug.h" #include "initializers.h" #include "sierra_ms.h" #include "option_ms.h" #if IS_ENABLED(CONFIG_USB_UAS) #include "uas-detect.h" #endif #define DRV_NAME "usb-storage" /* Some informational data */ MODULE_AUTHOR("Matthew Dharm <mdharm-usb@one-eyed-alien.net>"); MODULE_DESCRIPTION("USB Mass Storage driver for Linux"); MODULE_LICENSE("GPL"); static unsigned int delay_use = 1 * MSEC_PER_SEC; /** * parse_delay_str - parse an unsigned decimal integer delay * @str: String to parse. * @ndecimals: Number of decimal to scale up. * @suffix: Suffix string to parse. * @val: Where to store the parsed value. * * Parse an unsigned decimal value in @str, optionally end with @suffix. * Stores the parsed value in @val just as it is if @str ends with @suffix. * Otherwise store the value scale up by 10^(@ndecimal). * * Returns 0 on success, a negative error code otherwise. */ static int parse_delay_str(const char *str, int ndecimals, const char *suffix, unsigned int *val) { int n, n2, l; char buf[16]; l = strlen(suffix); n = strlen(str); if (n > 0 && str[n - 1] == '\n') --n; if (n >= l && !strncmp(&str[n - l], suffix, l)) { n -= l; n2 = 0; } else n2 = ndecimals; if (n + n2 > sizeof(buf) - 1) return -EINVAL; memcpy(buf, str, n); while (n2-- > 0) buf[n++] = '0'; buf[n] = 0; return kstrtouint(buf, 10, val); } /** * format_delay_ms - format an integer value into a delay string * @val: The integer value to format, scaled by 10^(@ndecimals). * @ndecimals: Number of decimal to scale down. * @suffix: Suffix string to format. * @str: Where to store the formatted string. * @size: The size of buffer for @str. * * Format an integer value in @val scale down by 10^(@ndecimals) without @suffix * if @val is divisible by 10^(@ndecimals). * Otherwise format a value in @val just as it is with @suffix * * Returns the number of characters written into @str. 
*/ static int format_delay_ms(unsigned int val, int ndecimals, const char *suffix, char *str, int size) { u64 delay_ms = val; unsigned int rem = do_div(delay_ms, int_pow(10, ndecimals)); int ret; if (rem) ret = scnprintf(str, size, "%u%s\n", val, suffix); else ret = scnprintf(str, size, "%u\n", (unsigned int)delay_ms); return ret; } static int delay_use_set(const char *s, const struct kernel_param *kp) { unsigned int delay_ms; int ret; ret = parse_delay_str(skip_spaces(s), 3, "ms", &delay_ms); if (ret < 0) return ret; *((unsigned int *)kp->arg) = delay_ms; return 0; } static int delay_use_get(char *s, const struct kernel_param *kp) { unsigned int delay_ms = *((unsigned int *)kp->arg); return format_delay_ms(delay_ms, 3, "ms", s, PAGE_SIZE); } static const struct kernel_param_ops delay_use_ops = { .set = delay_use_set, .get = delay_use_get, }; module_param_cb(delay_use, &delay_use_ops, &delay_use, 0644); MODULE_PARM_DESC(delay_use, "time to delay before using a new device"); static char quirks[128]; module_param_string(quirks, quirks, sizeof(quirks), S_IRUGO | S_IWUSR); MODULE_PARM_DESC(quirks, "supplemental list of device IDs and their quirks"); /* * The entries in this table correspond, line for line, * with the entries in usb_storage_usb_ids[], defined in usual-tables.c. */ /* *The vendor name should be kept at eight characters or less, and * the product name should be kept at 16 characters or less. If a device * has the US_FL_FIX_INQUIRY flag, then the vendor and product names * normally generated by a device through the INQUIRY response will be * taken from this list, and this is the reason for the above size * restriction. However, if the flag is not present, then you * are free to use as many characters as you like. */ #define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \ vendor_name, product_name, use_protocol, use_transport, \ init_function, Flags) \ { \ .vendorName = vendor_name, \ .productName = product_name, \ .useProtocol = use_protocol, \ .useTransport = use_transport, \ .initFunction = init_function, \ } #define COMPLIANT_DEV UNUSUAL_DEV #define USUAL_DEV(use_protocol, use_transport) \ { \ .useProtocol = use_protocol, \ .useTransport = use_transport, \ } static const struct us_unusual_dev us_unusual_dev_list[] = { # include "unusual_devs.h" { } /* Terminating entry */ }; static const struct us_unusual_dev for_dynamic_ids = USUAL_DEV(USB_SC_SCSI, USB_PR_BULK); #undef UNUSUAL_DEV #undef COMPLIANT_DEV #undef USUAL_DEV #ifdef CONFIG_LOCKDEP static struct lock_class_key us_interface_key[USB_MAXINTERFACES]; static void us_set_lock_class(struct mutex *mutex, struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); struct usb_host_config *config = udev->actconfig; int i; for (i = 0; i < config->desc.bNumInterfaces; i++) { if (config->interface[i] == intf) break; } BUG_ON(i == config->desc.bNumInterfaces); lockdep_set_class(mutex, &us_interface_key[i]); } #else static void us_set_lock_class(struct mutex *mutex, struct usb_interface *intf) { } #endif #ifdef CONFIG_PM /* Minimal support for suspend and resume */ int usb_stor_suspend(struct usb_interface *iface, pm_message_t message) { struct us_data *us = usb_get_intfdata(iface); /* Wait until no command is running */ mutex_lock(&us->dev_mutex); if (us->suspend_resume_hook) (us->suspend_resume_hook)(us, US_SUSPEND); /* * When runtime PM is working, we'll set a flag to indicate * whether we should autoresume when a SCSI request arrives. 
*/ mutex_unlock(&us->dev_mutex); return 0; } EXPORT_SYMBOL_GPL(usb_stor_suspend); int usb_stor_resume(struct usb_interface *iface) { struct us_data *us = usb_get_intfdata(iface); mutex_lock(&us->dev_mutex); if (us->suspend_resume_hook) (us->suspend_resume_hook)(us, US_RESUME); mutex_unlock(&us->dev_mutex); return 0; } EXPORT_SYMBOL_GPL(usb_stor_resume); int usb_stor_reset_resume(struct usb_interface *iface) { struct us_data *us = usb_get_intfdata(iface); /* Report the reset to the SCSI core */ usb_stor_report_bus_reset(us); /* * If any of the subdrivers implemented a reinitialization scheme, * this is where the callback would be invoked. */ return 0; } EXPORT_SYMBOL_GPL(usb_stor_reset_resume); #endif /* CONFIG_PM */ /* * The next two routines get called just before and just after * a USB port reset, whether from this driver or a different one. */ int usb_stor_pre_reset(struct usb_interface *iface) { struct us_data *us = usb_get_intfdata(iface); /* Make sure no command runs during the reset */ mutex_lock(&us->dev_mutex); return 0; } EXPORT_SYMBOL_GPL(usb_stor_pre_reset); int usb_stor_post_reset(struct usb_interface *iface) { struct us_data *us = usb_get_intfdata(iface); /* Report the reset to the SCSI core */ usb_stor_report_bus_reset(us); /* * If any of the subdrivers implemented a reinitialization scheme, * this is where the callback would be invoked. */ mutex_unlock(&us->dev_mutex); return 0; } EXPORT_SYMBOL_GPL(usb_stor_post_reset); /* * fill_inquiry_response takes an unsigned char array (which must * be at least 36 characters) and populates the vendor name, * product name, and revision fields. Then the array is copied * into the SCSI command's response buffer (oddly enough * called request_buffer). data_len contains the length of the * data array, which again must be at least 36. */ void fill_inquiry_response(struct us_data *us, unsigned char *data, unsigned int data_len) { if (data_len < 36) /* You lose. */ return; memset(data+8, ' ', 28); if (data[0]&0x20) { /* * USB device currently not connected. Return * peripheral qualifier 001b ("...however, the * physical device is not currently connected * to this logical unit") and leave vendor and * product identification empty. ("If the target * does store some of the INQUIRY data on the * device, it may return zeros or ASCII spaces * (20h) in those fields until the data is * available from the device."). 
*/ } else { u16 bcdDevice = le16_to_cpu(us->pusb_dev->descriptor.bcdDevice); int n; n = strlen(us->unusual_dev->vendorName); memcpy(data+8, us->unusual_dev->vendorName, min(8, n)); n = strlen(us->unusual_dev->productName); memcpy(data+16, us->unusual_dev->productName, min(16, n)); data[32] = 0x30 + ((bcdDevice>>12) & 0x0F); data[33] = 0x30 + ((bcdDevice>>8) & 0x0F); data[34] = 0x30 + ((bcdDevice>>4) & 0x0F); data[35] = 0x30 + ((bcdDevice) & 0x0F); } usb_stor_set_xfer_buf(data, data_len, us->srb); } EXPORT_SYMBOL_GPL(fill_inquiry_response); static int usb_stor_control_thread(void * __us) { struct us_data *us = (struct us_data *)__us; struct Scsi_Host *host = us_to_host(us); struct scsi_cmnd *srb; for (;;) { usb_stor_dbg(us, "*** thread sleeping\n"); if (wait_for_completion_interruptible(&us->cmnd_ready)) break; usb_stor_dbg(us, "*** thread awakened\n"); /* lock the device pointers */ mutex_lock(&(us->dev_mutex)); /* lock access to the state */ scsi_lock(host); /* When we are called with no command pending, we're done */ srb = us->srb; if (srb == NULL) { scsi_unlock(host); mutex_unlock(&us->dev_mutex); usb_stor_dbg(us, "-- exiting\n"); break; } /* has the command timed out *already* ? */ if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) { srb->result = DID_ABORT << 16; goto SkipForAbort; } scsi_unlock(host); /* * reject the command if the direction indicator * is UNKNOWN */ if (srb->sc_data_direction == DMA_BIDIRECTIONAL) { usb_stor_dbg(us, "UNKNOWN data direction\n"); srb->result = DID_ERROR << 16; } /* * reject if target != 0 or if LUN is higher than * the maximum known LUN */ else if (srb->device->id && !(us->fflags & US_FL_SCM_MULT_TARG)) { usb_stor_dbg(us, "Bad target number (%d:%llu)\n", srb->device->id, srb->device->lun); srb->result = DID_BAD_TARGET << 16; } else if (srb->device->lun > us->max_lun) { usb_stor_dbg(us, "Bad LUN (%d:%llu)\n", srb->device->id, srb->device->lun); srb->result = DID_BAD_TARGET << 16; } /* * Handle those devices which need us to fake * their inquiry data */ else if ((srb->cmnd[0] == INQUIRY) && (us->fflags & US_FL_FIX_INQUIRY)) { unsigned char data_ptr[36] = { 0x00, 0x80, 0x02, 0x02, 0x1F, 0x00, 0x00, 0x00}; usb_stor_dbg(us, "Faking INQUIRY command\n"); fill_inquiry_response(us, data_ptr, 36); srb->result = SAM_STAT_GOOD; } /* we've got a command, let's do it! */ else { US_DEBUG(usb_stor_show_command(us, srb)); us->proto_handler(srb, us); usb_mark_last_busy(us->pusb_dev); } /* lock access to the state */ scsi_lock(host); /* was the command aborted? */ if (srb->result == DID_ABORT << 16) { SkipForAbort: usb_stor_dbg(us, "scsi command aborted\n"); srb = NULL; /* Don't call scsi_done() */ } /* * If an abort request was received we need to signal that * the abort has finished. The proper test for this is * the TIMED_OUT flag, not srb->result == DID_ABORT, because * the timeout might have occurred after the command had * already completed with a different result code. 
*/ if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) { complete(&(us->notify)); /* Allow USB transfers to resume */ clear_bit(US_FLIDX_ABORTING, &us->dflags); clear_bit(US_FLIDX_TIMED_OUT, &us->dflags); } /* finished working on this command */ us->srb = NULL; scsi_unlock(host); /* unlock the device pointers */ mutex_unlock(&us->dev_mutex); /* now that the locks are released, notify the SCSI core */ if (srb) { usb_stor_dbg(us, "scsi cmd done, result=0x%x\n", srb->result); scsi_done_direct(srb); } } /* for (;;) */ /* Wait until we are told to stop */ for (;;) { set_current_state(TASK_INTERRUPTIBLE); if (kthread_should_stop()) break; schedule(); } __set_current_state(TASK_RUNNING); return 0; } /*********************************************************************** * Device probing and disconnecting ***********************************************************************/ /* Associate our private data with the USB device */ static int associate_dev(struct us_data *us, struct usb_interface *intf) { /* Fill in the device-related fields */ us->pusb_dev = interface_to_usbdev(intf); us->pusb_intf = intf; us->ifnum = intf->cur_altsetting->desc.bInterfaceNumber; usb_stor_dbg(us, "Vendor: 0x%04x, Product: 0x%04x, Revision: 0x%04x\n", le16_to_cpu(us->pusb_dev->descriptor.idVendor), le16_to_cpu(us->pusb_dev->descriptor.idProduct), le16_to_cpu(us->pusb_dev->descriptor.bcdDevice)); usb_stor_dbg(us, "Interface Subclass: 0x%02x, Protocol: 0x%02x\n", intf->cur_altsetting->desc.bInterfaceSubClass, intf->cur_altsetting->desc.bInterfaceProtocol); /* Store our private data in the interface */ usb_set_intfdata(intf, us); /* Allocate the control/setup and DMA-mapped buffers */ us->cr = kmalloc(sizeof(*us->cr), GFP_KERNEL); if (!us->cr) return -ENOMEM; us->iobuf = usb_alloc_coherent(us->pusb_dev, US_IOBUF_SIZE, GFP_KERNEL, &us->iobuf_dma); if (!us->iobuf) { usb_stor_dbg(us, "I/O buffer allocation failed\n"); return -ENOMEM; } return 0; } /* Works only for digits and letters, but small and fast */ #define TOLOWER(x) ((x) | 0x20) /* Adjust device flags based on the "quirks=" module parameter */ void usb_stor_adjust_quirks(struct usb_device *udev, u64 *fflags) { char *p; u16 vid = le16_to_cpu(udev->descriptor.idVendor); u16 pid = le16_to_cpu(udev->descriptor.idProduct); u64 f = 0; u64 mask = (US_FL_SANE_SENSE | US_FL_BAD_SENSE | US_FL_FIX_CAPACITY | US_FL_IGNORE_UAS | US_FL_CAPACITY_HEURISTICS | US_FL_IGNORE_DEVICE | US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 | US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE | US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT | US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 | US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE | US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES | US_FL_MAX_SECTORS_240 | US_FL_NO_REPORT_LUNS | US_FL_ALWAYS_SYNC); p = quirks; while (*p) { /* Each entry consists of VID:PID:flags */ if (vid == simple_strtoul(p, &p, 16) && *p == ':' && pid == simple_strtoul(p+1, &p, 16) && *p == ':') break; /* Move forward to the next entry */ while (*p) { if (*p++ == ',') break; } } if (!*p) /* No match */ return; /* Collect the flags */ while (*++p && *p != ',') { switch (TOLOWER(*p)) { case 'a': f |= US_FL_SANE_SENSE; break; case 'b': f |= US_FL_BAD_SENSE; break; case 'c': f |= US_FL_FIX_CAPACITY; break; case 'd': f |= US_FL_NO_READ_DISC_INFO; break; case 'e': f |= US_FL_NO_READ_CAPACITY_16; break; case 'f': f |= US_FL_NO_REPORT_OPCODES; break; case 'g': f |= US_FL_MAX_SECTORS_240; break; case 'h': f |= US_FL_CAPACITY_HEURISTICS; break; case 'i': f |= US_FL_IGNORE_DEVICE; break; case 'j': f |= 
US_FL_NO_REPORT_LUNS; break; case 'k': f |= US_FL_NO_SAME; break; case 'l': f |= US_FL_NOT_LOCKABLE; break; case 'm': f |= US_FL_MAX_SECTORS_64; break; case 'n': f |= US_FL_INITIAL_READ10; break; case 'o': f |= US_FL_CAPACITY_OK; break; case 'p': f |= US_FL_WRITE_CACHE; break; case 'r': f |= US_FL_IGNORE_RESIDUE; break; case 's': f |= US_FL_SINGLE_LUN; break; case 't': f |= US_FL_NO_ATA_1X; break; case 'u': f |= US_FL_IGNORE_UAS; break; case 'w': f |= US_FL_NO_WP_DETECT; break; case 'y': f |= US_FL_ALWAYS_SYNC; break; /* Ignore unrecognized flag characters */ } } *fflags = (*fflags & ~mask) | f; } EXPORT_SYMBOL_GPL(usb_stor_adjust_quirks); /* Get the unusual_devs entries and the string descriptors */ static int get_device_info(struct us_data *us, const struct usb_device_id *id, const struct us_unusual_dev *unusual_dev) { struct usb_device *dev = us->pusb_dev; struct usb_interface_descriptor *idesc = &us->pusb_intf->cur_altsetting->desc; struct device *pdev = &us->pusb_intf->dev; /* Store the entries */ us->unusual_dev = unusual_dev; us->subclass = (unusual_dev->useProtocol == USB_SC_DEVICE) ? idesc->bInterfaceSubClass : unusual_dev->useProtocol; us->protocol = (unusual_dev->useTransport == USB_PR_DEVICE) ? idesc->bInterfaceProtocol : unusual_dev->useTransport; us->fflags = id->driver_info; usb_stor_adjust_quirks(us->pusb_dev, &us->fflags); if (us->fflags & US_FL_IGNORE_DEVICE) { dev_info(pdev, "device ignored\n"); return -ENODEV; } /* * This flag is only needed when we're in high-speed, so let's * disable it if we're in full-speed */ if (dev->speed != USB_SPEED_HIGH) us->fflags &= ~US_FL_GO_SLOW; if (us->fflags) dev_info(pdev, "Quirks match for vid %04x pid %04x: %llx\n", le16_to_cpu(dev->descriptor.idVendor), le16_to_cpu(dev->descriptor.idProduct), us->fflags); /* * Log a message if a non-generic unusual_dev entry contains an * unnecessary subclass or protocol override. This may stimulate * reports from users that will help us remove unneeded entries * from the unusual_devs.h table. 
*/ if (id->idVendor || id->idProduct) { static const char *msgs[3] = { "an unneeded SubClass entry", "an unneeded Protocol entry", "unneeded SubClass and Protocol entries"}; struct usb_device_descriptor *ddesc = &dev->descriptor; int msg = -1; if (unusual_dev->useProtocol != USB_SC_DEVICE && us->subclass == idesc->bInterfaceSubClass) msg += 1; if (unusual_dev->useTransport != USB_PR_DEVICE && us->protocol == idesc->bInterfaceProtocol) msg += 2; if (msg >= 0 && !(us->fflags & US_FL_NEED_OVERRIDE)) dev_notice(pdev, "This device " "(%04x,%04x,%04x S %02x P %02x)" " has %s in unusual_devs.h (kernel" " %s)\n" " Please send a copy of this message to " "<linux-usb@vger.kernel.org> and " "<usb-storage@lists.one-eyed-alien.net>\n", le16_to_cpu(ddesc->idVendor), le16_to_cpu(ddesc->idProduct), le16_to_cpu(ddesc->bcdDevice), idesc->bInterfaceSubClass, idesc->bInterfaceProtocol, msgs[msg], utsname()->release); } return 0; } /* Get the transport settings */ static void get_transport(struct us_data *us) { switch (us->protocol) { case USB_PR_CB: us->transport_name = "Control/Bulk"; us->transport = usb_stor_CB_transport; us->transport_reset = usb_stor_CB_reset; us->max_lun = 7; break; case USB_PR_CBI: us->transport_name = "Control/Bulk/Interrupt"; us->transport = usb_stor_CB_transport; us->transport_reset = usb_stor_CB_reset; us->max_lun = 7; break; case USB_PR_BULK: us->transport_name = "Bulk"; us->transport = usb_stor_Bulk_transport; us->transport_reset = usb_stor_Bulk_reset; break; } } /* Get the protocol settings */ static void get_protocol(struct us_data *us) { switch (us->subclass) { case USB_SC_RBC: us->protocol_name = "Reduced Block Commands (RBC)"; us->proto_handler = usb_stor_transparent_scsi_command; break; case USB_SC_8020: us->protocol_name = "8020i"; us->proto_handler = usb_stor_pad12_command; us->max_lun = 0; break; case USB_SC_QIC: us->protocol_name = "QIC-157"; us->proto_handler = usb_stor_pad12_command; us->max_lun = 0; break; case USB_SC_8070: us->protocol_name = "8070i"; us->proto_handler = usb_stor_pad12_command; us->max_lun = 0; break; case USB_SC_SCSI: us->protocol_name = "Transparent SCSI"; us->proto_handler = usb_stor_transparent_scsi_command; break; case USB_SC_UFI: us->protocol_name = "Uniform Floppy Interface (UFI)"; us->proto_handler = usb_stor_ufi_command; break; } } /* Get the pipe settings */ static int get_pipes(struct us_data *us) { struct usb_host_interface *alt = us->pusb_intf->cur_altsetting; struct usb_endpoint_descriptor *ep_in; struct usb_endpoint_descriptor *ep_out; struct usb_endpoint_descriptor *ep_int; int res; /* * Find the first endpoint of each type we need. * We are expecting a minimum of 2 endpoints - in and out (bulk). * An optional interrupt-in is OK (necessary for CBI protocol). * We will ignore any others. 
*/ res = usb_find_common_endpoints(alt, &ep_in, &ep_out, NULL, NULL); if (res) { usb_stor_dbg(us, "bulk endpoints not found\n"); return res; } res = usb_find_int_in_endpoint(alt, &ep_int); if (res && us->protocol == USB_PR_CBI) { usb_stor_dbg(us, "interrupt endpoint not found\n"); return res; } /* Calculate and store the pipe values */ us->send_ctrl_pipe = usb_sndctrlpipe(us->pusb_dev, 0); us->recv_ctrl_pipe = usb_rcvctrlpipe(us->pusb_dev, 0); us->send_bulk_pipe = usb_sndbulkpipe(us->pusb_dev, usb_endpoint_num(ep_out)); us->recv_bulk_pipe = usb_rcvbulkpipe(us->pusb_dev, usb_endpoint_num(ep_in)); if (ep_int) { us->recv_intr_pipe = usb_rcvintpipe(us->pusb_dev, usb_endpoint_num(ep_int)); us->ep_bInterval = ep_int->bInterval; } return 0; } /* Initialize all the dynamic resources we need */ static int usb_stor_acquire_resources(struct us_data *us) { int p; struct task_struct *th; us->current_urb = usb_alloc_urb(0, GFP_KERNEL); if (!us->current_urb) return -ENOMEM; /* * Just before we start our control thread, initialize * the device if it needs initialization */ if (us->unusual_dev->initFunction) { p = us->unusual_dev->initFunction(us); if (p) return p; } /* Start up our control thread */ th = kthread_run(usb_stor_control_thread, us, "usb-storage"); if (IS_ERR(th)) { dev_warn(&us->pusb_intf->dev, "Unable to start control thread\n"); return PTR_ERR(th); } us->ctl_thread = th; return 0; } /* Release all our dynamic resources */ static void usb_stor_release_resources(struct us_data *us) { /* * Tell the control thread to exit. The SCSI host must * already have been removed and the DISCONNECTING flag set * so that we won't accept any more commands. */ usb_stor_dbg(us, "-- sending exit command to thread\n"); complete(&us->cmnd_ready); if (us->ctl_thread) kthread_stop(us->ctl_thread); /* Call the destructor routine, if it exists */ if (us->extra_destructor) { usb_stor_dbg(us, "-- calling extra_destructor()\n"); us->extra_destructor(us->extra); } /* Free the extra data and the URB */ kfree(us->extra); usb_free_urb(us->current_urb); } /* Dissociate from the USB device */ static void dissociate_dev(struct us_data *us) { /* Free the buffers */ kfree(us->cr); usb_free_coherent(us->pusb_dev, US_IOBUF_SIZE, us->iobuf, us->iobuf_dma); /* Remove our private data from the interface */ usb_set_intfdata(us->pusb_intf, NULL); } /* * First stage of disconnect processing: stop SCSI scanning, * remove the host, and stop accepting new commands */ static void quiesce_and_remove_host(struct us_data *us) { struct Scsi_Host *host = us_to_host(us); /* If the device is really gone, cut short reset delays */ if (us->pusb_dev->state == USB_STATE_NOTATTACHED) { set_bit(US_FLIDX_DISCONNECTING, &us->dflags); wake_up(&us->delay_wait); } /* * Prevent SCSI scanning (if it hasn't started yet) * or wait for the SCSI-scanning routine to stop. */ cancel_delayed_work_sync(&us->scan_dwork); /* Balance autopm calls if scanning was cancelled */ if (test_bit(US_FLIDX_SCAN_PENDING, &us->dflags)) usb_autopm_put_interface_no_suspend(us->pusb_intf); /* * Removing the host will perform an orderly shutdown: caches * synchronized, disks spun down, etc. */ scsi_remove_host(host); /* * Prevent any new commands from being accepted and cut short * reset delays. 
*/ scsi_lock(host); set_bit(US_FLIDX_DISCONNECTING, &us->dflags); scsi_unlock(host); wake_up(&us->delay_wait); } /* Second stage of disconnect processing: deallocate all resources */ static void release_everything(struct us_data *us) { usb_stor_release_resources(us); dissociate_dev(us); /* * Drop our reference to the host; the SCSI core will free it * (and "us" along with it) when the refcount becomes 0. */ scsi_host_put(us_to_host(us)); } /* Delayed-work routine to carry out SCSI-device scanning */ static void usb_stor_scan_dwork(struct work_struct *work) { struct us_data *us = container_of(work, struct us_data, scan_dwork.work); struct device *dev = &us->pusb_intf->dev; dev_dbg(dev, "starting scan\n"); /* For bulk-only devices, determine the max LUN value */ if (us->protocol == USB_PR_BULK && !(us->fflags & US_FL_SINGLE_LUN) && !(us->fflags & US_FL_SCM_MULT_TARG)) { mutex_lock(&us->dev_mutex); us->max_lun = usb_stor_Bulk_max_lun(us); /* * Allow proper scanning of devices that present more than 8 LUNs * While not affecting other devices that may need the previous * behavior */ if (us->max_lun >= 8) us_to_host(us)->max_lun = us->max_lun+1; mutex_unlock(&us->dev_mutex); } scsi_scan_host(us_to_host(us)); dev_dbg(dev, "scan complete\n"); /* Should we unbind if no devices were detected? */ usb_autopm_put_interface(us->pusb_intf); clear_bit(US_FLIDX_SCAN_PENDING, &us->dflags); } static unsigned int usb_stor_sg_tablesize(struct usb_interface *intf) { struct usb_device *usb_dev = interface_to_usbdev(intf); if (usb_dev->bus->sg_tablesize) { return usb_dev->bus->sg_tablesize; } return SG_ALL; } /* First part of general USB mass-storage probing */ int usb_stor_probe1(struct us_data **pus, struct usb_interface *intf, const struct usb_device_id *id, const struct us_unusual_dev *unusual_dev, const struct scsi_host_template *sht) { struct Scsi_Host *host; struct us_data *us; int result; dev_info(&intf->dev, "USB Mass Storage device detected\n"); /* * Ask the SCSI layer to allocate a host structure, with extra * space at the end for our private us_data structure. */ host = scsi_host_alloc(sht, sizeof(*us)); if (!host) { dev_warn(&intf->dev, "Unable to allocate the scsi host\n"); return -ENOMEM; } /* * Allow 16-byte CDBs and thus > 2TB */ host->max_cmd_len = 16; host->sg_tablesize = usb_stor_sg_tablesize(intf); *pus = us = host_to_us(host); mutex_init(&(us->dev_mutex)); us_set_lock_class(&us->dev_mutex, intf); init_completion(&us->cmnd_ready); init_completion(&(us->notify)); init_waitqueue_head(&us->delay_wait); INIT_DELAYED_WORK(&us->scan_dwork, usb_stor_scan_dwork); /* Associate the us_data structure with the USB device */ result = associate_dev(us, intf); if (result) goto BadDevice; /* * Some USB host controllers can't do DMA: They have to use PIO, or they * have to use a small dedicated local memory area, or they have other * restrictions on addressable memory. * * We can't support these controllers on highmem systems as we don't * kmap or bounce buffer. */ if (IS_ENABLED(CONFIG_HIGHMEM) && (!hcd_uses_dma(bus_to_hcd(us->pusb_dev->bus)) || bus_to_hcd(us->pusb_dev->bus)->localmem_pool)) { dev_warn(&intf->dev, "USB Mass Storage not supported on this host controller\n"); result = -EINVAL; goto release; } /* Get the unusual_devs entries and the descriptors */ result = get_device_info(us, id, unusual_dev); if (result) goto BadDevice; /* Get standard transport and protocol settings */ get_transport(us); get_protocol(us); /* * Give the caller a chance to fill in specialized transport * or protocol settings. 
*/ return 0; BadDevice: usb_stor_dbg(us, "storage_probe() failed\n"); release: release_everything(us); return result; } EXPORT_SYMBOL_GPL(usb_stor_probe1); /* Second part of general USB mass-storage probing */ int usb_stor_probe2(struct us_data *us) { int result; struct device *dev = &us->pusb_intf->dev; /* Make sure the transport and protocol have both been set */ if (!us->transport || !us->proto_handler) { result = -ENXIO; goto BadDevice; } usb_stor_dbg(us, "Transport: %s\n", us->transport_name); usb_stor_dbg(us, "Protocol: %s\n", us->protocol_name); if (us->fflags & US_FL_SCM_MULT_TARG) { /* * SCM eUSCSI bridge devices can have different numbers * of LUNs on different targets; allow all to be probed. */ us->max_lun = 7; /* The eUSCSI itself has ID 7, so avoid scanning that */ us_to_host(us)->this_id = 7; /* max_id is 8 initially, so no need to set it here */ } else { /* In the normal case there is only a single target */ us_to_host(us)->max_id = 1; /* * Like Windows, we won't store the LUN bits in CDB[1] for * SCSI-2 devices using the Bulk-Only transport (even though * this violates the SCSI spec). */ if (us->transport == usb_stor_Bulk_transport) us_to_host(us)->no_scsi2_lun_in_cdb = 1; } /* fix for single-lun devices */ if (us->fflags & US_FL_SINGLE_LUN) us->max_lun = 0; /* Find the endpoints and calculate pipe values */ result = get_pipes(us); if (result) goto BadDevice; /* * If the device returns invalid data for the first READ(10) * command, indicate the command should be retried. */ if (us->fflags & US_FL_INITIAL_READ10) set_bit(US_FLIDX_REDO_READ10, &us->dflags); /* Acquire all the other resources and add the host */ result = usb_stor_acquire_resources(us); if (result) goto BadDevice; usb_autopm_get_interface_no_resume(us->pusb_intf); snprintf(us->scsi_name, sizeof(us->scsi_name), "usb-storage %s", dev_name(&us->pusb_intf->dev)); result = scsi_add_host(us_to_host(us), dev); if (result) { dev_warn(dev, "Unable to add the scsi host\n"); goto HostAddErr; } /* Submit the delayed_work for SCSI-device scanning */ set_bit(US_FLIDX_SCAN_PENDING, &us->dflags); if (delay_use > 0) dev_dbg(dev, "waiting for device to settle before scanning\n"); queue_delayed_work(system_freezable_wq, &us->scan_dwork, msecs_to_jiffies(delay_use)); return 0; /* We come here if there are any problems */ HostAddErr: usb_autopm_put_interface_no_suspend(us->pusb_intf); BadDevice: usb_stor_dbg(us, "storage_probe() failed\n"); release_everything(us); return result; } EXPORT_SYMBOL_GPL(usb_stor_probe2); /* Handle a USB mass-storage disconnect */ void usb_stor_disconnect(struct usb_interface *intf) { struct us_data *us = usb_get_intfdata(intf); quiesce_and_remove_host(us); release_everything(us); } EXPORT_SYMBOL_GPL(usb_stor_disconnect); static struct scsi_host_template usb_stor_host_template; /* The main probe routine for standard devices */ static int storage_probe(struct usb_interface *intf, const struct usb_device_id *id) { const struct us_unusual_dev *unusual_dev; struct us_data *us; int result; int size; /* If uas is enabled and this device can do uas then ignore it. */ #if IS_ENABLED(CONFIG_USB_UAS) if (uas_use_uas_driver(intf, id, NULL)) return -ENXIO; #endif /* * If the device isn't standard (is handled by a subdriver * module) then don't accept it. */ if (usb_usual_ignore_device(intf)) return -ENXIO; /* * Call the general probe procedures. * * The unusual_dev_list array is parallel to the usb_storage_usb_ids * table, so we use the index of the id entry to find the * corresponding unusual_devs entry. 
*/ size = ARRAY_SIZE(us_unusual_dev_list); if (id >= usb_storage_usb_ids && id < usb_storage_usb_ids + size) { unusual_dev = (id - usb_storage_usb_ids) + us_unusual_dev_list; } else { unusual_dev = &for_dynamic_ids; dev_dbg(&intf->dev, "Use Bulk-Only transport with the Transparent SCSI protocol for dynamic id: 0x%04x 0x%04x\n", id->idVendor, id->idProduct); } result = usb_stor_probe1(&us, intf, id, unusual_dev, &usb_stor_host_template); if (result) return result; /* No special transport or protocol settings in the main module */ result = usb_stor_probe2(us); return result; } static struct usb_driver usb_storage_driver = { .name = DRV_NAME, .probe = storage_probe, .disconnect = usb_stor_disconnect, .suspend = usb_stor_suspend, .resume = usb_stor_resume, .reset_resume = usb_stor_reset_resume, .pre_reset = usb_stor_pre_reset, .post_reset = usb_stor_post_reset, .id_table = usb_storage_usb_ids, .supports_autosuspend = 1, .soft_unbind = 1, }; module_usb_stor_driver(usb_storage_driver, usb_stor_host_template, DRV_NAME);
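The probe path is deliberately split in two stages so that a sub-driver can slot a specialized transport or protocol in between usb_stor_probe1() and usb_stor_probe2(). The sketch below illustrates that pattern only; my_usb_ids, my_unusual_devs, my_transport and my_host_template are hypothetical names standing in for the id table, unusual_devs list, transport callback and SCSI host template a real sub-driver would define itself.

/*
 * Minimal sub-driver probe sketch (hypothetical names, not part of the
 * driver above): stage 1 allocates the SCSI host and reads the
 * descriptors, the sub-driver then overrides the transport settings,
 * and stage 2 finds the pipes, starts the control thread and adds the
 * host.
 */
static int my_storage_probe(struct usb_interface *intf,
			    const struct usb_device_id *id)
{
	struct us_data *us;
	int result;

	result = usb_stor_probe1(&us, intf, id,
				 (id - my_usb_ids) + my_unusual_devs,
				 &my_host_template);
	if (result)
		return result;

	/* Specialized settings filled in between the two stages */
	us->transport_name = "My Vendor Bridge";
	us->transport = my_transport;			/* trans_cmnd callback */
	us->transport_reset = usb_stor_Bulk_reset;	/* reuse the generic reset */
	us->max_lun = 0;				/* single-LUN bridge */

	return usb_stor_probe2(us);
}

As in the standard storage_probe() above, such a sub-driver's usb_driver would point .probe at this function and could reuse usb_stor_disconnect() for .disconnect.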
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  HIDPP protocol for Logitech receivers
 *
 *  Copyright (c) 2011 Logitech (c)
 *  Copyright (c) 2012-2013 Google (c)
 *  Copyright (c) 2013-2014 Red Hat Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/input.h>
#include <linux/usb.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/kfifo.h>
#include <linux/input/mt.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <linux/fixp-arith.h>
#include <linux/unaligned.h>
#include "usbhid/usbhid.h"
#include "hid-ids.h"

MODULE_DESCRIPTION("Support for Logitech devices relying on the HID++ specification");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
MODULE_AUTHOR("Nestor Lopez Casado <nlopezcasad@logitech.com>");
MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");

static bool disable_tap_to_click;
module_param(disable_tap_to_click, bool, 0644);
MODULE_PARM_DESC(disable_tap_to_click,
	"Disable Tap-To-Click mode reporting for touchpads (only on the K400 currently).");

/* Define a non-zero software ID to identify our own requests */
#define LINUX_KERNEL_SW_ID			0x01

#define REPORT_ID_HIDPP_SHORT			0x10
#define REPORT_ID_HIDPP_LONG			0x11
#define REPORT_ID_HIDPP_VERY_LONG		0x12

#define HIDPP_REPORT_SHORT_LENGTH		7
#define HIDPP_REPORT_LONG_LENGTH		20
#define HIDPP_REPORT_VERY_LONG_MAX_LENGTH	64

#define HIDPP_REPORT_SHORT_SUPPORTED		BIT(0)
#define HIDPP_REPORT_LONG_SUPPORTED		BIT(1)
#define HIDPP_REPORT_VERY_LONG_SUPPORTED	BIT(2)

#define HIDPP_SUB_ID_CONSUMER_VENDOR_KEYS	0x03
#define HIDPP_SUB_ID_ROLLER			0x05
#define HIDPP_SUB_ID_MOUSE_EXTRA_BTNS		0x06
#define HIDPP_SUB_ID_USER_IFACE_EVENT		0x08

#define HIDPP_USER_IFACE_EVENT_ENCRYPTION_KEY_LOST	BIT(5)

#define HIDPP_QUIRK_CLASS_WTP			BIT(0)
#define HIDPP_QUIRK_CLASS_M560			BIT(1)
#define HIDPP_QUIRK_CLASS_K400			BIT(2)
#define HIDPP_QUIRK_CLASS_G920			BIT(3)
#define HIDPP_QUIRK_CLASS_K750			BIT(4)

/* bits 2..20 are reserved for classes */
/* #define HIDPP_QUIRK_CONNECT_EVENTS		BIT(21) disabled */
#define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS	BIT(22)
#define HIDPP_QUIRK_DELAYED_INIT		BIT(23)
#define HIDPP_QUIRK_FORCE_OUTPUT_REPORTS	BIT(24)
#define HIDPP_QUIRK_HIDPP_WHEELS		BIT(25)
#define HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS	BIT(26)
#define HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS	BIT(27)
#define HIDPP_QUIRK_HI_RES_SCROLL_1P0		BIT(28)
#define HIDPP_QUIRK_WIRELESS_STATUS		BIT(29)

/* These are just aliases for now */
#define HIDPP_QUIRK_KBD_SCROLL_WHEEL		HIDPP_QUIRK_HIDPP_WHEELS
#define HIDPP_QUIRK_KBD_ZOOM_WHEEL
HIDPP_QUIRK_HIDPP_WHEELS /* Convenience constant to check for any high-res support. */ #define HIDPP_CAPABILITY_HI_RES_SCROLL (HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL | \ HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL | \ HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL) #define HIDPP_CAPABILITY_HIDPP10_BATTERY BIT(0) #define HIDPP_CAPABILITY_HIDPP20_BATTERY BIT(1) #define HIDPP_CAPABILITY_BATTERY_MILEAGE BIT(2) #define HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS BIT(3) #define HIDPP_CAPABILITY_BATTERY_VOLTAGE BIT(4) #define HIDPP_CAPABILITY_BATTERY_PERCENTAGE BIT(5) #define HIDPP_CAPABILITY_UNIFIED_BATTERY BIT(6) #define HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL BIT(7) #define HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL BIT(8) #define HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL BIT(9) #define HIDPP_CAPABILITY_ADC_MEASUREMENT BIT(10) #define lg_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c)) /* * There are two hidpp protocols in use, the first version hidpp10 is known * as register access protocol or RAP, the second version hidpp20 is known as * feature access protocol or FAP * * Most older devices (including the Unifying usb receiver) use the RAP protocol * where as most newer devices use the FAP protocol. Both protocols are * compatible with the underlying transport, which could be usb, Unifiying, or * bluetooth. The message lengths are defined by the hid vendor specific report * descriptor for the HIDPP_SHORT report type (total message lenth 7 bytes) and * the HIDPP_LONG report type (total message length 20 bytes) * * The RAP protocol uses both report types, whereas the FAP only uses HIDPP_LONG * messages. The Unifying receiver itself responds to RAP messages (device index * is 0xFF for the receiver), and all messages (short or long) with a device * index between 1 and 6 are passed untouched to the corresponding paired * Unifying device. * * The paired device can be RAP or FAP, it will receive the message untouched * from the Unifiying receiver. */ struct fap { u8 feature_index; u8 funcindex_clientid; u8 params[HIDPP_REPORT_VERY_LONG_MAX_LENGTH - 4U]; }; struct rap { u8 sub_id; u8 reg_address; u8 params[HIDPP_REPORT_VERY_LONG_MAX_LENGTH - 4U]; }; struct hidpp_report { u8 report_id; u8 device_index; union { struct fap fap; struct rap rap; u8 rawbytes[sizeof(struct fap)]; }; } __packed; struct hidpp_battery { u8 feature_index; u8 solar_feature_index; u8 voltage_feature_index; u8 adc_measurement_feature_index; struct power_supply_desc desc; struct power_supply *ps; char name[64]; int status; int capacity; int level; int voltage; int charge_type; bool online; u8 supported_levels_1004; }; /** * struct hidpp_scroll_counter - Utility class for processing high-resolution * scroll events. * @dev: the input device for which events should be reported. * @wheel_multiplier: the scalar multiplier to be applied to each wheel event * @remainder: counts the number of high-resolution units moved since the last * low-resolution event (REL_WHEEL or REL_HWHEEL) was sent. Should * only be used by class methods. 
* @direction: direction of last movement (1 or -1) * @last_time: last event time, used to reset remainder after inactivity */ struct hidpp_scroll_counter { int wheel_multiplier; int remainder; int direction; unsigned long long last_time; }; struct hidpp_device { struct hid_device *hid_dev; struct input_dev *input; struct mutex send_mutex; void *send_receive_buf; char *name; /* will never be NULL and should not be freed */ wait_queue_head_t wait; int very_long_report_length; bool answer_available; u8 protocol_major; u8 protocol_minor; void *private_data; struct work_struct work; struct kfifo delayed_work_fifo; struct input_dev *delayed_input; unsigned long quirks; unsigned long capabilities; u8 supported_reports; struct hidpp_battery battery; struct hidpp_scroll_counter vertical_wheel_counter; u8 wireless_feature_index; bool connected_once; }; /* HID++ 1.0 error codes */ #define HIDPP_ERROR 0x8f #define HIDPP_ERROR_SUCCESS 0x00 #define HIDPP_ERROR_INVALID_SUBID 0x01 #define HIDPP_ERROR_INVALID_ADRESS 0x02 #define HIDPP_ERROR_INVALID_VALUE 0x03 #define HIDPP_ERROR_CONNECT_FAIL 0x04 #define HIDPP_ERROR_TOO_MANY_DEVICES 0x05 #define HIDPP_ERROR_ALREADY_EXISTS 0x06 #define HIDPP_ERROR_BUSY 0x07 #define HIDPP_ERROR_UNKNOWN_DEVICE 0x08 #define HIDPP_ERROR_RESOURCE_ERROR 0x09 #define HIDPP_ERROR_REQUEST_UNAVAILABLE 0x0a #define HIDPP_ERROR_INVALID_PARAM_VALUE 0x0b #define HIDPP_ERROR_WRONG_PIN_CODE 0x0c /* HID++ 2.0 error codes */ #define HIDPP20_ERROR_NO_ERROR 0x00 #define HIDPP20_ERROR_UNKNOWN 0x01 #define HIDPP20_ERROR_INVALID_ARGS 0x02 #define HIDPP20_ERROR_OUT_OF_RANGE 0x03 #define HIDPP20_ERROR_HW_ERROR 0x04 #define HIDPP20_ERROR_NOT_ALLOWED 0x05 #define HIDPP20_ERROR_INVALID_FEATURE_INDEX 0x06 #define HIDPP20_ERROR_INVALID_FUNCTION_ID 0x07 #define HIDPP20_ERROR_BUSY 0x08 #define HIDPP20_ERROR_UNSUPPORTED 0x09 #define HIDPP20_ERROR 0xff static int __hidpp_send_report(struct hid_device *hdev, struct hidpp_report *hidpp_report) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); int fields_count, ret; switch (hidpp_report->report_id) { case REPORT_ID_HIDPP_SHORT: fields_count = HIDPP_REPORT_SHORT_LENGTH; break; case REPORT_ID_HIDPP_LONG: fields_count = HIDPP_REPORT_LONG_LENGTH; break; case REPORT_ID_HIDPP_VERY_LONG: fields_count = hidpp->very_long_report_length; break; default: return -ENODEV; } /* * set the device_index as the receiver, it will be overwritten by * hid_hw_request if needed */ hidpp_report->device_index = 0xff; if (hidpp->quirks & HIDPP_QUIRK_FORCE_OUTPUT_REPORTS) { ret = hid_hw_output_report(hdev, (u8 *)hidpp_report, fields_count); } else { ret = hid_hw_raw_request(hdev, hidpp_report->report_id, (u8 *)hidpp_report, fields_count, HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); } return ret == fields_count ? 0 : -1; } /* * Effectively send the message to the device, waiting for its answer. 
* * Must be called with hidpp->send_mutex locked * * Same return protocol than hidpp_send_message_sync(): * - success on 0 * - negative error means transport error * - positive value means protocol error */ static int __do_hidpp_send_message_sync(struct hidpp_device *hidpp, struct hidpp_report *message, struct hidpp_report *response) { int ret; __must_hold(&hidpp->send_mutex); hidpp->send_receive_buf = response; hidpp->answer_available = false; /* * So that we can later validate the answer when it arrives * in hidpp_raw_event */ *response = *message; ret = __hidpp_send_report(hidpp->hid_dev, message); if (ret) { dbg_hid("__hidpp_send_report returned err: %d\n", ret); memset(response, 0, sizeof(struct hidpp_report)); return ret; } if (!wait_event_timeout(hidpp->wait, hidpp->answer_available, 5*HZ)) { dbg_hid("%s:timeout waiting for response\n", __func__); memset(response, 0, sizeof(struct hidpp_report)); return -ETIMEDOUT; } if (response->report_id == REPORT_ID_HIDPP_SHORT && response->rap.sub_id == HIDPP_ERROR) { ret = response->rap.params[1]; dbg_hid("%s:got hidpp error %02X\n", __func__, ret); return ret; } if ((response->report_id == REPORT_ID_HIDPP_LONG || response->report_id == REPORT_ID_HIDPP_VERY_LONG) && response->fap.feature_index == HIDPP20_ERROR) { ret = response->fap.params[1]; dbg_hid("%s:got hidpp 2.0 error %02X\n", __func__, ret); return ret; } return 0; } /* * hidpp_send_message_sync() returns 0 in case of success, and something else * in case of a failure. * * See __do_hidpp_send_message_sync() for a detailed explanation of the returned * value. */ static int hidpp_send_message_sync(struct hidpp_device *hidpp, struct hidpp_report *message, struct hidpp_report *response) { int ret; int max_retries = 3; mutex_lock(&hidpp->send_mutex); do { ret = __do_hidpp_send_message_sync(hidpp, message, response); if (ret != HIDPP20_ERROR_BUSY) break; dbg_hid("%s:got busy hidpp 2.0 error %02X, retrying\n", __func__, ret); } while (--max_retries); mutex_unlock(&hidpp->send_mutex); return ret; } /* * hidpp_send_fap_command_sync() returns 0 in case of success, and something else * in case of a failure. * * See __do_hidpp_send_message_sync() for a detailed explanation of the returned * value. */ static int hidpp_send_fap_command_sync(struct hidpp_device *hidpp, u8 feat_index, u8 funcindex_clientid, u8 *params, int param_count, struct hidpp_report *response) { struct hidpp_report *message; int ret; if (param_count > sizeof(message->fap.params)) { hid_dbg(hidpp->hid_dev, "Invalid number of parameters passed to command (%d != %llu)\n", param_count, (unsigned long long) sizeof(message->fap.params)); return -EINVAL; } message = kzalloc(sizeof(struct hidpp_report), GFP_KERNEL); if (!message) return -ENOMEM; if (param_count > (HIDPP_REPORT_LONG_LENGTH - 4)) message->report_id = REPORT_ID_HIDPP_VERY_LONG; else message->report_id = REPORT_ID_HIDPP_LONG; message->fap.feature_index = feat_index; message->fap.funcindex_clientid = funcindex_clientid | LINUX_KERNEL_SW_ID; memcpy(&message->fap.params, params, param_count); ret = hidpp_send_message_sync(hidpp, message, response); kfree(message); return ret; } /* * hidpp_send_rap_command_sync() returns 0 in case of success, and something else * in case of a failure. * * See __do_hidpp_send_message_sync() for a detailed explanation of the returned * value. 
*/ static int hidpp_send_rap_command_sync(struct hidpp_device *hidpp_dev, u8 report_id, u8 sub_id, u8 reg_address, u8 *params, int param_count, struct hidpp_report *response) { struct hidpp_report *message; int ret, max_count; /* Send as long report if short reports are not supported. */ if (report_id == REPORT_ID_HIDPP_SHORT && !(hidpp_dev->supported_reports & HIDPP_REPORT_SHORT_SUPPORTED)) report_id = REPORT_ID_HIDPP_LONG; switch (report_id) { case REPORT_ID_HIDPP_SHORT: max_count = HIDPP_REPORT_SHORT_LENGTH - 4; break; case REPORT_ID_HIDPP_LONG: max_count = HIDPP_REPORT_LONG_LENGTH - 4; break; case REPORT_ID_HIDPP_VERY_LONG: max_count = hidpp_dev->very_long_report_length - 4; break; default: return -EINVAL; } if (param_count > max_count) return -EINVAL; message = kzalloc(sizeof(struct hidpp_report), GFP_KERNEL); if (!message) return -ENOMEM; message->report_id = report_id; message->rap.sub_id = sub_id; message->rap.reg_address = reg_address; memcpy(&message->rap.params, params, param_count); ret = hidpp_send_message_sync(hidpp_dev, message, response); kfree(message); return ret; } static inline bool hidpp_match_answer(struct hidpp_report *question, struct hidpp_report *answer) { return (answer->fap.feature_index == question->fap.feature_index) && (answer->fap.funcindex_clientid == question->fap.funcindex_clientid); } static inline bool hidpp_match_error(struct hidpp_report *question, struct hidpp_report *answer) { return ((answer->rap.sub_id == HIDPP_ERROR) || (answer->fap.feature_index == HIDPP20_ERROR)) && (answer->fap.funcindex_clientid == question->fap.feature_index) && (answer->fap.params[0] == question->fap.funcindex_clientid); } static inline bool hidpp_report_is_connect_event(struct hidpp_device *hidpp, struct hidpp_report *report) { return (hidpp->wireless_feature_index && (report->fap.feature_index == hidpp->wireless_feature_index)) || ((report->report_id == REPORT_ID_HIDPP_SHORT) && (report->rap.sub_id == 0x41)); } /* * hidpp_prefix_name() prefixes the current given name with "Logitech ". */ static void hidpp_prefix_name(char **name, int name_length) { #define PREFIX_LENGTH 9 /* "Logitech " */ int new_length; char *new_name; if (name_length > PREFIX_LENGTH && strncmp(*name, "Logitech ", PREFIX_LENGTH) == 0) /* The prefix has is already in the name */ return; new_length = PREFIX_LENGTH + name_length; new_name = kzalloc(new_length, GFP_KERNEL); if (!new_name) return; snprintf(new_name, new_length, "Logitech %s", *name); kfree(*name); *name = new_name; } /* * Updates the USB wireless_status based on whether the headset * is turned on and reachable. */ static void hidpp_update_usb_wireless_status(struct hidpp_device *hidpp) { struct hid_device *hdev = hidpp->hid_dev; struct usb_interface *intf; if (!(hidpp->quirks & HIDPP_QUIRK_WIRELESS_STATUS)) return; if (!hid_is_usb(hdev)) return; intf = to_usb_interface(hdev->dev.parent); usb_set_wireless_status(intf, hidpp->battery.online ? USB_WIRELESS_STATUS_CONNECTED : USB_WIRELESS_STATUS_DISCONNECTED); } /** * hidpp_scroll_counter_handle_scroll() - Send high- and low-resolution scroll * events given a high-resolution wheel * movement. * @input_dev: Pointer to the input device * @counter: a hid_scroll_counter struct describing the wheel. * @hi_res_value: the movement of the wheel, in the mouse's high-resolution * units. * * Given a high-resolution movement, this function converts the movement into * fractions of 120 and emits high-resolution scroll events for the input * device. 
It also uses the multiplier from &struct hid_scroll_counter to * emit low-resolution scroll events when appropriate for * backwards-compatibility with userspace input libraries. */ static void hidpp_scroll_counter_handle_scroll(struct input_dev *input_dev, struct hidpp_scroll_counter *counter, int hi_res_value) { int low_res_value, remainder, direction; unsigned long long now, previous; hi_res_value = hi_res_value * 120/counter->wheel_multiplier; input_report_rel(input_dev, REL_WHEEL_HI_RES, hi_res_value); remainder = counter->remainder; direction = hi_res_value > 0 ? 1 : -1; now = sched_clock(); previous = counter->last_time; counter->last_time = now; /* * Reset the remainder after a period of inactivity or when the * direction changes. This prevents the REL_WHEEL emulation point * from sliding for devices that don't always provide the same * number of movements per detent. */ if (now - previous > 1000000000 || direction != counter->direction) remainder = 0; counter->direction = direction; remainder += hi_res_value; /* Some wheels will rest 7/8ths of a detent from the previous detent * after slow movement, so we want the threshold for low-res events to * be in the middle between two detents (e.g. after 4/8ths) as * opposed to on the detents themselves (8/8ths). */ if (abs(remainder) >= 60) { /* Add (or subtract) 1 because we want to trigger when the wheel * is half-way to the next detent (i.e. scroll 1 detent after a * 1/2 detent movement, 2 detents after a 1 1/2 detent movement, * etc.). */ low_res_value = remainder / 120; if (low_res_value == 0) low_res_value = (hi_res_value > 0 ? 1 : -1); input_report_rel(input_dev, REL_WHEEL, low_res_value); remainder -= low_res_value * 120; } counter->remainder = remainder; } /* -------------------------------------------------------------------------- */ /* HIDP++ 1.0 commands */ /* -------------------------------------------------------------------------- */ #define HIDPP_SET_REGISTER 0x80 #define HIDPP_GET_REGISTER 0x81 #define HIDPP_SET_LONG_REGISTER 0x82 #define HIDPP_GET_LONG_REGISTER 0x83 /** * hidpp10_set_register - Modify a HID++ 1.0 register. * @hidpp_dev: the device to set the register on. * @register_address: the address of the register to modify. * @byte: the byte of the register to modify. Should be less than 3. * @mask: mask of the bits to modify * @value: new values for the bits in mask * Return: 0 if successful, otherwise a negative error code. 
*/ static int hidpp10_set_register(struct hidpp_device *hidpp_dev, u8 register_address, u8 byte, u8 mask, u8 value) { struct hidpp_report response; int ret; u8 params[3] = { 0 }; ret = hidpp_send_rap_command_sync(hidpp_dev, REPORT_ID_HIDPP_SHORT, HIDPP_GET_REGISTER, register_address, NULL, 0, &response); if (ret) return ret; memcpy(params, response.rap.params, 3); params[byte] &= ~mask; params[byte] |= value & mask; return hidpp_send_rap_command_sync(hidpp_dev, REPORT_ID_HIDPP_SHORT, HIDPP_SET_REGISTER, register_address, params, 3, &response); } #define HIDPP_REG_ENABLE_REPORTS 0x00 #define HIDPP_ENABLE_CONSUMER_REPORT BIT(0) #define HIDPP_ENABLE_WHEEL_REPORT BIT(2) #define HIDPP_ENABLE_MOUSE_EXTRA_BTN_REPORT BIT(3) #define HIDPP_ENABLE_BAT_REPORT BIT(4) #define HIDPP_ENABLE_HWHEEL_REPORT BIT(5) static int hidpp10_enable_battery_reporting(struct hidpp_device *hidpp_dev) { return hidpp10_set_register(hidpp_dev, HIDPP_REG_ENABLE_REPORTS, 0, HIDPP_ENABLE_BAT_REPORT, HIDPP_ENABLE_BAT_REPORT); } #define HIDPP_REG_FEATURES 0x01 #define HIDPP_ENABLE_SPECIAL_BUTTON_FUNC BIT(1) #define HIDPP_ENABLE_FAST_SCROLL BIT(6) /* On HID++ 1.0 devices, high-res scroll was called "scrolling acceleration". */ static int hidpp10_enable_scrolling_acceleration(struct hidpp_device *hidpp_dev) { return hidpp10_set_register(hidpp_dev, HIDPP_REG_FEATURES, 0, HIDPP_ENABLE_FAST_SCROLL, HIDPP_ENABLE_FAST_SCROLL); } #define HIDPP_REG_BATTERY_STATUS 0x07 static int hidpp10_battery_status_map_level(u8 param) { int level; switch (param) { case 1 ... 2: level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; break; case 3 ... 4: level = POWER_SUPPLY_CAPACITY_LEVEL_LOW; break; case 5 ... 6: level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; break; case 7: level = POWER_SUPPLY_CAPACITY_LEVEL_HIGH; break; default: level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; } return level; } static int hidpp10_battery_status_map_status(u8 param) { int status; switch (param) { case 0x00: /* discharging (in use) */ status = POWER_SUPPLY_STATUS_DISCHARGING; break; case 0x21: /* (standard) charging */ case 0x24: /* fast charging */ case 0x25: /* slow charging */ status = POWER_SUPPLY_STATUS_CHARGING; break; case 0x26: /* topping charge */ case 0x22: /* charge complete */ status = POWER_SUPPLY_STATUS_FULL; break; case 0x20: /* unknown */ status = POWER_SUPPLY_STATUS_UNKNOWN; break; /* * 0x01...0x1F = reserved (not charging) * 0x23 = charging error * 0x27..0xff = reserved */ default: status = POWER_SUPPLY_STATUS_NOT_CHARGING; break; } return status; } static int hidpp10_query_battery_status(struct hidpp_device *hidpp) { struct hidpp_report response; int ret, status; ret = hidpp_send_rap_command_sync(hidpp, REPORT_ID_HIDPP_SHORT, HIDPP_GET_REGISTER, HIDPP_REG_BATTERY_STATUS, NULL, 0, &response); if (ret) return ret; hidpp->battery.level = hidpp10_battery_status_map_level(response.rap.params[0]); status = hidpp10_battery_status_map_status(response.rap.params[1]); hidpp->battery.status = status; /* the capacity is only available when discharging or full */ hidpp->battery.online = status == POWER_SUPPLY_STATUS_DISCHARGING || status == POWER_SUPPLY_STATUS_FULL; return 0; } #define HIDPP_REG_BATTERY_MILEAGE 0x0D static int hidpp10_battery_mileage_map_status(u8 param) { int status; switch (param >> 6) { case 0x00: /* discharging (in use) */ status = POWER_SUPPLY_STATUS_DISCHARGING; break; case 0x01: /* charging */ status = POWER_SUPPLY_STATUS_CHARGING; break; case 0x02: /* charge complete */ status = POWER_SUPPLY_STATUS_FULL; break; /* * 0x03 = charging error */ default: 
status = POWER_SUPPLY_STATUS_NOT_CHARGING; break; } return status; } static int hidpp10_query_battery_mileage(struct hidpp_device *hidpp) { struct hidpp_report response; int ret, status; ret = hidpp_send_rap_command_sync(hidpp, REPORT_ID_HIDPP_SHORT, HIDPP_GET_REGISTER, HIDPP_REG_BATTERY_MILEAGE, NULL, 0, &response); if (ret) return ret; hidpp->battery.capacity = response.rap.params[0]; status = hidpp10_battery_mileage_map_status(response.rap.params[2]); hidpp->battery.status = status; /* the capacity is only available when discharging or full */ hidpp->battery.online = status == POWER_SUPPLY_STATUS_DISCHARGING || status == POWER_SUPPLY_STATUS_FULL; return 0; } static int hidpp10_battery_event(struct hidpp_device *hidpp, u8 *data, int size) { struct hidpp_report *report = (struct hidpp_report *)data; int status, capacity, level; bool changed; if (report->report_id != REPORT_ID_HIDPP_SHORT) return 0; switch (report->rap.sub_id) { case HIDPP_REG_BATTERY_STATUS: capacity = hidpp->battery.capacity; level = hidpp10_battery_status_map_level(report->rawbytes[1]); status = hidpp10_battery_status_map_status(report->rawbytes[2]); break; case HIDPP_REG_BATTERY_MILEAGE: capacity = report->rap.params[0]; level = hidpp->battery.level; status = hidpp10_battery_mileage_map_status(report->rawbytes[3]); break; default: return 0; } changed = capacity != hidpp->battery.capacity || level != hidpp->battery.level || status != hidpp->battery.status; /* the capacity is only available when discharging or full */ hidpp->battery.online = status == POWER_SUPPLY_STATUS_DISCHARGING || status == POWER_SUPPLY_STATUS_FULL; if (changed) { hidpp->battery.level = level; hidpp->battery.status = status; if (hidpp->battery.ps) power_supply_changed(hidpp->battery.ps); } return 0; } #define HIDPP_REG_PAIRING_INFORMATION 0xB5 #define HIDPP_EXTENDED_PAIRING 0x30 #define HIDPP_DEVICE_NAME 0x40 static char *hidpp_unifying_get_name(struct hidpp_device *hidpp_dev) { struct hidpp_report response; int ret; u8 params[1] = { HIDPP_DEVICE_NAME }; char *name; int len; ret = hidpp_send_rap_command_sync(hidpp_dev, REPORT_ID_HIDPP_SHORT, HIDPP_GET_LONG_REGISTER, HIDPP_REG_PAIRING_INFORMATION, params, 1, &response); if (ret) return NULL; len = response.rap.params[1]; if (2 + len > sizeof(response.rap.params)) return NULL; if (len < 4) /* logitech devices are usually at least Xddd */ return NULL; name = kzalloc(len + 1, GFP_KERNEL); if (!name) return NULL; memcpy(name, &response.rap.params[2], len); /* include the terminating '\0' */ hidpp_prefix_name(&name, len + 1); return name; } static int hidpp_unifying_get_serial(struct hidpp_device *hidpp, u32 *serial) { struct hidpp_report response; int ret; u8 params[1] = { HIDPP_EXTENDED_PAIRING }; ret = hidpp_send_rap_command_sync(hidpp, REPORT_ID_HIDPP_SHORT, HIDPP_GET_LONG_REGISTER, HIDPP_REG_PAIRING_INFORMATION, params, 1, &response); if (ret) return ret; /* * We don't care about LE or BE, we will output it as a