Total coverage: 453057 (23%)of 1995072
27 28 28 22 27 26 28 28 27 28 28 27 28 28 28 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 // SPDX-License-Identifier: GPL-2.0 /* * Copyright 2019 ARM Ltd. * * Generic implementation of update_vsyscall and update_vsyscall_tz. * * Based on the x86 specific implementation. */ #include <linux/hrtimer.h> #include <linux/timekeeper_internal.h> #include <vdso/datapage.h> #include <vdso/helpers.h> #include <vdso/vsyscall.h> #include "timekeeping_internal.h" static inline void update_vdso_data(struct vdso_data *vdata, struct timekeeper *tk) { struct vdso_timestamp *vdso_ts; u64 nsec, sec; vdata[CS_HRES_COARSE].cycle_last = tk->tkr_mono.cycle_last; #ifdef CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT vdata[CS_HRES_COARSE].max_cycles = tk->tkr_mono.clock->max_cycles; #endif vdata[CS_HRES_COARSE].mask = tk->tkr_mono.mask; vdata[CS_HRES_COARSE].mult = tk->tkr_mono.mult; vdata[CS_HRES_COARSE].shift = tk->tkr_mono.shift; vdata[CS_RAW].cycle_last = tk->tkr_raw.cycle_last; #ifdef CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT vdata[CS_RAW].max_cycles = tk->tkr_raw.clock->max_cycles; #endif vdata[CS_RAW].mask = tk->tkr_raw.mask; vdata[CS_RAW].mult = tk->tkr_raw.mult; vdata[CS_RAW].shift = tk->tkr_raw.shift; /* CLOCK_MONOTONIC */ vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC]; vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; nsec = tk->tkr_mono.xtime_nsec; nsec += ((u64)tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift); while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) { nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift); vdso_ts->sec++; } vdso_ts->nsec = nsec; /* Copy MONOTONIC time for BOOTTIME */ sec = vdso_ts->sec; /* Add the boot offset */ sec += tk->monotonic_to_boot.tv_sec; nsec += (u64)tk->monotonic_to_boot.tv_nsec << tk->tkr_mono.shift; /* CLOCK_BOOTTIME */ vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_BOOTTIME]; vdso_ts->sec = sec; while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) { nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift); vdso_ts->sec++; } vdso_ts->nsec = nsec; /* CLOCK_MONOTONIC_RAW */ vdso_ts = &vdata[CS_RAW].basetime[CLOCK_MONOTONIC_RAW]; vdso_ts->sec = tk->raw_sec; vdso_ts->nsec = tk->tkr_raw.xtime_nsec; /* CLOCK_TAI */ vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI]; vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset; vdso_ts->nsec = tk->tkr_mono.xtime_nsec; } void update_vsyscall(struct timekeeper *tk) { struct vdso_data *vdata = __arch_get_k_vdso_data(); struct vdso_timestamp *vdso_ts; s32 clock_mode; u64 nsec; /* copy vsyscall data */ vdso_write_begin(vdata); clock_mode = tk->tkr_mono.clock->vdso_clock_mode; vdata[CS_HRES_COARSE].clock_mode = clock_mode; vdata[CS_RAW].clock_mode = clock_mode; /* CLOCK_REALTIME also required for time() */ vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME]; vdso_ts->sec = tk->xtime_sec; vdso_ts->nsec = tk->tkr_mono.xtime_nsec; /* CLOCK_REALTIME_COARSE */ vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE]; vdso_ts->sec = tk->xtime_sec; vdso_ts->nsec = 
tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift; /* CLOCK_MONOTONIC_COARSE */ vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC_COARSE]; vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift; nsec = nsec + tk->wall_to_monotonic.tv_nsec; vdso_ts->sec += __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec); /* * Read without the seqlock held by clock_getres(). * Note: No need to have a second copy. */ WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution); /* * If the current clocksource is not VDSO capable, then spare the * update of the high resolution parts. */ if (clock_mode != VDSO_CLOCKMODE_NONE) update_vdso_data(vdata, tk); __arch_update_vsyscall(vdata); vdso_write_end(vdata); __arch_sync_vdso_data(vdata); } void update_vsyscall_tz(void) { struct vdso_data *vdata = __arch_get_k_vdso_data(); vdata[CS_HRES_COARSE].tz_minuteswest = sys_tz.tz_minuteswest; vdata[CS_HRES_COARSE].tz_dsttime = sys_tz.tz_dsttime; __arch_sync_vdso_data(vdata); } /** * vdso_update_begin - Start of a VDSO update section * * Allows architecture code to safely update the architecture specific VDSO * data. Disables interrupts, acquires timekeeper lock to serialize against * concurrent updates from timekeeping and invalidates the VDSO data * sequence counter to prevent concurrent readers from accessing * inconsistent data. * * Returns: Saved interrupt flags which need to be handed in to * vdso_update_end(). */ unsigned long vdso_update_begin(void) { struct vdso_data *vdata = __arch_get_k_vdso_data(); unsigned long flags = timekeeper_lock_irqsave(); vdso_write_begin(vdata); return flags; } /** * vdso_update_end - End of a VDSO update section * @flags: Interrupt flags as returned from vdso_update_begin() * * Pairs with vdso_update_begin(). Marks vdso data consistent, invokes data * synchronization if the architecture requires it, drops timekeeper lock * and restores interrupt flags. */ void vdso_update_end(unsigned long flags) { struct vdso_data *vdata = __arch_get_k_vdso_data(); vdso_write_end(vdata); __arch_sync_vdso_data(vdata); timekeeper_unlock_irqrestore(flags); }
225 225 225 224 3 3 188 189 189 189 189 189 189 189 189 189 189 189 189 188 189 189 190 191 190 191 191 190 190 191 191 294 190 295 3 3 3 3 3 3 3 3 3 3 3 3 39 3 1 1 1 259 259 259 3 2 1 259 259 2 2 2 2 2 2 222 223 223 222 221 223 213 36 35 35 35 35 36 36 36 36 36 36 36 9 36 223 224 159 159 159 81 223 225 165 223 223 92 225 36 223 36 223 36 36 36 15 36 35 36 35 222 223 222 223 36 36 223 2 1 224 81 1 80 81 3 1 81 8 8 123 123 123 121 121 121 121 121 121 119 121 121 121 118 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 
798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 
1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 // SPDX-License-Identifier: GPL-2.0 #include "linux/spinlock.h" #include <linux/minmax.h> #include "misc.h" #include "ctree.h" #include "space-info.h" #include "sysfs.h" #include "volumes.h" #include "free-space-cache.h" #include "ordered-data.h" #include "transaction.h" #include "block-group.h" #include "fs.h" #include "accessors.h" #include "extent-tree.h" #include "zoned.h" /* * HOW DOES SPACE RESERVATION WORK * * If you want to know about delalloc specifically, there is a separate comment * for that with the delalloc code. This comment is about how the whole system * works generally. 
* * BASIC CONCEPTS * * 1) space_info. This is the ultimate arbiter of how much space we can use. * There's a description of the bytes_ fields with the struct declaration, * refer to that for specifics on each field. Suffice it to say that for * reservations we care about total_bytes - SUM(space_info->bytes_) when * determining if there is space to make an allocation. There is a space_info * for METADATA, SYSTEM, and DATA areas. * * 2) block_rsv's. These are basically buckets for every different type of * metadata reservation we have. You can see the comment in the block_rsv * code on the rules for each type, but generally block_rsv->reserved is how * much space is accounted for in space_info->bytes_may_use. * * 3) btrfs_calc*_size. These are the worst case calculations we used based * on the number of items we will want to modify. We have one for changing * items, and one for inserting new items. Generally we use these helpers to * determine the size of the block reserves, and then use the actual bytes * values to adjust the space_info counters. * * MAKING RESERVATIONS, THE NORMAL CASE * * We call into either btrfs_reserve_data_bytes() or * btrfs_reserve_metadata_bytes(), depending on which we're looking for, with * num_bytes we want to reserve. * * ->reserve * space_info->bytes_may_reserve += num_bytes * * ->extent allocation * Call btrfs_add_reserved_bytes() which does * space_info->bytes_may_reserve -= num_bytes * space_info->bytes_reserved += extent_bytes * * ->insert reference * Call btrfs_update_block_group() which does * space_info->bytes_reserved -= extent_bytes * space_info->bytes_used += extent_bytes * * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority) * * Assume we are unable to simply make the reservation because we do not have * enough space * * -> __reserve_bytes * create a reserve_ticket with ->bytes set to our reservation, add it to * the tail of space_info->tickets, kick async flush thread * * ->handle_reserve_ticket * wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set * on the ticket. * * -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space * Flushes various things attempting to free up space. * * -> btrfs_try_granting_tickets() * This is called by anything that either subtracts space from * space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the * space_info->total_bytes. This loops through the ->priority_tickets and * then the ->tickets list checking to see if the reservation can be * completed. If it can the space is added to space_info->bytes_may_use and * the ticket is woken up. * * -> ticket wakeup * Check if ->bytes == 0, if it does we got our reservation and we can carry * on, if not return the appropriate error (ENOSPC, but can be EINTR if we * were interrupted.) * * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY * * Same as the above, except we add ourselves to the * space_info->priority_tickets, and we do not use ticket->wait, we simply * call flush_space() ourselves for the states that are safe for us to call * without deadlocking and hope for the best. * * THE FLUSHING STATES * * Generally speaking we will have two cases for each state, a "nice" state * and a "ALL THE THINGS" state. In btrfs we delay a lot of work in order to * reduce the locking over head on the various trees, and even to keep from * doing any work at all in the case of delayed refs. Each of these delayed * things however hold reservations, and so letting them run allows us to * reclaim space so we can make new reservations. 
* * FLUSH_DELAYED_ITEMS * Every inode has a delayed item to update the inode. Take a simple write * for example, we would update the inode item at write time to update the * mtime, and then again at finish_ordered_io() time in order to update the * isize or bytes. We keep these delayed items to coalesce these operations * into a single operation done on demand. These are an easy way to reclaim * metadata space. * * FLUSH_DELALLOC * Look at the delalloc comment to get an idea of how much space is reserved * for delayed allocation. We can reclaim some of this space simply by * running delalloc, but usually we need to wait for ordered extents to * reclaim the bulk of this space. * * FLUSH_DELAYED_REFS * We have a block reserve for the outstanding delayed refs space, and every * delayed ref operation holds a reservation. Running these is a quick way * to reclaim space, but we want to hold this until the end because COW can * churn a lot and we can avoid making some extent tree modifications if we * are able to delay for as long as possible. * * RESET_ZONES * This state works only for the zoned mode. On the zoned mode, we cannot * reuse once allocated then freed region until we reset the zone, due to * the sequential write zone requirement. The RESET_ZONES state resets the * zones of an unused block group and let us reuse the space. The reusing * is faster than removing the block group and allocating another block * group on the zones. * * ALLOC_CHUNK * We will skip this the first time through space reservation, because of * overcommit and we don't want to have a lot of useless metadata space when * our worst case reservations will likely never come true. * * RUN_DELAYED_IPUTS * If we're freeing inodes we're likely freeing checksums, file extent * items, and extent tree items. Loads of space could be freed up by these * operations, however they won't be usable until the transaction commits. * * COMMIT_TRANS * This will commit the transaction. Historically we had a lot of logic * surrounding whether or not we'd commit the transaction, but this waits born * out of a pre-tickets era where we could end up committing the transaction * thousands of times in a row without making progress. Now thanks to our * ticketing system we know if we're not making progress and can error * everybody out after a few commits rather than burning the disk hoping for * a different answer. * * OVERCOMMIT * * Because we hold so many reservations for metadata we will allow you to * reserve more space than is currently free in the currently allocate * metadata space. This only happens with metadata, data does not allow * overcommitting. * * You can see the current logic for when we allow overcommit in * btrfs_can_overcommit(), but it only applies to unallocated space. If there * is no unallocated space to be had, all reservations are kept within the * free space in the allocated metadata chunks. * * Because of overcommitting, you generally want to use the * btrfs_can_overcommit() logic for metadata allocations, as it does the right * thing with or without extra unallocated space. */ u64 __pure btrfs_space_info_used(const struct btrfs_space_info *s_info, bool may_use_included) { ASSERT(s_info); return s_info->bytes_used + s_info->bytes_reserved + s_info->bytes_pinned + s_info->bytes_readonly + s_info->bytes_zone_unusable + (may_use_included ? s_info->bytes_may_use : 0); } /* * after adding space to the filesystem, we need to clear the full flags * on all the space infos. 
*/ void btrfs_clear_space_info_full(struct btrfs_fs_info *info) { struct list_head *head = &info->space_info; struct btrfs_space_info *found; list_for_each_entry(found, head, list) found->full = 0; } /* * Block groups with more than this value (percents) of unusable space will be * scheduled for background reclaim. */ #define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH (75) #define BTRFS_UNALLOC_BLOCK_GROUP_TARGET (10ULL) /* * Calculate chunk size depending on volume type (regular or zoned). */ static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags) { if (btrfs_is_zoned(fs_info)) return fs_info->zone_size; ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK); if (flags & BTRFS_BLOCK_GROUP_DATA) return BTRFS_MAX_DATA_CHUNK_SIZE; else if (flags & BTRFS_BLOCK_GROUP_SYSTEM) return SZ_32M; /* Handle BTRFS_BLOCK_GROUP_METADATA */ if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G) return SZ_1G; return SZ_256M; } /* * Update default chunk size. */ void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info, u64 chunk_size) { WRITE_ONCE(space_info->chunk_size, chunk_size); } static int create_space_info(struct btrfs_fs_info *info, u64 flags) { struct btrfs_space_info *space_info; int i; int ret; space_info = kzalloc(sizeof(*space_info), GFP_NOFS); if (!space_info) return -ENOMEM; space_info->fs_info = info; for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) INIT_LIST_HEAD(&space_info->block_groups[i]); init_rwsem(&space_info->groups_sem); spin_lock_init(&space_info->lock); space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK; space_info->force_alloc = CHUNK_ALLOC_NO_FORCE; INIT_LIST_HEAD(&space_info->ro_bgs); INIT_LIST_HEAD(&space_info->tickets); INIT_LIST_HEAD(&space_info->priority_tickets); space_info->clamp = 1; btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags)); if (btrfs_is_zoned(info)) space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH; ret = btrfs_sysfs_add_space_info_type(info, space_info); if (ret) return ret; list_add(&space_info->list, &info->space_info); if (flags & BTRFS_BLOCK_GROUP_DATA) info->data_sinfo = space_info; return ret; } int btrfs_init_space_info(struct btrfs_fs_info *fs_info) { struct btrfs_super_block *disk_super; u64 features; u64 flags; int mixed = 0; int ret; disk_super = fs_info->super_copy; if (!btrfs_super_root(disk_super)) return -EINVAL; features = btrfs_super_incompat_flags(disk_super); if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) mixed = 1; flags = BTRFS_BLOCK_GROUP_SYSTEM; ret = create_space_info(fs_info, flags); if (ret) goto out; if (mixed) { flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA; ret = create_space_info(fs_info, flags); } else { flags = BTRFS_BLOCK_GROUP_METADATA; ret = create_space_info(fs_info, flags); if (ret) goto out; flags = BTRFS_BLOCK_GROUP_DATA; ret = create_space_info(fs_info, flags); } out: return ret; } void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info, struct btrfs_block_group *block_group) { struct btrfs_space_info *found; int factor, index; factor = btrfs_bg_type_to_factor(block_group->flags); found = btrfs_find_space_info(info, block_group->flags); ASSERT(found); spin_lock(&found->lock); found->total_bytes += block_group->length; found->disk_total += block_group->length * factor; found->bytes_used += block_group->used; found->disk_used += block_group->used * factor; found->bytes_readonly += block_group->bytes_super; btrfs_space_info_update_bytes_zone_unusable(found, block_group->zone_unusable); if (block_group->length > 0) found->full = 0; 
btrfs_try_granting_tickets(info, found); spin_unlock(&found->lock); block_group->space_info = found; index = btrfs_bg_flags_to_raid_index(block_group->flags); down_write(&found->groups_sem); list_add_tail(&block_group->list, &found->block_groups[index]); up_write(&found->groups_sem); } struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info, u64 flags) { struct list_head *head = &info->space_info; struct btrfs_space_info *found; flags &= BTRFS_BLOCK_GROUP_TYPE_MASK; list_for_each_entry(found, head, list) { if (found->flags & flags) return found; } return NULL; } static u64 calc_effective_data_chunk_size(struct btrfs_fs_info *fs_info) { struct btrfs_space_info *data_sinfo; u64 data_chunk_size; /* * Calculate the data_chunk_size, space_info->chunk_size is the * "optimal" chunk size based on the fs size. However when we actually * allocate the chunk we will strip this down further, making it no * more than 10% of the disk or 1G, whichever is smaller. * * On the zoned mode, we need to use zone_size (= data_sinfo->chunk_size) * as it is. */ data_sinfo = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA); if (btrfs_is_zoned(fs_info)) return data_sinfo->chunk_size; data_chunk_size = min(data_sinfo->chunk_size, mult_perc(fs_info->fs_devices->total_rw_bytes, 10)); return min_t(u64, data_chunk_size, SZ_1G); } static u64 calc_available_free_space(struct btrfs_fs_info *fs_info, const struct btrfs_space_info *space_info, enum btrfs_reserve_flush_enum flush) { u64 profile; u64 avail; u64 data_chunk_size; int factor; if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM) profile = btrfs_system_alloc_profile(fs_info); else profile = btrfs_metadata_alloc_profile(fs_info); avail = atomic64_read(&fs_info->free_chunk_space); /* * If we have dup, raid1 or raid10 then only half of the free * space is actually usable. For raid56, the space info used * doesn't include the parity drive, so we don't have to * change the math */ factor = btrfs_bg_type_to_factor(profile); avail = div_u64(avail, factor); if (avail == 0) return 0; data_chunk_size = calc_effective_data_chunk_size(fs_info); /* * Since data allocations immediately use block groups as part of the * reservation, because we assume that data reservations will == actual * usage, we could potentially overcommit and then immediately have that * available space used by a data allocation, which could put us in a * bind when we get close to filling the file system. * * To handle this simply remove the data_chunk_size from the available * space. If we are relatively empty this won't affect our ability to * overcommit much, and if we're very close to full it'll keep us from * getting into a position where we've given ourselves very little * metadata wiggle room. */ if (avail <= data_chunk_size) return 0; avail -= data_chunk_size; /* * If we aren't flushing all things, let us overcommit up to * 1/2th of the space. If we can flush, don't let us overcommit * too much, let it overcommit up to 1/8 of the space. */ if (flush == BTRFS_RESERVE_FLUSH_ALL) avail >>= 3; else avail >>= 1; /* * On the zoned mode, we always allocate one zone as one chunk. * Returning non-zone size alingned bytes here will result in * less pressure for the async metadata reclaim process, and it * will over-commit too much leading to ENOSPC. Align down to the * zone size to avoid that. 
*/ if (btrfs_is_zoned(fs_info)) avail = ALIGN_DOWN(avail, fs_info->zone_size); return avail; } int btrfs_can_overcommit(struct btrfs_fs_info *fs_info, const struct btrfs_space_info *space_info, u64 bytes, enum btrfs_reserve_flush_enum flush) { u64 avail; u64 used; /* Don't overcommit when in mixed mode */ if (space_info->flags & BTRFS_BLOCK_GROUP_DATA) return 0; used = btrfs_space_info_used(space_info, true); avail = calc_available_free_space(fs_info, space_info, flush); if (used + bytes < space_info->total_bytes + avail) return 1; return 0; } static void remove_ticket(struct btrfs_space_info *space_info, struct reserve_ticket *ticket) { if (!list_empty(&ticket->list)) { list_del_init(&ticket->list); ASSERT(space_info->reclaim_size >= ticket->bytes); space_info->reclaim_size -= ticket->bytes; } } /* * This is for space we already have accounted in space_info->bytes_may_use, so * basically when we're returning space from block_rsv's. */ void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info) { struct list_head *head; enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH; lockdep_assert_held(&space_info->lock); head = &space_info->priority_tickets; again: while (!list_empty(head)) { struct reserve_ticket *ticket; u64 used = btrfs_space_info_used(space_info, true); ticket = list_first_entry(head, struct reserve_ticket, list); /* Check and see if our ticket can be satisfied now. */ if ((used + ticket->bytes <= space_info->total_bytes) || btrfs_can_overcommit(fs_info, space_info, ticket->bytes, flush)) { btrfs_space_info_update_bytes_may_use(space_info, ticket->bytes); remove_ticket(space_info, ticket); ticket->bytes = 0; space_info->tickets_id++; wake_up(&ticket->wait); } else { break; } } if (head == &space_info->priority_tickets) { head = &space_info->tickets; flush = BTRFS_RESERVE_FLUSH_ALL; goto again; } } #define DUMP_BLOCK_RSV(fs_info, rsv_name) \ do { \ struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name; \ spin_lock(&__rsv->lock); \ btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu", \ __rsv->size, __rsv->reserved); \ spin_unlock(&__rsv->lock); \ } while (0) static const char *space_info_flag_to_str(const struct btrfs_space_info *space_info) { switch (space_info->flags) { case BTRFS_BLOCK_GROUP_SYSTEM: return "SYSTEM"; case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA: return "DATA+METADATA"; case BTRFS_BLOCK_GROUP_DATA: return "DATA"; case BTRFS_BLOCK_GROUP_METADATA: return "METADATA"; default: return "UNKNOWN"; } } static void dump_global_block_rsv(struct btrfs_fs_info *fs_info) { DUMP_BLOCK_RSV(fs_info, global_block_rsv); DUMP_BLOCK_RSV(fs_info, trans_block_rsv); DUMP_BLOCK_RSV(fs_info, chunk_block_rsv); DUMP_BLOCK_RSV(fs_info, delayed_block_rsv); DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv); } static void __btrfs_dump_space_info(const struct btrfs_fs_info *fs_info, const struct btrfs_space_info *info) { const char *flag_str = space_info_flag_to_str(info); lockdep_assert_held(&info->lock); /* The free space could be negative in case of overcommit */ btrfs_info(fs_info, "space_info %s has %lld free, is %sfull", flag_str, (s64)(info->total_bytes - btrfs_space_info_used(info, true)), info->full ? 
"" : "not "); btrfs_info(fs_info, "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu", info->total_bytes, info->bytes_used, info->bytes_pinned, info->bytes_reserved, info->bytes_may_use, info->bytes_readonly, info->bytes_zone_unusable); } void btrfs_dump_space_info(struct btrfs_fs_info *fs_info, struct btrfs_space_info *info, u64 bytes, int dump_block_groups) { struct btrfs_block_group *cache; u64 total_avail = 0; int index = 0; spin_lock(&info->lock); __btrfs_dump_space_info(fs_info, info); dump_global_block_rsv(fs_info); spin_unlock(&info->lock); if (!dump_block_groups) return; down_read(&info->groups_sem); again: list_for_each_entry(cache, &info->block_groups[index], list) { u64 avail; spin_lock(&cache->lock); avail = cache->length - cache->used - cache->pinned - cache->reserved - cache->bytes_super - cache->zone_unusable; btrfs_info(fs_info, "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu delalloc %llu super %llu zone_unusable (%llu bytes available) %s", cache->start, cache->length, cache->used, cache->pinned, cache->reserved, cache->delalloc_bytes, cache->bytes_super, cache->zone_unusable, avail, cache->ro ? "[readonly]" : ""); spin_unlock(&cache->lock); btrfs_dump_free_space(cache, bytes); total_avail += avail; } if (++index < BTRFS_NR_RAID_TYPES) goto again; up_read(&info->groups_sem); btrfs_info(fs_info, "%llu bytes available across all block groups", total_avail); } static inline u64 calc_reclaim_items_nr(const struct btrfs_fs_info *fs_info, u64 to_reclaim) { u64 bytes; u64 nr; bytes = btrfs_calc_insert_metadata_size(fs_info, 1); nr = div64_u64(to_reclaim, bytes); if (!nr) nr = 1; return nr; } /* * shrink metadata reservation for delalloc */ static void shrink_delalloc(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, u64 to_reclaim, bool wait_ordered, bool for_preempt) { struct btrfs_trans_handle *trans; u64 delalloc_bytes; u64 ordered_bytes; u64 items; long time_left; int loops; delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes); ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes); if (delalloc_bytes == 0 && ordered_bytes == 0) return; /* Calc the number of the pages we need flush for space reservation */ if (to_reclaim == U64_MAX) { items = U64_MAX; } else { /* * to_reclaim is set to however much metadata we need to * reclaim, but reclaiming that much data doesn't really track * exactly. What we really want to do is reclaim full inode's * worth of reservations, however that's not available to us * here. We will take a fraction of the delalloc bytes for our * flushing loops and hope for the best. Delalloc will expand * the amount we write to cover an entire dirty extent, which * will reclaim the metadata reservation for that range. If * it's not enough subsequent flush stages will be more * aggressive. */ to_reclaim = max(to_reclaim, delalloc_bytes >> 3); items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2; } trans = current->journal_info; /* * If we are doing more ordered than delalloc we need to just wait on * ordered extents, otherwise we'll waste time trying to flush delalloc * that likely won't give us the space back we need. 
*/ if (ordered_bytes > delalloc_bytes && !for_preempt) wait_ordered = true; loops = 0; while ((delalloc_bytes || ordered_bytes) && loops < 3) { u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT; long nr_pages = min_t(u64, temp, LONG_MAX); int async_pages; btrfs_start_delalloc_roots(fs_info, nr_pages, true); /* * We need to make sure any outstanding async pages are now * processed before we continue. This is because things like * sync_inode() try to be smart and skip writing if the inode is * marked clean. We don't use filemap_fwrite for flushing * because we want to control how many pages we write out at a * time, thus this is the only safe way to make sure we've * waited for outstanding compressed workers to have started * their jobs and thus have ordered extents set up properly. * * This exists because we do not want to wait for each * individual inode to finish its async work, we simply want to * start the IO on everybody, and then come back here and wait * for all of the async work to catch up. Once we're done with * that we know we'll have ordered extents for everything and we * can decide if we wait for that or not. * * If we choose to replace this in the future, make absolutely * sure that the proper waiting is being done in the async case, * as there have been bugs in that area before. */ async_pages = atomic_read(&fs_info->async_delalloc_pages); if (!async_pages) goto skip_async; /* * We don't want to wait forever, if we wrote less pages in this * loop than we have outstanding, only wait for that number of * pages, otherwise we can wait for all async pages to finish * before continuing. */ if (async_pages > nr_pages) async_pages -= nr_pages; else async_pages = 0; wait_event(fs_info->async_submit_wait, atomic_read(&fs_info->async_delalloc_pages) <= async_pages); skip_async: loops++; if (wait_ordered && !trans) { btrfs_wait_ordered_roots(fs_info, items, NULL); } else { time_left = schedule_timeout_killable(1); if (time_left) break; } /* * If we are for preemption we just want a one-shot of delalloc * flushing so we can stop flushing if we decide we don't need * to anymore. */ if (for_preempt) break; spin_lock(&space_info->lock); if (list_empty(&space_info->tickets) && list_empty(&space_info->priority_tickets)) { spin_unlock(&space_info->lock); break; } spin_unlock(&space_info->lock); delalloc_bytes = percpu_counter_sum_positive( &fs_info->delalloc_bytes); ordered_bytes = percpu_counter_sum_positive( &fs_info->ordered_bytes); } } /* * Try to flush some data based on policy set by @state. This is only advisory * and may fail for various reasons. The caller is supposed to examine the * state of @space_info to detect the outcome. 
*/ static void flush_space(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, u64 num_bytes, enum btrfs_flush_state state, bool for_preempt) { struct btrfs_root *root = fs_info->tree_root; struct btrfs_trans_handle *trans; int nr; int ret = 0; switch (state) { case FLUSH_DELAYED_ITEMS_NR: case FLUSH_DELAYED_ITEMS: if (state == FLUSH_DELAYED_ITEMS_NR) nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2; else nr = -1; trans = btrfs_join_transaction_nostart(root); if (IS_ERR(trans)) { ret = PTR_ERR(trans); if (ret == -ENOENT) ret = 0; break; } ret = btrfs_run_delayed_items_nr(trans, nr); btrfs_end_transaction(trans); break; case FLUSH_DELALLOC: case FLUSH_DELALLOC_WAIT: case FLUSH_DELALLOC_FULL: if (state == FLUSH_DELALLOC_FULL) num_bytes = U64_MAX; shrink_delalloc(fs_info, space_info, num_bytes, state != FLUSH_DELALLOC, for_preempt); break; case FLUSH_DELAYED_REFS_NR: case FLUSH_DELAYED_REFS: trans = btrfs_join_transaction_nostart(root); if (IS_ERR(trans)) { ret = PTR_ERR(trans); if (ret == -ENOENT) ret = 0; break; } if (state == FLUSH_DELAYED_REFS_NR) btrfs_run_delayed_refs(trans, num_bytes); else btrfs_run_delayed_refs(trans, 0); btrfs_end_transaction(trans); break; case ALLOC_CHUNK: case ALLOC_CHUNK_FORCE: trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { ret = PTR_ERR(trans); break; } ret = btrfs_chunk_alloc(trans, btrfs_get_alloc_profile(fs_info, space_info->flags), (state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE : CHUNK_ALLOC_FORCE); btrfs_end_transaction(trans); if (ret > 0 || ret == -ENOSPC) ret = 0; break; case RUN_DELAYED_IPUTS: /* * If we have pending delayed iputs then we could free up a * bunch of pinned space, so make sure we run the iputs before * we do our pinned bytes check below. */ btrfs_run_delayed_iputs(fs_info); btrfs_wait_on_delayed_iputs(fs_info); break; case COMMIT_TRANS: ASSERT(current->journal_info == NULL); /* * We don't want to start a new transaction, just attach to the * current one or wait it fully commits in case its commit is * happening at the moment. Note: we don't use a nostart join * because that does not wait for a transaction to fully commit * (only for it to be unblocked, state TRANS_STATE_UNBLOCKED). */ ret = btrfs_commit_current_transaction(root); break; case RESET_ZONES: ret = btrfs_reset_unused_block_groups(space_info, num_bytes); break; default: ret = -ENOSPC; break; } trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state, ret, for_preempt); return; } static u64 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info, const struct btrfs_space_info *space_info) { u64 used; u64 avail; u64 to_reclaim = space_info->reclaim_size; lockdep_assert_held(&space_info->lock); avail = calc_available_free_space(fs_info, space_info, BTRFS_RESERVE_FLUSH_ALL); used = btrfs_space_info_used(space_info, true); /* * We may be flushing because suddenly we have less space than we had * before, and now we're well over-committed based on our current free * space. If that's the case add in our overage so we make sure to put * appropriate pressure on the flushing state machine. 
*/ if (space_info->total_bytes + avail < used) to_reclaim += used - (space_info->total_bytes + avail); return to_reclaim; } static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info, const struct btrfs_space_info *space_info) { const u64 global_rsv_size = btrfs_block_rsv_reserved(&fs_info->global_block_rsv); u64 ordered, delalloc; u64 thresh; u64 used; thresh = mult_perc(space_info->total_bytes, 90); lockdep_assert_held(&space_info->lock); /* If we're just plain full then async reclaim just slows us down. */ if ((space_info->bytes_used + space_info->bytes_reserved + global_rsv_size) >= thresh) return false; used = space_info->bytes_may_use + space_info->bytes_pinned; /* The total flushable belongs to the global rsv, don't flush. */ if (global_rsv_size >= used) return false; /* * 128MiB is 1/4 of the maximum global rsv size. If we have less than * that devoted to other reservations then there's no sense in flushing, * we don't have a lot of things that need flushing. */ if (used - global_rsv_size <= SZ_128M) return false; /* * We have tickets queued, bail so we don't compete with the async * flushers. */ if (space_info->reclaim_size) return false; /* * If we have over half of the free space occupied by reservations or * pinned then we want to start flushing. * * We do not do the traditional thing here, which is to say * * if (used >= ((total_bytes + avail) / 2)) * return 1; * * because this doesn't quite work how we want. If we had more than 50% * of the space_info used by bytes_used and we had 0 available we'd just * constantly run the background flusher. Instead we want it to kick in * if our reclaimable space exceeds our clamped free space. * * Our clamping range is 2^1 -> 2^8. Practically speaking that means * the following: * * Amount of RAM Minimum threshold Maximum threshold * * 256GiB 1GiB 128GiB * 128GiB 512MiB 64GiB * 64GiB 256MiB 32GiB * 32GiB 128MiB 16GiB * 16GiB 64MiB 8GiB * * These are the range our thresholds will fall in, corresponding to how * much delalloc we need for the background flusher to kick in. */ thresh = calc_available_free_space(fs_info, space_info, BTRFS_RESERVE_FLUSH_ALL); used = space_info->bytes_used + space_info->bytes_reserved + space_info->bytes_readonly + global_rsv_size; if (used < space_info->total_bytes) thresh += space_info->total_bytes - used; thresh >>= space_info->clamp; used = space_info->bytes_pinned; /* * If we have more ordered bytes than delalloc bytes then we're either * doing a lot of DIO, or we simply don't have a lot of delalloc waiting * around. Preemptive flushing is only useful in that it can free up * space before tickets need to wait for things to finish. In the case * of ordered extents, preemptively waiting on ordered extents gets us * nothing, if our reservations are tied up in ordered extents we'll * simply have to slow down writers by forcing them to wait on ordered * extents. * * In the case that ordered is larger than delalloc, only include the * block reserves that we would actually be able to directly reclaim * from. In this case if we're heavy on metadata operations this will * clearly be heavy enough to warrant preemptive flushing. In the case * of heavy DIO or ordered reservations, preemptive flushing will just * waste time and cause us to slow down. * * We want to make sure we truly are maxed out on ordered however, so * cut ordered in half, and if it's still higher than delalloc then we * can keep flushing. 
This is to avoid the case where we start * flushing, and now delalloc == ordered and we stop preemptively * flushing when we could still have several gigs of delalloc to flush. */ ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1; delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes); if (ordered >= delalloc) used += btrfs_block_rsv_reserved(&fs_info->delayed_refs_rsv) + btrfs_block_rsv_reserved(&fs_info->delayed_block_rsv); else used += space_info->bytes_may_use - global_rsv_size; return (used >= thresh && !btrfs_fs_closing(fs_info) && !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state)); } static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, struct reserve_ticket *ticket) { struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; u64 min_bytes; if (!ticket->steal) return false; if (global_rsv->space_info != space_info) return false; spin_lock(&global_rsv->lock); min_bytes = mult_perc(global_rsv->size, 10); if (global_rsv->reserved < min_bytes + ticket->bytes) { spin_unlock(&global_rsv->lock); return false; } global_rsv->reserved -= ticket->bytes; remove_ticket(space_info, ticket); ticket->bytes = 0; wake_up(&ticket->wait); space_info->tickets_id++; if (global_rsv->reserved < global_rsv->size) global_rsv->full = 0; spin_unlock(&global_rsv->lock); return true; } /* * We've exhausted our flushing, start failing tickets. * * @fs_info - fs_info for this fs * @space_info - the space info we were flushing * * We call this when we've exhausted our flushing ability and haven't made * progress in satisfying tickets. The reservation code handles tickets in * order, so if there is a large ticket first and then smaller ones we could * very well satisfy the smaller tickets. This will attempt to wake up any * tickets in the list to catch this case. * * This function returns true if it was able to make progress by clearing out * other tickets, or if it stumbles across a ticket that was smaller than the * first ticket. */ static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info) { struct reserve_ticket *ticket; u64 tickets_id = space_info->tickets_id; const bool aborted = BTRFS_FS_ERROR(fs_info); trace_btrfs_fail_all_tickets(fs_info, space_info); if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { btrfs_info(fs_info, "cannot satisfy tickets, dumping space info"); __btrfs_dump_space_info(fs_info, space_info); } while (!list_empty(&space_info->tickets) && tickets_id == space_info->tickets_id) { ticket = list_first_entry(&space_info->tickets, struct reserve_ticket, list); if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket)) return true; if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) btrfs_info(fs_info, "failing ticket with %llu bytes", ticket->bytes); remove_ticket(space_info, ticket); if (aborted) ticket->error = -EIO; else ticket->error = -ENOSPC; wake_up(&ticket->wait); /* * We're just throwing tickets away, so more flushing may not * trip over btrfs_try_granting_tickets, so we need to call it * here to see if we can make progress with the next ticket in * the list. */ if (!aborted) btrfs_try_granting_tickets(fs_info, space_info); } return (tickets_id != space_info->tickets_id); } /* * This is for normal flushers, we can wait all goddamned day if we want to. We * will loop and continuously try to flush as long as we are making progress. * We count progress as clearing off tickets each time we have to loop. 
*/ static void btrfs_async_reclaim_metadata_space(struct work_struct *work) { struct btrfs_fs_info *fs_info; struct btrfs_space_info *space_info; u64 to_reclaim; enum btrfs_flush_state flush_state; int commit_cycles = 0; u64 last_tickets_id; enum btrfs_flush_state final_state; fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work); space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); if (btrfs_is_zoned(fs_info)) final_state = RESET_ZONES; else final_state = COMMIT_TRANS; spin_lock(&space_info->lock); to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info); if (!to_reclaim) { space_info->flush = 0; spin_unlock(&space_info->lock); return; } last_tickets_id = space_info->tickets_id; spin_unlock(&space_info->lock); flush_state = FLUSH_DELAYED_ITEMS_NR; do { flush_space(fs_info, space_info, to_reclaim, flush_state, false); spin_lock(&space_info->lock); if (list_empty(&space_info->tickets)) { space_info->flush = 0; spin_unlock(&space_info->lock); return; } to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info); if (last_tickets_id == space_info->tickets_id) { flush_state++; } else { last_tickets_id = space_info->tickets_id; flush_state = FLUSH_DELAYED_ITEMS_NR; if (commit_cycles) commit_cycles--; } /* * We do not want to empty the system of delalloc unless we're * under heavy pressure, so allow one trip through the flushing * logic before we start doing a FLUSH_DELALLOC_FULL. */ if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles) flush_state++; /* * We don't want to force a chunk allocation until we've tried * pretty hard to reclaim space. Think of the case where we * freed up a bunch of space and so have a lot of pinned space * to reclaim. We would rather use that than possibly create a * underutilized metadata chunk. So if this is our first run * through the flushing state machine skip ALLOC_CHUNK_FORCE and * commit the transaction. If nothing has changed the next go * around then we can force a chunk allocation. */ if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles) flush_state++; if (flush_state > final_state) { commit_cycles++; if (commit_cycles > 2) { if (maybe_fail_all_tickets(fs_info, space_info)) { flush_state = FLUSH_DELAYED_ITEMS_NR; commit_cycles--; } else { space_info->flush = 0; } } else { flush_state = FLUSH_DELAYED_ITEMS_NR; } } spin_unlock(&space_info->lock); } while (flush_state <= final_state); } /* * This handles pre-flushing of metadata space before we get to the point that * we need to start blocking threads on tickets. The logic here is different * from the other flush paths because it doesn't rely on tickets to tell us how * much we need to flush, instead it attempts to keep us below the 80% full * watermark of space by flushing whichever reservation pool is currently the * largest. 
*/ static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work) { struct btrfs_fs_info *fs_info; struct btrfs_space_info *space_info; struct btrfs_block_rsv *delayed_block_rsv; struct btrfs_block_rsv *delayed_refs_rsv; struct btrfs_block_rsv *global_rsv; struct btrfs_block_rsv *trans_rsv; int loops = 0; fs_info = container_of(work, struct btrfs_fs_info, preempt_reclaim_work); space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA); delayed_block_rsv = &fs_info->delayed_block_rsv; delayed_refs_rsv = &fs_info->delayed_refs_rsv; global_rsv = &fs_info->global_block_rsv; trans_rsv = &fs_info->trans_block_rsv; spin_lock(&space_info->lock); while (need_preemptive_reclaim(fs_info, space_info)) { enum btrfs_flush_state flush; u64 delalloc_size = 0; u64 to_reclaim, block_rsv_size; const u64 global_rsv_size = btrfs_block_rsv_reserved(global_rsv); loops++; /* * We don't have a precise counter for the metadata being * reserved for delalloc, so we'll approximate it by subtracting * out the block rsv's space from the bytes_may_use. If that * amount is higher than the individual reserves, then we can * assume it's tied up in delalloc reservations. */ block_rsv_size = global_rsv_size + btrfs_block_rsv_reserved(delayed_block_rsv) + btrfs_block_rsv_reserved(delayed_refs_rsv) + btrfs_block_rsv_reserved(trans_rsv); if (block_rsv_size < space_info->bytes_may_use) delalloc_size = space_info->bytes_may_use - block_rsv_size; /* * We don't want to include the global_rsv in our calculation, * because that's space we can't touch. Subtract it from the * block_rsv_size for the next checks. */ block_rsv_size -= global_rsv_size; /* * We really want to avoid flushing delalloc too much, as it * could result in poor allocation patterns, so only flush it if * it's larger than the rest of the pools combined. */ if (delalloc_size > block_rsv_size) { to_reclaim = delalloc_size; flush = FLUSH_DELALLOC; } else if (space_info->bytes_pinned > (btrfs_block_rsv_reserved(delayed_block_rsv) + btrfs_block_rsv_reserved(delayed_refs_rsv))) { to_reclaim = space_info->bytes_pinned; flush = COMMIT_TRANS; } else if (btrfs_block_rsv_reserved(delayed_block_rsv) > btrfs_block_rsv_reserved(delayed_refs_rsv)) { to_reclaim = btrfs_block_rsv_reserved(delayed_block_rsv); flush = FLUSH_DELAYED_ITEMS_NR; } else { to_reclaim = btrfs_block_rsv_reserved(delayed_refs_rsv); flush = FLUSH_DELAYED_REFS_NR; } spin_unlock(&space_info->lock); /* * We don't want to reclaim everything, just a portion, so scale * down the to_reclaim by 1/4. If it takes us down to 0, * reclaim 1 items worth. */ to_reclaim >>= 2; if (!to_reclaim) to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1); flush_space(fs_info, space_info, to_reclaim, flush, true); cond_resched(); spin_lock(&space_info->lock); } /* We only went through once, back off our clamping. */ if (loops == 1 && !space_info->reclaim_size) space_info->clamp = max(1, space_info->clamp - 1); trace_btrfs_done_preemptive_reclaim(fs_info, space_info); spin_unlock(&space_info->lock); } /* * FLUSH_DELALLOC_WAIT: * Space is freed from flushing delalloc in one of two ways. * * 1) compression is on and we allocate less space than we reserved * 2) we are overwriting existing space * * For #1 that extra space is reclaimed as soon as the delalloc pages are * COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent * length to ->bytes_reserved, and subtracts the reserved space from * ->bytes_may_use. * * For #2 this is trickier. 
Once the ordered extent runs we will drop the * extent in the range we are overwriting, which creates a delayed ref for * that freed extent. This however is not reclaimed until the transaction * commits, thus the next stages. * * RUN_DELAYED_IPUTS * If we are freeing inodes, we want to make sure all delayed iputs have * completed, because they could have been on an inode with i_nlink == 0, and * thus have been truncated and freed up space. But again this space is not * immediately reusable, it comes in the form of a delayed ref, which must be * run and then the transaction must be committed. * * COMMIT_TRANS * This is where we reclaim all of the pinned space generated by running the * iputs * * RESET_ZONES * This state works only for the zoned mode. We scan the unused block group * list and reset the zones and reuse the block group. * * ALLOC_CHUNK_FORCE * For data we start with alloc chunk force, however we could have been full * before, and then the transaction commit could have freed new block groups, * so if we now have space to allocate do the force chunk allocation. */ static const enum btrfs_flush_state data_flush_states[] = { FLUSH_DELALLOC_FULL, RUN_DELAYED_IPUTS, COMMIT_TRANS, RESET_ZONES, ALLOC_CHUNK_FORCE, }; static void btrfs_async_reclaim_data_space(struct work_struct *work) { struct btrfs_fs_info *fs_info; struct btrfs_space_info *space_info; u64 last_tickets_id; enum btrfs_flush_state flush_state = 0; fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work); space_info = fs_info->data_sinfo; spin_lock(&space_info->lock); if (list_empty(&space_info->tickets)) { space_info->flush = 0; spin_unlock(&space_info->lock); return; } last_tickets_id = space_info->tickets_id; spin_unlock(&space_info->lock); while (!space_info->full) { flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false); spin_lock(&space_info->lock); if (list_empty(&space_info->tickets)) { space_info->flush = 0; spin_unlock(&space_info->lock); return; } /* Something happened, fail everything and bail. */ if (BTRFS_FS_ERROR(fs_info)) goto aborted_fs; last_tickets_id = space_info->tickets_id; spin_unlock(&space_info->lock); } while (flush_state < ARRAY_SIZE(data_flush_states)) { flush_space(fs_info, space_info, U64_MAX, data_flush_states[flush_state], false); spin_lock(&space_info->lock); if (list_empty(&space_info->tickets)) { space_info->flush = 0; spin_unlock(&space_info->lock); return; } if (last_tickets_id == space_info->tickets_id) { flush_state++; } else { last_tickets_id = space_info->tickets_id; flush_state = 0; } if (flush_state >= ARRAY_SIZE(data_flush_states)) { if (space_info->full) { if (maybe_fail_all_tickets(fs_info, space_info)) flush_state = 0; else space_info->flush = 0; } else { flush_state = 0; } /* Something happened, fail everything and bail. 
*/ if (BTRFS_FS_ERROR(fs_info)) goto aborted_fs; } spin_unlock(&space_info->lock); } return; aborted_fs: maybe_fail_all_tickets(fs_info, space_info); space_info->flush = 0; spin_unlock(&space_info->lock); } void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info) { INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space); INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space); INIT_WORK(&fs_info->preempt_reclaim_work, btrfs_preempt_reclaim_metadata_space); } static const enum btrfs_flush_state priority_flush_states[] = { FLUSH_DELAYED_ITEMS_NR, FLUSH_DELAYED_ITEMS, RESET_ZONES, ALLOC_CHUNK, }; static const enum btrfs_flush_state evict_flush_states[] = { FLUSH_DELAYED_ITEMS_NR, FLUSH_DELAYED_ITEMS, FLUSH_DELAYED_REFS_NR, FLUSH_DELAYED_REFS, FLUSH_DELALLOC, FLUSH_DELALLOC_WAIT, FLUSH_DELALLOC_FULL, ALLOC_CHUNK, COMMIT_TRANS, RESET_ZONES, }; static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, struct reserve_ticket *ticket, const enum btrfs_flush_state *states, int states_nr) { u64 to_reclaim; int flush_state = 0; spin_lock(&space_info->lock); to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info); /* * This is the priority reclaim path, so to_reclaim could be >0 still * because we may have only satisfied the priority tickets and still * left non priority tickets on the list. We would then have * to_reclaim but ->bytes == 0. */ if (ticket->bytes == 0) { spin_unlock(&space_info->lock); return; } while (flush_state < states_nr) { spin_unlock(&space_info->lock); flush_space(fs_info, space_info, to_reclaim, states[flush_state], false); flush_state++; spin_lock(&space_info->lock); if (ticket->bytes == 0) { spin_unlock(&space_info->lock); return; } } /* * Attempt to steal from the global rsv if we can, except if the fs was * turned into error mode due to a transaction abort when flushing space * above, in that case fail with the abort error instead of returning * success to the caller if we can steal from the global rsv - this is * just to have caller fail immeditelly instead of later when trying to * modify the fs, making it easier to debug -ENOSPC problems. */ if (BTRFS_FS_ERROR(fs_info)) { ticket->error = BTRFS_FS_ERROR(fs_info); remove_ticket(space_info, ticket); } else if (!steal_from_global_rsv(fs_info, space_info, ticket)) { ticket->error = -ENOSPC; remove_ticket(space_info, ticket); } /* * We must run try_granting_tickets here because we could be a large * ticket in front of a smaller ticket that can now be satisfied with * the available space. */ btrfs_try_granting_tickets(fs_info, space_info); spin_unlock(&space_info->lock); } static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, struct reserve_ticket *ticket) { spin_lock(&space_info->lock); /* We could have been granted before we got here. 
*/ if (ticket->bytes == 0) { spin_unlock(&space_info->lock); return; } while (!space_info->full) { spin_unlock(&space_info->lock); flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false); spin_lock(&space_info->lock); if (ticket->bytes == 0) { spin_unlock(&space_info->lock); return; } } ticket->error = -ENOSPC; remove_ticket(space_info, ticket); btrfs_try_granting_tickets(fs_info, space_info); spin_unlock(&space_info->lock); } static void wait_reserve_ticket(struct btrfs_space_info *space_info, struct reserve_ticket *ticket) { DEFINE_WAIT(wait); int ret = 0; spin_lock(&space_info->lock); while (ticket->bytes > 0 && ticket->error == 0) { ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE); if (ret) { /* * Delete us from the list. After we unlock the space * info, we don't want the async reclaim job to reserve * space for this ticket. If that would happen, then the * ticket's task would not known that space was reserved * despite getting an error, resulting in a space leak * (bytes_may_use counter of our space_info). */ remove_ticket(space_info, ticket); ticket->error = -EINTR; break; } spin_unlock(&space_info->lock); schedule(); finish_wait(&ticket->wait, &wait); spin_lock(&space_info->lock); } spin_unlock(&space_info->lock); } /* * Do the appropriate flushing and waiting for a ticket. * * @fs_info: the filesystem * @space_info: space info for the reservation * @ticket: ticket for the reservation * @start_ns: timestamp when the reservation started * @orig_bytes: amount of bytes originally reserved * @flush: how much we can flush * * This does the work of figuring out how to flush for the ticket, waiting for * the reservation, and returning the appropriate error if there is one. */ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, struct reserve_ticket *ticket, u64 start_ns, u64 orig_bytes, enum btrfs_reserve_flush_enum flush) { int ret; switch (flush) { case BTRFS_RESERVE_FLUSH_DATA: case BTRFS_RESERVE_FLUSH_ALL: case BTRFS_RESERVE_FLUSH_ALL_STEAL: wait_reserve_ticket(space_info, ticket); break; case BTRFS_RESERVE_FLUSH_LIMIT: priority_reclaim_metadata_space(fs_info, space_info, ticket, priority_flush_states, ARRAY_SIZE(priority_flush_states)); break; case BTRFS_RESERVE_FLUSH_EVICT: priority_reclaim_metadata_space(fs_info, space_info, ticket, evict_flush_states, ARRAY_SIZE(evict_flush_states)); break; case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE: priority_reclaim_data_space(fs_info, space_info, ticket); break; default: ASSERT(0); break; } ret = ticket->error; ASSERT(list_empty(&ticket->list)); /* * Check that we can't have an error set if the reservation succeeded, * as that would confuse tasks and lead them to error out without * releasing reserved space (if an error happens the expectation is that * space wasn't reserved at all). */ ASSERT(!(ticket->bytes == 0 && ticket->error)); trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes, start_ns, flush, ticket->error); return ret; } /* * This returns true if this flush state will go through the ordinary flushing * code. 
*/ static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush) { return (flush == BTRFS_RESERVE_FLUSH_ALL) || (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL); } static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info) { u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes); u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes); /* * If we're heavy on ordered operations then clamping won't help us. We * need to clamp specifically to keep up with dirty'ing buffered * writers, because there's not a 1:1 correlation of writing delalloc * and freeing space, like there is with flushing delayed refs or * delayed nodes. If we're already more ordered than delalloc then * we're keeping up, otherwise we aren't and should probably clamp. */ if (ordered < delalloc) space_info->clamp = min(space_info->clamp + 1, 8); } static inline bool can_steal(enum btrfs_reserve_flush_enum flush) { return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL || flush == BTRFS_RESERVE_FLUSH_EVICT); } /* * NO_FLUSH and FLUSH_EMERGENCY don't want to create a ticket, they just want to * fail as quickly as possible. */ static inline bool can_ticket(enum btrfs_reserve_flush_enum flush) { return (flush != BTRFS_RESERVE_NO_FLUSH && flush != BTRFS_RESERVE_FLUSH_EMERGENCY); } /* * Try to reserve bytes from the block_rsv's space. * * @fs_info: the filesystem * @space_info: space info we want to allocate from * @orig_bytes: number of bytes we want * @flush: whether or not we can flush to make our reservation * * This will reserve orig_bytes number of bytes from the space info associated * with the block_rsv. If there is not enough space it will make an attempt to * flush out space to make room. It will do this by flushing delalloc if * possible or committing the transaction. If flush is 0 then no attempts to * regain reservations will be made and this will fail if there is not enough * space already. */ static int __reserve_bytes(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, u64 orig_bytes, enum btrfs_reserve_flush_enum flush) { struct work_struct *async_work; struct reserve_ticket ticket; u64 start_ns = 0; u64 used; int ret = -ENOSPC; bool pending_tickets; ASSERT(orig_bytes); /* * If have a transaction handle (current->journal_info != NULL), then * the flush method can not be neither BTRFS_RESERVE_FLUSH_ALL* nor * BTRFS_RESERVE_FLUSH_EVICT, as we could deadlock because those * flushing methods can trigger transaction commits. */ if (current->journal_info) { /* One assert per line for easier debugging. */ ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL); ASSERT(flush != BTRFS_RESERVE_FLUSH_ALL_STEAL); ASSERT(flush != BTRFS_RESERVE_FLUSH_EVICT); } if (flush == BTRFS_RESERVE_FLUSH_DATA) async_work = &fs_info->async_data_reclaim_work; else async_work = &fs_info->async_reclaim_work; spin_lock(&space_info->lock); used = btrfs_space_info_used(space_info, true); /* * We don't want NO_FLUSH allocations to jump everybody, they can * generally handle ENOSPC in a different way, so treat them the same as * normal flushers when it comes to skipping pending tickets. */ if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH)) pending_tickets = !list_empty(&space_info->tickets) || !list_empty(&space_info->priority_tickets); else pending_tickets = !list_empty(&space_info->priority_tickets); /* * Carry on if we have enough space (short-circuit) OR call * can_overcommit() to ensure we can overcommit to continue. 
*/ if (!pending_tickets && ((used + orig_bytes <= space_info->total_bytes) || btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) { btrfs_space_info_update_bytes_may_use(space_info, orig_bytes); ret = 0; } /* * Things are dire, we need to make a reservation so we don't abort. We * will let this reservation go through as long as we have actual space * left to allocate for the block. */ if (ret && unlikely(flush == BTRFS_RESERVE_FLUSH_EMERGENCY)) { used = btrfs_space_info_used(space_info, false); if (used + orig_bytes <= space_info->total_bytes) { btrfs_space_info_update_bytes_may_use(space_info, orig_bytes); ret = 0; } } /* * If we couldn't make a reservation then setup our reservation ticket * and kick the async worker if it's not already running. * * If we are a priority flusher then we just need to add our ticket to * the list and we will do our own flushing further down. */ if (ret && can_ticket(flush)) { ticket.bytes = orig_bytes; ticket.error = 0; space_info->reclaim_size += ticket.bytes; init_waitqueue_head(&ticket.wait); ticket.steal = can_steal(flush); if (trace_btrfs_reserve_ticket_enabled()) start_ns = ktime_get_ns(); if (flush == BTRFS_RESERVE_FLUSH_ALL || flush == BTRFS_RESERVE_FLUSH_ALL_STEAL || flush == BTRFS_RESERVE_FLUSH_DATA) { list_add_tail(&ticket.list, &space_info->tickets); if (!space_info->flush) { /* * We were forced to add a reserve ticket, so * our preemptive flushing is unable to keep * up. Clamp down on the threshold for the * preemptive flushing in order to keep up with * the workload. */ maybe_clamp_preempt(fs_info, space_info); space_info->flush = 1; trace_btrfs_trigger_flush(fs_info, space_info->flags, orig_bytes, flush, "enospc"); queue_work(system_unbound_wq, async_work); } } else { list_add_tail(&ticket.list, &space_info->priority_tickets); } } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { /* * We will do the space reservation dance during log replay, * which means we won't have fs_info->fs_root set, so don't do * the async reclaim as we will panic. */ if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) && !work_busy(&fs_info->preempt_reclaim_work) && need_preemptive_reclaim(fs_info, space_info)) { trace_btrfs_trigger_flush(fs_info, space_info->flags, orig_bytes, flush, "preempt"); queue_work(system_unbound_wq, &fs_info->preempt_reclaim_work); } } spin_unlock(&space_info->lock); if (!ret || !can_ticket(flush)) return ret; return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns, orig_bytes, flush); } /* * Try to reserve metadata bytes from the block_rsv's space. * * @fs_info: the filesystem * @space_info: the space_info we're allocating for * @orig_bytes: number of bytes we want * @flush: whether or not we can flush to make our reservation * * This will reserve orig_bytes number of bytes from the space info associated * with the block_rsv. If there is not enough space it will make an attempt to * flush out space to make room. It will do this by flushing delalloc if * possible or committing the transaction. If flush is 0 then no attempts to * regain reservations will be made and this will fail if there is not enough * space already. 
*/ int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info, struct btrfs_space_info *space_info, u64 orig_bytes, enum btrfs_reserve_flush_enum flush) { int ret; ret = __reserve_bytes(fs_info, space_info, orig_bytes, flush); if (ret == -ENOSPC) { trace_btrfs_space_reservation(fs_info, "space_info:enospc", space_info->flags, orig_bytes, 1); if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) btrfs_dump_space_info(fs_info, space_info, orig_bytes, 0); } return ret; } /* * Try to reserve data bytes for an allocation. * * @fs_info: the filesystem * @bytes: number of bytes we need * @flush: how we are allowed to flush * * This will reserve bytes from the data space info. If there is not enough * space then we will attempt to flush space as specified by flush. */ int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes, enum btrfs_reserve_flush_enum flush) { struct btrfs_space_info *data_sinfo = fs_info->data_sinfo; int ret; ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA || flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE || flush == BTRFS_RESERVE_NO_FLUSH); ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA); ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush); if (ret == -ENOSPC) { trace_btrfs_space_reservation(fs_info, "space_info:enospc", data_sinfo->flags, bytes, 1); if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0); } return ret; } /* Dump all the space infos when we abort a transaction due to ENOSPC. */ __cold void btrfs_dump_space_info_for_trans_abort(struct btrfs_fs_info *fs_info) { struct btrfs_space_info *space_info; btrfs_info(fs_info, "dumping space info:"); list_for_each_entry(space_info, &fs_info->space_info, list) { spin_lock(&space_info->lock); __btrfs_dump_space_info(fs_info, space_info); spin_unlock(&space_info->lock); } dump_global_block_rsv(fs_info); } /* * Account the unused space of all the readonly block group in the space_info. * takes mirrors into account. */ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo) { struct btrfs_block_group *block_group; u64 free_bytes = 0; int factor; /* It's df, we don't care if it's racy */ if (list_empty(&sinfo->ro_bgs)) return 0; spin_lock(&sinfo->lock); list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) { spin_lock(&block_group->lock); if (!block_group->ro) { spin_unlock(&block_group->lock); continue; } factor = btrfs_bg_type_to_factor(block_group->flags); free_bytes += (block_group->length - block_group->used) * factor; spin_unlock(&block_group->lock); } spin_unlock(&sinfo->lock); return free_bytes; } static u64 calc_pct_ratio(u64 x, u64 y) { int err; if (!y) return 0; again: err = check_mul_overflow(100, x, &x); if (err) goto lose_precision; return div64_u64(x, y); lose_precision: x >>= 10; y >>= 10; if (!y) y = 1; goto again; } /* * A reasonable buffer for unallocated space is 10 data block_groups. * If we claw this back repeatedly, we can still achieve efficient * utilization when near full, and not do too much reclaim while * always maintaining a solid buffer for workloads that quickly * allocate and pressure the unallocated space. */ static u64 calc_unalloc_target(struct btrfs_fs_info *fs_info) { u64 chunk_sz = calc_effective_data_chunk_size(fs_info); return BTRFS_UNALLOC_BLOCK_GROUP_TARGET * chunk_sz; } /* * The fundamental goal of automatic reclaim is to protect the filesystem's * unallocated space and thus minimize the probability of the filesystem going * read only when a metadata allocation failure causes a transaction abort. 
* * However, relocations happen into the space_info's unused space, therefore * automatic reclaim must also back off as that space runs low. There is no * value in doing trivial "relocations" of re-writing the same block group * into a fresh one. * * Furthermore, we want to avoid doing too much reclaim even if there are good * candidates. This is because the allocator is pretty good at filling up the * holes with writes. So we want to do just enough reclaim to try and stay * safe from running out of unallocated space but not be wasteful about it. * * Therefore, the dynamic reclaim threshold is calculated as follows: * - calculate a target unallocated amount of 5 block group sized chunks * - ratchet up the intensity of reclaim depending on how far we are from * that target by using a formula of unalloc / target to set the threshold. * * Typically with 10 block groups as the target, the discrete values this comes * out to are 0, 10, 20, ... , 80, 90, and 99. */ static int calc_dynamic_reclaim_threshold(const struct btrfs_space_info *space_info) { struct btrfs_fs_info *fs_info = space_info->fs_info; u64 unalloc = atomic64_read(&fs_info->free_chunk_space); u64 target = calc_unalloc_target(fs_info); u64 alloc = space_info->total_bytes; u64 used = btrfs_space_info_used(space_info, false); u64 unused = alloc - used; u64 want = target > unalloc ? target - unalloc : 0; u64 data_chunk_size = calc_effective_data_chunk_size(fs_info); /* If we have no unused space, don't bother, it won't work anyway. */ if (unused < data_chunk_size) return 0; /* Cast to int is OK because want <= target. */ return calc_pct_ratio(want, target); } int btrfs_calc_reclaim_threshold(const struct btrfs_space_info *space_info) { lockdep_assert_held(&space_info->lock); if (READ_ONCE(space_info->dynamic_reclaim)) return calc_dynamic_reclaim_threshold(space_info); return READ_ONCE(space_info->bg_reclaim_threshold); } /* * Under "urgent" reclaim, we will reclaim even fresh block groups that have * recently seen successful allocations, as we are desperate to reclaim * whatever we can to avoid ENOSPC in a transaction leading to a readonly fs. */ static bool is_reclaim_urgent(struct btrfs_space_info *space_info) { struct btrfs_fs_info *fs_info = space_info->fs_info; u64 unalloc = atomic64_read(&fs_info->free_chunk_space); u64 data_chunk_size = calc_effective_data_chunk_size(fs_info); return unalloc < data_chunk_size; } static void do_reclaim_sweep(struct btrfs_space_info *space_info, int raid) { struct btrfs_block_group *bg; int thresh_pct; bool try_again = true; bool urgent; spin_lock(&space_info->lock); urgent = is_reclaim_urgent(space_info); thresh_pct = btrfs_calc_reclaim_threshold(space_info); spin_unlock(&space_info->lock); down_read(&space_info->groups_sem); again: list_for_each_entry(bg, &space_info->block_groups[raid], list) { u64 thresh; bool reclaim = false; btrfs_get_block_group(bg); spin_lock(&bg->lock); thresh = mult_perc(bg->length, thresh_pct); if (bg->used < thresh && bg->reclaim_mark) { try_again = false; reclaim = true; } bg->reclaim_mark++; spin_unlock(&bg->lock); if (reclaim) btrfs_mark_bg_to_reclaim(bg); btrfs_put_block_group(bg); } /* * In situations where we are very motivated to reclaim (low unalloc) * use two passes to make the reclaim mark check best effort. * * If we have any staler groups, we don't touch the fresher ones, but if we * really need a block group, do take a fresh one. 
*/ if (try_again && urgent) { try_again = false; goto again; } up_read(&space_info->groups_sem); } void btrfs_space_info_update_reclaimable(struct btrfs_space_info *space_info, s64 bytes) { u64 chunk_sz = calc_effective_data_chunk_size(space_info->fs_info); lockdep_assert_held(&space_info->lock); space_info->reclaimable_bytes += bytes; if (space_info->reclaimable_bytes >= chunk_sz) btrfs_set_periodic_reclaim_ready(space_info, true); } void btrfs_set_periodic_reclaim_ready(struct btrfs_space_info *space_info, bool ready) { lockdep_assert_held(&space_info->lock); if (!READ_ONCE(space_info->periodic_reclaim)) return; if (ready != space_info->periodic_reclaim_ready) { space_info->periodic_reclaim_ready = ready; if (!ready) space_info->reclaimable_bytes = 0; } } bool btrfs_should_periodic_reclaim(struct btrfs_space_info *space_info) { bool ret; if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM) return false; if (!READ_ONCE(space_info->periodic_reclaim)) return false; spin_lock(&space_info->lock); ret = space_info->periodic_reclaim_ready; btrfs_set_periodic_reclaim_ready(space_info, false); spin_unlock(&space_info->lock); return ret; } void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info) { int raid; struct btrfs_space_info *space_info; list_for_each_entry(space_info, &fs_info->space_info, list) { if (!btrfs_should_periodic_reclaim(space_info)) continue; for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++) do_reclaim_sweep(space_info, raid); } } void btrfs_return_free_space(struct btrfs_space_info *space_info, u64 len) { struct btrfs_fs_info *fs_info = space_info->fs_info; struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; lockdep_assert_held(&space_info->lock); /* Prioritize the global reservation to receive the freed space. */ if (global_rsv->space_info != space_info) goto grant; spin_lock(&global_rsv->lock); if (!global_rsv->full) { u64 to_add = min(len, global_rsv->size - global_rsv->reserved); global_rsv->reserved += to_add; btrfs_space_info_update_bytes_may_use(space_info, to_add); if (global_rsv->reserved >= global_rsv->size) global_rsv->full = 1; len -= to_add; } spin_unlock(&global_rsv->lock); grant: /* Add to any tickets we may have. */ if (len) btrfs_try_granting_tickets(fs_info, space_info); }
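The dynamic reclaim threshold described in the comments above (ramp up reclaim intensity as unallocated space falls below a target of several data-chunk-sized block groups, computed via an overflow-safe percentage helper) can be illustrated with a small standalone sketch. Everything below is an illustration under stated assumptions: the chunk size, the 10-chunk target, and all demo_* names are invented for the demo and are not kernel definitions, and the helper only approximates calc_pct_ratio()'s overflow handling.

#include <stdio.h>
#include <stdint.h>

#define DEMO_CHUNK_SIZE	(1ULL << 30)	/* assumption: 1 GiB effective data chunk */
#define DEMO_BG_TARGET	10ULL		/* assumption: 10 block group target */

/* 100 * x / y, shifting both operands down if the multiply would overflow. */
static uint64_t demo_pct_ratio(uint64_t x, uint64_t y)
{
	if (!y)
		return 0;
	while (x > UINT64_MAX / 100) {
		x >>= 10;
		y >>= 10;
		if (!y)
			y = 1;
	}
	return 100 * x / y;
}

/*
 * want = how far unallocated space is below the target; the threshold is the
 * percentage want/target.  The real kernel code additionally bails out early
 * when the space_info has little unused space; that check is skipped here.
 */
static int demo_reclaim_threshold(uint64_t unalloc)
{
	uint64_t target = DEMO_BG_TARGET * DEMO_CHUNK_SIZE;
	uint64_t want = target > unalloc ? target - unalloc : 0;

	return (int)demo_pct_ratio(want, target);
}

int main(void)
{
	/* Walk unallocated space from the full target down to zero chunks. */
	for (uint64_t chunks = DEMO_BG_TARGET; ; chunks--) {
		printf("unalloc = %2llu chunks -> threshold = %3d%%\n",
		       (unsigned long long)chunks,
		       demo_reclaim_threshold(chunks * DEMO_CHUNK_SIZE));
		if (!chunks)
			break;
	}
	return 0;
}

Run as-is, the threshold climbs in roughly 10% steps as unallocated space shrinks, which is the ratcheting behaviour the block comment describes.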
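Similarly, the "refill the global reserve first, then hand the remainder to waiting tickets" policy implemented by btrfs_return_free_space() above reduces to a small piece of arithmetic. The sketch below is illustrative only; demo_rsv, demo_return_free_space() and the byte counts are assumptions made for the example, not kernel symbols or real reservation sizes.

#include <stdio.h>
#include <stdint.h>

struct demo_rsv {
	uint64_t size;		/* how much the reserve wants to hold */
	uint64_t reserved;	/* how much it currently holds */
};

/* Returns the number of freed bytes left over for other waiters. */
static uint64_t demo_return_free_space(struct demo_rsv *global, uint64_t len)
{
	if (global->reserved < global->size) {
		uint64_t to_add = global->size - global->reserved;

		if (to_add > len)
			to_add = len;
		global->reserved += to_add;
		len -= to_add;
	}
	return len;	/* in the kernel this would go to btrfs_try_granting_tickets() */
}

int main(void)
{
	struct demo_rsv global = { .size = 512, .reserved = 384 };
	uint64_t leftover = demo_return_free_space(&global, 200);

	printf("global reserve now %llu/%llu, %llu bytes left for tickets\n",
	       (unsigned long long)global.reserved,
	       (unsigned long long)global.size,
	       (unsigned long long)leftover);
	return 0;
}

With the invented numbers above, 128 of the 200 freed bytes top the reserve up to its target size and the remaining 72 are available to satisfy queued reservation tickets.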
// SPDX-License-Identifier: GPL-2.0-or-later /* * Parallel SCSI (SPI) transport specific attributes exported to sysfs. * * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved. * Copyright (c) 2004, 2005 James Bottomley <James.Bottomley@SteelEye.com> */ #include <linux/ctype.h> #include <linux/init.h> #include <linux/module.h> #include <linux/workqueue.h> #include <linux/blkdev.h> #include <linux/mutex.h> #include <linux/sysfs.h> #include <linux/slab.h> #include <linux/suspend.h> #include <scsi/scsi.h> #include "scsi_priv.h" #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_spi.h> #define SPI_NUM_ATTRS 14 /* increase this if you add attributes */ #define SPI_OTHER_ATTRS 1 /* Increase this if you add "always * on" attributes */ #define SPI_HOST_ATTRS 1 #define SPI_MAX_ECHO_BUFFER_SIZE 4096 #define DV_LOOPS 3 #define DV_TIMEOUT (10*HZ) #define DV_RETRIES 3 /* should only need at most * two cc/ua clears */ /* Our blacklist flags */ enum { SPI_BLIST_NOIUS = (__force blist_flags_t)0x1, }; /* blacklist table, modelled on scsi_devinfo.c */ static struct { char *vendor; char *model; blist_flags_t flags; } spi_static_device_list[] __initdata = { {"HP", "Ultrium 3-SCSI", SPI_BLIST_NOIUS }, {"IBM", "ULTRIUM-TD3", SPI_BLIST_NOIUS }, {NULL, NULL, 0} }; /* Private data accessors (keep these out of the header file) */ #define spi_dv_in_progress(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_in_progress) #define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex) struct spi_internal { struct scsi_transport_template t; struct spi_function_template *f; }; #define to_spi_internal(tmpl) container_of(tmpl, struct spi_internal, t) static const int ppr_to_ps[] = { /* The PPR values 0-6 are reserved, fill them in when * the committee defines them */ -1, /* 0x00 */ -1, /* 0x01 */ -1, /* 0x02 */ -1, /* 0x03 */ -1, /* 0x04 */ -1, /* 0x05 */ -1, /* 0x06 */ 3125, /* 0x07 */ 6250, /* 0x08 */ 12500, /* 0x09 */ 25000, /* 0x0a */ 30300, /* 0x0b */ 50000, /* 0x0c */ }; /* The PPR values at which you calculate the period in ns by multiplying * by 4 */ #define SPI_STATIC_PPR 0x0c static int sprint_frac(char *dest, int value, int denom) { int frac = value % denom; int result = sprintf(dest, "%d", value / denom); if (frac == 0) return result; dest[result++] = '.'; do { denom /= 10; sprintf(dest + result, "%d", frac / denom); result++; frac %= denom; } while (frac); dest[result++] = '\0'; return result; } static int spi_execute(struct scsi_device *sdev, const void *cmd, enum req_op op, void *buffer, unsigned int bufflen, struct scsi_sense_hdr *sshdr) { blk_opf_t opf = op | REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; struct scsi_failure failure_defs[] = { { .sense = UNIT_ATTENTION, .asc = SCMD_FAILURE_ASC_ANY, .ascq = SCMD_FAILURE_ASCQ_ANY, .allowed = DV_RETRIES, .result = SAM_STAT_CHECK_CONDITION, }, {} }; struct scsi_failures failures = { .failure_definitions = failure_defs, }; const struct scsi_exec_args exec_args = { /* bypass the SDEV_QUIESCE state with BLK_MQ_REQ_PM */ .req_flags = BLK_MQ_REQ_PM, .sshdr = sshdr, .failures = &failures, }; return scsi_execute_cmd(sdev, cmd, opf, buffer, bufflen, DV_TIMEOUT, 1, &exec_args); } static struct { enum spi_signal_type value; char *name; } signal_types[] = { { SPI_SIGNAL_UNKNOWN, "unknown" }, { SPI_SIGNAL_SE, "SE" }, { SPI_SIGNAL_LVD, "LVD" }, { SPI_SIGNAL_HVD, 
"HVD" }, }; static inline const char *spi_signal_to_string(enum spi_signal_type type) { int i; for (i = 0; i < ARRAY_SIZE(signal_types); i++) { if (type == signal_types[i].value) return signal_types[i].name; } return NULL; } static inline enum spi_signal_type spi_signal_to_value(const char *name) { int i, len; for (i = 0; i < ARRAY_SIZE(signal_types); i++) { len = strlen(signal_types[i].name); if (strncmp(name, signal_types[i].name, len) == 0 && (name[len] == '\n' || name[len] == '\0')) return signal_types[i].value; } return SPI_SIGNAL_UNKNOWN; } static int spi_host_setup(struct transport_container *tc, struct device *dev, struct device *cdev) { struct Scsi_Host *shost = dev_to_shost(dev); spi_signalling(shost) = SPI_SIGNAL_UNKNOWN; return 0; } static int spi_host_configure(struct transport_container *tc, struct device *dev, struct device *cdev); static DECLARE_TRANSPORT_CLASS(spi_host_class, "spi_host", spi_host_setup, NULL, spi_host_configure); static int spi_host_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; if (!scsi_is_host_device(dev)) return 0; shost = dev_to_shost(dev); if (!shost->transportt || shost->transportt->host_attrs.ac.class != &spi_host_class.class) return 0; return &shost->transportt->host_attrs.ac == cont; } static int spi_target_configure(struct transport_container *tc, struct device *dev, struct device *cdev); static int spi_device_configure(struct transport_container *tc, struct device *dev, struct device *cdev) { struct scsi_device *sdev = to_scsi_device(dev); struct scsi_target *starget = sdev->sdev_target; blist_flags_t bflags; bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8], &sdev->inquiry[16], SCSI_DEVINFO_SPI); /* Populate the target capability fields with the values * gleaned from the device inquiry */ spi_support_sync(starget) = scsi_device_sync(sdev); spi_support_wide(starget) = scsi_device_wide(sdev); spi_support_dt(starget) = scsi_device_dt(sdev); spi_support_dt_only(starget) = scsi_device_dt_only(sdev); spi_support_ius(starget) = scsi_device_ius(sdev); if (bflags & SPI_BLIST_NOIUS) { dev_info(dev, "Information Units disabled by blacklist\n"); spi_support_ius(starget) = 0; } spi_support_qas(starget) = scsi_device_qas(sdev); return 0; } static int spi_setup_transport_attrs(struct transport_container *tc, struct device *dev, struct device *cdev) { struct scsi_target *starget = to_scsi_target(dev); spi_period(starget) = -1; /* illegal value */ spi_min_period(starget) = 0; spi_offset(starget) = 0; /* async */ spi_max_offset(starget) = 255; spi_width(starget) = 0; /* narrow */ spi_max_width(starget) = 1; spi_iu(starget) = 0; /* no IU */ spi_max_iu(starget) = 1; spi_dt(starget) = 0; /* ST */ spi_qas(starget) = 0; spi_max_qas(starget) = 1; spi_wr_flow(starget) = 0; spi_rd_strm(starget) = 0; spi_rti(starget) = 0; spi_pcomp_en(starget) = 0; spi_hold_mcs(starget) = 0; spi_dv_pending(starget) = 0; spi_dv_in_progress(starget) = 0; spi_initial_dv(starget) = 0; mutex_init(&spi_dv_mutex(starget)); return 0; } #define spi_transport_show_simple(field, format_string) \ \ static ssize_t \ show_spi_transport_##field(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct scsi_target *starget = transport_class_to_starget(dev); \ struct spi_transport_attrs *tp; \ \ tp = (struct spi_transport_attrs *)&starget->starget_data; \ return snprintf(buf, 20, format_string, tp->field); \ } #define spi_transport_store_simple(field, format_string) \ \ static ssize_t \ store_spi_transport_##field(struct device *dev, 
\ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ int val; \ struct scsi_target *starget = transport_class_to_starget(dev); \ struct spi_transport_attrs *tp; \ \ tp = (struct spi_transport_attrs *)&starget->starget_data; \ val = simple_strtoul(buf, NULL, 0); \ tp->field = val; \ return count; \ } #define spi_transport_show_function(field, format_string) \ \ static ssize_t \ show_spi_transport_##field(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct scsi_target *starget = transport_class_to_starget(dev); \ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ struct spi_transport_attrs *tp; \ struct spi_internal *i = to_spi_internal(shost->transportt); \ tp = (struct spi_transport_attrs *)&starget->starget_data; \ if (i->f->get_##field) \ i->f->get_##field(starget); \ return snprintf(buf, 20, format_string, tp->field); \ } #define spi_transport_store_function(field, format_string) \ static ssize_t \ store_spi_transport_##field(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ int val; \ struct scsi_target *starget = transport_class_to_starget(dev); \ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ struct spi_internal *i = to_spi_internal(shost->transportt); \ \ if (!i->f->set_##field) \ return -EINVAL; \ val = simple_strtoul(buf, NULL, 0); \ i->f->set_##field(starget, val); \ return count; \ } #define spi_transport_store_max(field, format_string) \ static ssize_t \ store_spi_transport_##field(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t count) \ { \ int val; \ struct scsi_target *starget = transport_class_to_starget(dev); \ struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ struct spi_internal *i = to_spi_internal(shost->transportt); \ struct spi_transport_attrs *tp \ = (struct spi_transport_attrs *)&starget->starget_data; \ \ if (!i->f->set_##field) \ return -EINVAL; \ val = simple_strtoul(buf, NULL, 0); \ if (val > tp->max_##field) \ val = tp->max_##field; \ i->f->set_##field(starget, val); \ return count; \ } #define spi_transport_rd_attr(field, format_string) \ spi_transport_show_function(field, format_string) \ spi_transport_store_function(field, format_string) \ static DEVICE_ATTR(field, S_IRUGO, \ show_spi_transport_##field, \ store_spi_transport_##field); #define spi_transport_simple_attr(field, format_string) \ spi_transport_show_simple(field, format_string) \ spi_transport_store_simple(field, format_string) \ static DEVICE_ATTR(field, S_IRUGO, \ show_spi_transport_##field, \ store_spi_transport_##field); #define spi_transport_max_attr(field, format_string) \ spi_transport_show_function(field, format_string) \ spi_transport_store_max(field, format_string) \ spi_transport_simple_attr(max_##field, format_string) \ static DEVICE_ATTR(field, S_IRUGO, \ show_spi_transport_##field, \ store_spi_transport_##field); /* The Parallel SCSI Tranport Attributes: */ spi_transport_max_attr(offset, "%d\n"); spi_transport_max_attr(width, "%d\n"); spi_transport_max_attr(iu, "%d\n"); spi_transport_rd_attr(dt, "%d\n"); spi_transport_max_attr(qas, "%d\n"); spi_transport_rd_attr(wr_flow, "%d\n"); spi_transport_rd_attr(rd_strm, "%d\n"); spi_transport_rd_attr(rti, "%d\n"); spi_transport_rd_attr(pcomp_en, "%d\n"); spi_transport_rd_attr(hold_mcs, "%d\n"); /* we only care about the first child device that's a real SCSI device * so we return 1 to terminate the iteration when we find it */ static int child_iter(struct device *dev, void *data) { if 
(!scsi_is_sdev_device(dev)) return 0; spi_dv_device(to_scsi_device(dev)); return 1; } static ssize_t store_spi_revalidate(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_target *starget = transport_class_to_starget(dev); device_for_each_child(&starget->dev, NULL, child_iter); return count; } static DEVICE_ATTR(revalidate, S_IWUSR, NULL, store_spi_revalidate); /* Translate the period into ns according to the current spec * for SDTR/PPR messages */ static int period_to_str(char *buf, int period) { int len, picosec; if (period < 0 || period > 0xff) { picosec = -1; } else if (period <= SPI_STATIC_PPR) { picosec = ppr_to_ps[period]; } else { picosec = period * 4000; } if (picosec == -1) { len = sprintf(buf, "reserved"); } else { len = sprint_frac(buf, picosec, 1000); } return len; } static ssize_t show_spi_transport_period_helper(char *buf, int period) { int len = period_to_str(buf, period); buf[len++] = '\n'; buf[len] = '\0'; return len; } static ssize_t store_spi_transport_period_helper(struct device *dev, const char *buf, size_t count, int *periodp) { int j, picosec, period = -1; char *endp; picosec = simple_strtoul(buf, &endp, 10) * 1000; if (*endp == '.') { int mult = 100; do { endp++; if (!isdigit(*endp)) break; picosec += (*endp - '0') * mult; mult /= 10; } while (mult > 0); } for (j = 0; j <= SPI_STATIC_PPR; j++) { if (ppr_to_ps[j] < picosec) continue; period = j; break; } if (period == -1) period = picosec / 4000; if (period > 0xff) period = 0xff; *periodp = period; return count; } static ssize_t show_spi_transport_period(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_target *starget = transport_class_to_starget(dev); struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct spi_internal *i = to_spi_internal(shost->transportt); struct spi_transport_attrs *tp = (struct spi_transport_attrs *)&starget->starget_data; if (i->f->get_period) i->f->get_period(starget); return show_spi_transport_period_helper(buf, tp->period); } static ssize_t store_spi_transport_period(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_target *starget = transport_class_to_starget(cdev); struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct spi_internal *i = to_spi_internal(shost->transportt); struct spi_transport_attrs *tp = (struct spi_transport_attrs *)&starget->starget_data; int period, retval; if (!i->f->set_period) return -EINVAL; retval = store_spi_transport_period_helper(cdev, buf, count, &period); if (period < tp->min_period) period = tp->min_period; i->f->set_period(starget, period); return retval; } static DEVICE_ATTR(period, S_IRUGO, show_spi_transport_period, store_spi_transport_period); static ssize_t show_spi_transport_min_period(struct device *cdev, struct device_attribute *attr, char *buf) { struct scsi_target *starget = transport_class_to_starget(cdev); struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); struct spi_internal *i = to_spi_internal(shost->transportt); struct spi_transport_attrs *tp = (struct spi_transport_attrs *)&starget->starget_data; if (!i->f->set_period) return -EINVAL; return show_spi_transport_period_helper(buf, tp->min_period); } static ssize_t store_spi_transport_min_period(struct device *cdev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_target *starget = transport_class_to_starget(cdev); struct spi_transport_attrs *tp = (struct spi_transport_attrs *)&starget->starget_data; return 
store_spi_transport_period_helper(cdev, buf, count, &tp->min_period); } static DEVICE_ATTR(min_period, S_IRUGO, show_spi_transport_min_period, store_spi_transport_min_period); static ssize_t show_spi_host_signalling(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = transport_class_to_shost(cdev); struct spi_internal *i = to_spi_internal(shost->transportt); if (i->f->get_signalling) i->f->get_signalling(shost); return sprintf(buf, "%s\n", spi_signal_to_string(spi_signalling(shost))); } static ssize_t store_spi_host_signalling(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = transport_class_to_shost(dev); struct spi_internal *i = to_spi_internal(shost->transportt); enum spi_signal_type type = spi_signal_to_value(buf); if (!i->f->set_signalling) return -EINVAL; if (type != SPI_SIGNAL_UNKNOWN) i->f->set_signalling(shost, type); return count; } static DEVICE_ATTR(signalling, S_IRUGO, show_spi_host_signalling, store_spi_host_signalling); static ssize_t show_spi_host_width(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = transport_class_to_shost(cdev); return sprintf(buf, "%s\n", shost->max_id == 16 ? "wide" : "narrow"); } static DEVICE_ATTR(host_width, S_IRUGO, show_spi_host_width, NULL); static ssize_t show_spi_host_hba_id(struct device *cdev, struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = transport_class_to_shost(cdev); return sprintf(buf, "%d\n", shost->this_id); } static DEVICE_ATTR(hba_id, S_IRUGO, show_spi_host_hba_id, NULL); #define DV_SET(x, y) \ if(i->f->set_##x) \ i->f->set_##x(sdev->sdev_target, y) enum spi_compare_returns { SPI_COMPARE_SUCCESS, SPI_COMPARE_FAILURE, SPI_COMPARE_SKIP_TEST, }; /* This is for read/write Domain Validation: If the device supports * an echo buffer, we do read/write tests to it */ static enum spi_compare_returns spi_dv_device_echo_buffer(struct scsi_device *sdev, u8 *buffer, u8 *ptr, const int retries) { int len = ptr - buffer; int j, k, r, result; unsigned int pattern = 0x0000ffff; struct scsi_sense_hdr sshdr; const char spi_write_buffer[] = { WRITE_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0 }; const char spi_read_buffer[] = { READ_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0 }; /* set up the pattern buffer. Doesn't matter if we spill * slightly beyond since that's where the read buffer is */ for (j = 0; j < len; ) { /* fill the buffer with counting (test a) */ for ( ; j < min(len, 32); j++) buffer[j] = j; k = j; /* fill the buffer with alternating words of 0x0 and * 0xffff (test b) */ for ( ; j < min(len, k + 32); j += 2) { u16 *word = (u16 *)&buffer[j]; *word = (j & 0x02) ? 0x0000 : 0xffff; } k = j; /* fill with crosstalk (alternating 0x5555 0xaaa) * (test c) */ for ( ; j < min(len, k + 32); j += 2) { u16 *word = (u16 *)&buffer[j]; *word = (j & 0x02) ? 0x5555 : 0xaaaa; } k = j; /* fill with shifting bits (test d) */ for ( ; j < min(len, k + 32); j += 4) { u32 *word = (unsigned int *)&buffer[j]; u32 roll = (pattern & 0x80000000) ? 
1 : 0; *word = pattern; pattern = (pattern << 1) | roll; } /* don't bother with random data (test e) */ } for (r = 0; r < retries; r++) { result = spi_execute(sdev, spi_write_buffer, REQ_OP_DRV_OUT, buffer, len, &sshdr); if (result || !scsi_device_online(sdev)) { scsi_device_set_state(sdev, SDEV_QUIESCE); if (result > 0 && scsi_sense_valid(&sshdr) && sshdr.sense_key == ILLEGAL_REQUEST /* INVALID FIELD IN CDB */ && sshdr.asc == 0x24 && sshdr.ascq == 0x00) /* This would mean that the drive lied * to us about supporting an echo * buffer (unfortunately some Western * Digital drives do precisely this) */ return SPI_COMPARE_SKIP_TEST; sdev_printk(KERN_ERR, sdev, "Write Buffer failure %x\n", result); return SPI_COMPARE_FAILURE; } memset(ptr, 0, len); spi_execute(sdev, spi_read_buffer, REQ_OP_DRV_IN, ptr, len, NULL); scsi_device_set_state(sdev, SDEV_QUIESCE); if (memcmp(buffer, ptr, len) != 0) return SPI_COMPARE_FAILURE; } return SPI_COMPARE_SUCCESS; } /* This is for the simplest form of Domain Validation: a read test * on the inquiry data from the device */ static enum spi_compare_returns spi_dv_device_compare_inquiry(struct scsi_device *sdev, u8 *buffer, u8 *ptr, const int retries) { int r, result; const int len = sdev->inquiry_len; const char spi_inquiry[] = { INQUIRY, 0, 0, 0, len, 0 }; for (r = 0; r < retries; r++) { memset(ptr, 0, len); result = spi_execute(sdev, spi_inquiry, REQ_OP_DRV_IN, ptr, len, NULL); if(result || !scsi_device_online(sdev)) { scsi_device_set_state(sdev, SDEV_QUIESCE); return SPI_COMPARE_FAILURE; } /* If we don't have the inquiry data already, the * first read gets it */ if (ptr == buffer) { ptr += len; --r; continue; } if (memcmp(buffer, ptr, len) != 0) /* failure */ return SPI_COMPARE_FAILURE; } return SPI_COMPARE_SUCCESS; } static enum spi_compare_returns spi_dv_retrain(struct scsi_device *sdev, u8 *buffer, u8 *ptr, enum spi_compare_returns (*compare_fn)(struct scsi_device *, u8 *, u8 *, int)) { struct spi_internal *i = to_spi_internal(sdev->host->transportt); struct scsi_target *starget = sdev->sdev_target; int period = 0, prevperiod = 0; enum spi_compare_returns retval; for (;;) { int newperiod; retval = compare_fn(sdev, buffer, ptr, DV_LOOPS); if (retval == SPI_COMPARE_SUCCESS || retval == SPI_COMPARE_SKIP_TEST) break; /* OK, retrain, fallback */ if (i->f->get_iu) i->f->get_iu(starget); if (i->f->get_qas) i->f->get_qas(starget); if (i->f->get_period) i->f->get_period(sdev->sdev_target); /* Here's the fallback sequence; first try turning off * IU, then QAS (if we can control them), then finally * fall down the periods */ if (i->f->set_iu && spi_iu(starget)) { starget_printk(KERN_ERR, starget, "Domain Validation Disabling Information Units\n"); DV_SET(iu, 0); } else if (i->f->set_qas && spi_qas(starget)) { starget_printk(KERN_ERR, starget, "Domain Validation Disabling Quick Arbitration and Selection\n"); DV_SET(qas, 0); } else { newperiod = spi_period(starget); period = newperiod > period ? 
newperiod : period; if (period < 0x0d) period++; else period += period >> 1; if (unlikely(period > 0xff || period == prevperiod)) { /* Total failure; set to async and return */ starget_printk(KERN_ERR, starget, "Domain Validation Failure, dropping back to Asynchronous\n"); DV_SET(offset, 0); return SPI_COMPARE_FAILURE; } starget_printk(KERN_ERR, starget, "Domain Validation detected failure, dropping back\n"); DV_SET(period, period); prevperiod = period; } } return retval; } static int spi_dv_device_get_echo_buffer(struct scsi_device *sdev, u8 *buffer) { int l, result; /* first off do a test unit ready. This can error out * because of reservations or some other reason. If it * fails, the device won't let us write to the echo buffer * so just return failure */ static const char spi_test_unit_ready[] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 }; static const char spi_read_buffer_descriptor[] = { READ_BUFFER, 0x0b, 0, 0, 0, 0, 0, 0, 4, 0 }; /* We send a set of three TURs to clear any outstanding * unit attention conditions if they exist (Otherwise the * buffer tests won't be happy). If the TUR still fails * (reservation conflict, device not ready, etc) just * skip the write tests */ for (l = 0; ; l++) { result = spi_execute(sdev, spi_test_unit_ready, REQ_OP_DRV_IN, NULL, 0, NULL); if(result) { if(l >= 3) return 0; } else { /* TUR succeeded */ break; } } result = spi_execute(sdev, spi_read_buffer_descriptor, REQ_OP_DRV_IN, buffer, 4, NULL); if (result) /* Device has no echo buffer */ return 0; return buffer[3] + ((buffer[2] & 0x1f) << 8); } static void spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) { struct spi_internal *i = to_spi_internal(sdev->host->transportt); struct scsi_target *starget = sdev->sdev_target; struct Scsi_Host *shost = sdev->host; int len = sdev->inquiry_len; int min_period = spi_min_period(starget); int max_width = spi_max_width(starget); /* first set us up for narrow async */ DV_SET(offset, 0); DV_SET(width, 0); if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS) != SPI_COMPARE_SUCCESS) { starget_printk(KERN_ERR, starget, "Domain Validation Initial Inquiry Failed\n"); /* FIXME: should probably offline the device here? */ return; } if (!spi_support_wide(starget)) { spi_max_width(starget) = 0; max_width = 0; } /* test width */ if (i->f->set_width && max_width) { i->f->set_width(starget, 1); if (spi_dv_device_compare_inquiry(sdev, buffer, buffer + len, DV_LOOPS) != SPI_COMPARE_SUCCESS) { starget_printk(KERN_ERR, starget, "Wide Transfers Fail\n"); i->f->set_width(starget, 0); /* Make sure we don't force wide back on by asking * for a transfer period that requires it */ max_width = 0; if (min_period < 10) min_period = 10; } } if (!i->f->set_period) return; /* device can't handle synchronous */ if (!spi_support_sync(starget) && !spi_support_dt(starget)) return; /* len == -1 is the signal that we need to ascertain the * presence of an echo buffer before trying to use it. len == * 0 means we don't have an echo buffer */ len = -1; retry: /* now set up to the maximum */ DV_SET(offset, spi_max_offset(starget)); DV_SET(period, min_period); /* try QAS requests; this should be harmless to set if the * target supports it */ if (spi_support_qas(starget) && spi_max_qas(starget)) { DV_SET(qas, 1); } else { DV_SET(qas, 0); } if (spi_support_ius(starget) && spi_max_iu(starget) && min_period < 9) { /* This u320 (or u640). 
Set IU transfers */ DV_SET(iu, 1); /* Then set the optional parameters */ DV_SET(rd_strm, 1); DV_SET(wr_flow, 1); DV_SET(rti, 1); if (min_period == 8) DV_SET(pcomp_en, 1); } else { DV_SET(iu, 0); } /* now that we've done all this, actually check the bus * signal type (if known). Some devices are stupid on * a SE bus and still claim they can try LVD only settings */ if (i->f->get_signalling) i->f->get_signalling(shost); if (spi_signalling(shost) == SPI_SIGNAL_SE || spi_signalling(shost) == SPI_SIGNAL_HVD || !spi_support_dt(starget)) { DV_SET(dt, 0); } else { DV_SET(dt, 1); } /* set width last because it will pull all the other * parameters down to required values */ DV_SET(width, max_width); /* Do the read only INQUIRY tests */ spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len, spi_dv_device_compare_inquiry); /* See if we actually managed to negotiate and sustain DT */ if (i->f->get_dt) i->f->get_dt(starget); /* see if the device has an echo buffer. If it does we can do * the SPI pattern write tests. Because of some broken * devices, we *only* try this on a device that has actually * negotiated DT */ if (len == -1 && spi_dt(starget)) len = spi_dv_device_get_echo_buffer(sdev, buffer); if (len <= 0) { starget_printk(KERN_INFO, starget, "Domain Validation skipping write tests\n"); return; } if (len > SPI_MAX_ECHO_BUFFER_SIZE) { starget_printk(KERN_WARNING, starget, "Echo buffer size %d is too big, trimming to %d\n", len, SPI_MAX_ECHO_BUFFER_SIZE); len = SPI_MAX_ECHO_BUFFER_SIZE; } if (spi_dv_retrain(sdev, buffer, buffer + len, spi_dv_device_echo_buffer) == SPI_COMPARE_SKIP_TEST) { /* OK, the stupid drive can't do a write echo buffer * test after all, fall back to the read tests */ len = 0; goto retry; } } /** * spi_dv_device - Do Domain Validation on the device * @sdev: scsi device to validate * * Performs the domain validation on the given device in the * current execution thread. Since DV operations may sleep, * the current thread must have user context. Also no SCSI * related locks that would deadlock I/O issued by the DV may * be held. */ void spi_dv_device(struct scsi_device *sdev) { struct scsi_target *starget = sdev->sdev_target; const int len = SPI_MAX_ECHO_BUFFER_SIZE*2; unsigned int sleep_flags; u8 *buffer; /* * Because this function and the power management code both call * scsi_device_quiesce(), it is not safe to perform domain validation * while suspend or resume is in progress. Hence the * lock/unlock_system_sleep() calls. 
*/ sleep_flags = lock_system_sleep(); if (scsi_autopm_get_device(sdev)) goto unlock_system_sleep; if (unlikely(spi_dv_in_progress(starget))) goto put_autopm; if (unlikely(scsi_device_get(sdev))) goto put_autopm; spi_dv_in_progress(starget) = 1; buffer = kzalloc(len, GFP_KERNEL); if (unlikely(!buffer)) goto put_sdev; /* We need to verify that the actual device will quiesce; the * later target quiesce is just a nice to have */ if (unlikely(scsi_device_quiesce(sdev))) goto free_buffer; scsi_target_quiesce(starget); spi_dv_pending(starget) = 1; mutex_lock(&spi_dv_mutex(starget)); starget_printk(KERN_INFO, starget, "Beginning Domain Validation\n"); spi_dv_device_internal(sdev, buffer); starget_printk(KERN_INFO, starget, "Ending Domain Validation\n"); mutex_unlock(&spi_dv_mutex(starget)); spi_dv_pending(starget) = 0; scsi_target_resume(starget); spi_initial_dv(starget) = 1; free_buffer: kfree(buffer); put_sdev: spi_dv_in_progress(starget) = 0; scsi_device_put(sdev); put_autopm: scsi_autopm_put_device(sdev); unlock_system_sleep: unlock_system_sleep(sleep_flags); } EXPORT_SYMBOL(spi_dv_device); struct work_queue_wrapper { struct work_struct work; struct scsi_device *sdev; }; static void spi_dv_device_work_wrapper(struct work_struct *work) { struct work_queue_wrapper *wqw = container_of(work, struct work_queue_wrapper, work); struct scsi_device *sdev = wqw->sdev; kfree(wqw); spi_dv_device(sdev); spi_dv_pending(sdev->sdev_target) = 0; scsi_device_put(sdev); } /** * spi_schedule_dv_device - schedule domain validation to occur on the device * @sdev: The device to validate * * Identical to spi_dv_device() above, except that the DV will be * scheduled to occur in a workqueue later. All memory allocations * are atomic, so may be called from any context including those holding * SCSI locks. */ void spi_schedule_dv_device(struct scsi_device *sdev) { struct work_queue_wrapper *wqw = kmalloc(sizeof(struct work_queue_wrapper), GFP_ATOMIC); if (unlikely(!wqw)) return; if (unlikely(spi_dv_pending(sdev->sdev_target))) { kfree(wqw); return; } /* Set pending early (dv_device doesn't check it, only sets it) */ spi_dv_pending(sdev->sdev_target) = 1; if (unlikely(scsi_device_get(sdev))) { kfree(wqw); spi_dv_pending(sdev->sdev_target) = 0; return; } INIT_WORK(&wqw->work, spi_dv_device_work_wrapper); wqw->sdev = sdev; schedule_work(&wqw->work); } EXPORT_SYMBOL(spi_schedule_dv_device); /** * spi_display_xfer_agreement - Print the current target transfer agreement * @starget: The target for which to display the agreement * * Each SPI port is required to maintain a transfer agreement for each * other port on the bus. This function prints a one-line summary of * the current agreement; more detailed information is available in sysfs. 
*/ void spi_display_xfer_agreement(struct scsi_target *starget) { struct spi_transport_attrs *tp; tp = (struct spi_transport_attrs *)&starget->starget_data; if (tp->offset > 0 && tp->period > 0) { unsigned int picosec, kb100; char *scsi = "FAST-?"; char tmp[8]; if (tp->period <= SPI_STATIC_PPR) { picosec = ppr_to_ps[tp->period]; switch (tp->period) { case 7: scsi = "FAST-320"; break; case 8: scsi = "FAST-160"; break; case 9: scsi = "FAST-80"; break; case 10: case 11: scsi = "FAST-40"; break; case 12: scsi = "FAST-20"; break; } } else { picosec = tp->period * 4000; if (tp->period < 25) scsi = "FAST-20"; else if (tp->period < 50) scsi = "FAST-10"; else scsi = "FAST-5"; } kb100 = (10000000 + picosec / 2) / picosec; if (tp->width) kb100 *= 2; sprint_frac(tmp, picosec, 1000); dev_info(&starget->dev, "%s %sSCSI %d.%d MB/s %s%s%s%s%s%s%s%s (%s ns, offset %d)\n", scsi, tp->width ? "WIDE " : "", kb100/10, kb100 % 10, tp->dt ? "DT" : "ST", tp->iu ? " IU" : "", tp->qas ? " QAS" : "", tp->rd_strm ? " RDSTRM" : "", tp->rti ? " RTI" : "", tp->wr_flow ? " WRFLOW" : "", tp->pcomp_en ? " PCOMP" : "", tp->hold_mcs ? " HMCS" : "", tmp, tp->offset); } else { dev_info(&starget->dev, "%sasynchronous\n", tp->width ? "wide " : ""); } } EXPORT_SYMBOL(spi_display_xfer_agreement); int spi_populate_width_msg(unsigned char *msg, int width) { msg[0] = EXTENDED_MESSAGE; msg[1] = 2; msg[2] = EXTENDED_WDTR; msg[3] = width; return 4; } EXPORT_SYMBOL_GPL(spi_populate_width_msg); int spi_populate_sync_msg(unsigned char *msg, int period, int offset) { msg[0] = EXTENDED_MESSAGE; msg[1] = 3; msg[2] = EXTENDED_SDTR; msg[3] = period; msg[4] = offset; return 5; } EXPORT_SYMBOL_GPL(spi_populate_sync_msg); int spi_populate_ppr_msg(unsigned char *msg, int period, int offset, int width, int options) { msg[0] = EXTENDED_MESSAGE; msg[1] = 6; msg[2] = EXTENDED_PPR; msg[3] = period; msg[4] = 0; msg[5] = offset; msg[6] = width; msg[7] = options; return 8; } EXPORT_SYMBOL_GPL(spi_populate_ppr_msg); /** * spi_populate_tag_msg - place a tag message in a buffer * @msg: pointer to the area to place the tag * @cmd: pointer to the scsi command for the tag * * Notes: * designed to create the correct type of tag message for the * particular request. Returns the size of the tag message. * May return 0 if TCQ is disabled for this device. 
**/ int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd) { if (cmd->flags & SCMD_TAGGED) { *msg++ = SIMPLE_QUEUE_TAG; *msg++ = scsi_cmd_to_rq(cmd)->tag; return 2; } return 0; } EXPORT_SYMBOL_GPL(spi_populate_tag_msg); #ifdef CONFIG_SCSI_CONSTANTS static const char * const one_byte_msgs[] = { /* 0x00 */ "Task Complete", NULL /* Extended Message */, "Save Pointers", /* 0x03 */ "Restore Pointers", "Disconnect", "Initiator Error", /* 0x06 */ "Abort Task Set", "Message Reject", "Nop", "Message Parity Error", /* 0x0a */ "Linked Command Complete", "Linked Command Complete w/flag", /* 0x0c */ "Target Reset", "Abort Task", "Clear Task Set", /* 0x0f */ "Initiate Recovery", "Release Recovery", /* 0x11 */ "Terminate Process", "Continue Task", "Target Transfer Disable", /* 0x14 */ NULL, NULL, "Clear ACA", "LUN Reset" }; static const char * const two_byte_msgs[] = { /* 0x20 */ "Simple Queue Tag", "Head of Queue Tag", "Ordered Queue Tag", /* 0x23 */ "Ignore Wide Residue", "ACA" }; static const char * const extended_msgs[] = { /* 0x00 */ "Modify Data Pointer", "Synchronous Data Transfer Request", /* 0x02 */ "SCSI-I Extended Identify", "Wide Data Transfer Request", /* 0x04 */ "Parallel Protocol Request", "Modify Bidirectional Data Pointer" }; static void print_nego(const unsigned char *msg, int per, int off, int width) { if (per) { char buf[20]; period_to_str(buf, msg[per]); printk("period = %s ns ", buf); } if (off) printk("offset = %d ", msg[off]); if (width) printk("width = %d ", 8 << msg[width]); } static void print_ptr(const unsigned char *msg, int msb, const char *desc) { int ptr = (msg[msb] << 24) | (msg[msb+1] << 16) | (msg[msb+2] << 8) | msg[msb+3]; printk("%s = %d ", desc, ptr); } int spi_print_msg(const unsigned char *msg) { int len = 1, i; if (msg[0] == EXTENDED_MESSAGE) { len = 2 + msg[1]; if (len == 2) len += 256; if (msg[2] < ARRAY_SIZE(extended_msgs)) printk ("%s ", extended_msgs[msg[2]]); else printk ("Extended Message, reserved code (0x%02x) ", (int) msg[2]); switch (msg[2]) { case EXTENDED_MODIFY_DATA_POINTER: print_ptr(msg, 3, "pointer"); break; case EXTENDED_SDTR: print_nego(msg, 3, 4, 0); break; case EXTENDED_WDTR: print_nego(msg, 0, 0, 3); break; case EXTENDED_PPR: print_nego(msg, 3, 5, 6); break; case EXTENDED_MODIFY_BIDI_DATA_PTR: print_ptr(msg, 3, "out"); print_ptr(msg, 7, "in"); break; default: for (i = 2; i < len; ++i) printk("%02x ", msg[i]); } /* Identify */ } else if (msg[0] & 0x80) { printk("Identify disconnect %sallowed %s %d ", (msg[0] & 0x40) ? "" : "not ", (msg[0] & 0x20) ? 
"target routine" : "lun", msg[0] & 0x7); /* Normal One byte */ } else if (msg[0] < 0x1f) { if (msg[0] < ARRAY_SIZE(one_byte_msgs) && one_byte_msgs[msg[0]]) printk("%s ", one_byte_msgs[msg[0]]); else printk("reserved (%02x) ", msg[0]); } else if (msg[0] == 0x55) { printk("QAS Request "); /* Two byte */ } else if (msg[0] <= 0x2f) { if ((msg[0] - 0x20) < ARRAY_SIZE(two_byte_msgs)) printk("%s %02x ", two_byte_msgs[msg[0] - 0x20], msg[1]); else printk("reserved two byte (%02x %02x) ", msg[0], msg[1]); len = 2; } else printk("reserved "); return len; } EXPORT_SYMBOL(spi_print_msg); #else /* ifndef CONFIG_SCSI_CONSTANTS */ int spi_print_msg(const unsigned char *msg) { int len = 1, i; if (msg[0] == EXTENDED_MESSAGE) { len = 2 + msg[1]; if (len == 2) len += 256; for (i = 0; i < len; ++i) printk("%02x ", msg[i]); /* Identify */ } else if (msg[0] & 0x80) { printk("%02x ", msg[0]); /* Normal One byte */ } else if ((msg[0] < 0x1f) || (msg[0] == 0x55)) { printk("%02x ", msg[0]); /* Two byte */ } else if (msg[0] <= 0x2f) { printk("%02x %02x", msg[0], msg[1]); len = 2; } else printk("%02x ", msg[0]); return len; } EXPORT_SYMBOL(spi_print_msg); #endif /* ! CONFIG_SCSI_CONSTANTS */ static int spi_device_match(struct attribute_container *cont, struct device *dev) { struct scsi_device *sdev; struct Scsi_Host *shost; struct spi_internal *i; if (!scsi_is_sdev_device(dev)) return 0; sdev = to_scsi_device(dev); shost = sdev->host; if (!shost->transportt || shost->transportt->host_attrs.ac.class != &spi_host_class.class) return 0; /* Note: this class has no device attributes, so it has * no per-HBA allocation and thus we don't need to distinguish * the attribute containers for the device */ i = to_spi_internal(shost->transportt); if (i->f->deny_binding && i->f->deny_binding(sdev->sdev_target)) return 0; return 1; } static int spi_target_match(struct attribute_container *cont, struct device *dev) { struct Scsi_Host *shost; struct scsi_target *starget; struct spi_internal *i; if (!scsi_is_target_device(dev)) return 0; shost = dev_to_shost(dev->parent); if (!shost->transportt || shost->transportt->host_attrs.ac.class != &spi_host_class.class) return 0; i = to_spi_internal(shost->transportt); starget = to_scsi_target(dev); if (i->f->deny_binding && i->f->deny_binding(starget)) return 0; return &i->t.target_attrs.ac == cont; } static DECLARE_TRANSPORT_CLASS(spi_transport_class, "spi_transport", spi_setup_transport_attrs, NULL, spi_target_configure); static DECLARE_ANON_TRANSPORT_CLASS(spi_device_class, spi_device_match, spi_device_configure); static struct attribute *host_attributes[] = { &dev_attr_signalling.attr, &dev_attr_host_width.attr, &dev_attr_hba_id.attr, NULL }; static struct attribute_group host_attribute_group = { .attrs = host_attributes, }; static int spi_host_configure(struct transport_container *tc, struct device *dev, struct device *cdev) { struct kobject *kobj = &cdev->kobj; struct Scsi_Host *shost = transport_class_to_shost(cdev); struct spi_internal *si = to_spi_internal(shost->transportt); struct attribute *attr = &dev_attr_signalling.attr; int rc = 0; if (si->f->set_signalling) rc = sysfs_chmod_file(kobj, attr, attr->mode | S_IWUSR); return rc; } /* returns true if we should be showing the variable. Also * overloads the return by setting 1<<1 if the attribute should * be writeable */ #define TARGET_ATTRIBUTE_HELPER(name) \ (si->f->show_##name ? S_IRUGO : 0) | \ (si->f->set_##name ? 
S_IWUSR : 0) static umode_t target_attribute_is_visible(struct kobject *kobj, struct attribute *attr, int i) { struct device *cdev = container_of(kobj, struct device, kobj); struct scsi_target *starget = transport_class_to_starget(cdev); struct Scsi_Host *shost = transport_class_to_shost(cdev); struct spi_internal *si = to_spi_internal(shost->transportt); if (attr == &dev_attr_period.attr && spi_support_sync(starget)) return TARGET_ATTRIBUTE_HELPER(period); else if (attr == &dev_attr_min_period.attr && spi_support_sync(starget)) return TARGET_ATTRIBUTE_HELPER(period); else if (attr == &dev_attr_offset.attr && spi_support_sync(starget)) return TARGET_ATTRIBUTE_HELPER(offset); else if (attr == &dev_attr_max_offset.attr && spi_support_sync(starget)) return TARGET_ATTRIBUTE_HELPER(offset); else if (attr == &dev_attr_width.attr && spi_support_wide(starget)) return TARGET_ATTRIBUTE_HELPER(width); else if (attr == &dev_attr_max_width.attr && spi_support_wide(starget)) return TARGET_ATTRIBUTE_HELPER(width); else if (attr == &dev_attr_iu.attr && spi_support_ius(starget)) return TARGET_ATTRIBUTE_HELPER(iu); else if (attr == &dev_attr_max_iu.attr && spi_support_ius(starget)) return TARGET_ATTRIBUTE_HELPER(iu); else if (attr == &dev_attr_dt.attr && spi_support_dt(starget)) return TARGET_ATTRIBUTE_HELPER(dt); else if (attr == &dev_attr_qas.attr && spi_support_qas(starget)) return TARGET_ATTRIBUTE_HELPER(qas); else if (attr == &dev_attr_max_qas.attr && spi_support_qas(starget)) return TARGET_ATTRIBUTE_HELPER(qas); else if (attr == &dev_attr_wr_flow.attr && spi_support_ius(starget)) return TARGET_ATTRIBUTE_HELPER(wr_flow); else if (attr == &dev_attr_rd_strm.attr && spi_support_ius(starget)) return TARGET_ATTRIBUTE_HELPER(rd_strm); else if (attr == &dev_attr_rti.attr && spi_support_ius(starget)) return TARGET_ATTRIBUTE_HELPER(rti); else if (attr == &dev_attr_pcomp_en.attr && spi_support_ius(starget)) return TARGET_ATTRIBUTE_HELPER(pcomp_en); else if (attr == &dev_attr_hold_mcs.attr && spi_support_ius(starget)) return TARGET_ATTRIBUTE_HELPER(hold_mcs); else if (attr == &dev_attr_revalidate.attr) return S_IWUSR; return 0; } static struct attribute *target_attributes[] = { &dev_attr_period.attr, &dev_attr_min_period.attr, &dev_attr_offset.attr, &dev_attr_max_offset.attr, &dev_attr_width.attr, &dev_attr_max_width.attr, &dev_attr_iu.attr, &dev_attr_max_iu.attr, &dev_attr_dt.attr, &dev_attr_qas.attr, &dev_attr_max_qas.attr, &dev_attr_wr_flow.attr, &dev_attr_rd_strm.attr, &dev_attr_rti.attr, &dev_attr_pcomp_en.attr, &dev_attr_hold_mcs.attr, &dev_attr_revalidate.attr, NULL }; static struct attribute_group target_attribute_group = { .attrs = target_attributes, .is_visible = target_attribute_is_visible, }; static int spi_target_configure(struct transport_container *tc, struct device *dev, struct device *cdev) { struct kobject *kobj = &cdev->kobj; /* force an update based on parameters read from the device */ sysfs_update_group(kobj, &target_attribute_group); return 0; } struct scsi_transport_template * spi_attach_transport(struct spi_function_template *ft) { struct spi_internal *i = kzalloc(sizeof(struct spi_internal), GFP_KERNEL); if (unlikely(!i)) return NULL; i->t.target_attrs.ac.class = &spi_transport_class.class; i->t.target_attrs.ac.grp = &target_attribute_group; i->t.target_attrs.ac.match = spi_target_match; transport_container_register(&i->t.target_attrs); i->t.target_size = sizeof(struct spi_transport_attrs); i->t.host_attrs.ac.class = &spi_host_class.class; i->t.host_attrs.ac.grp = &host_attribute_group; 
i->t.host_attrs.ac.match = spi_host_match; transport_container_register(&i->t.host_attrs); i->t.host_size = sizeof(struct spi_host_attrs); i->f = ft; return &i->t; } EXPORT_SYMBOL(spi_attach_transport); void spi_release_transport(struct scsi_transport_template *t) { struct spi_internal *i = to_spi_internal(t); transport_container_unregister(&i->t.target_attrs); transport_container_unregister(&i->t.host_attrs); kfree(i); } EXPORT_SYMBOL(spi_release_transport); static __init int spi_transport_init(void) { int error = scsi_dev_info_add_list(SCSI_DEVINFO_SPI, "SCSI Parallel Transport Class"); if (!error) { int i; for (i = 0; spi_static_device_list[i].vendor; i++) scsi_dev_info_list_add_keyed(1, /* compatible */ spi_static_device_list[i].vendor, spi_static_device_list[i].model, NULL, spi_static_device_list[i].flags, SCSI_DEVINFO_SPI); } error = transport_class_register(&spi_transport_class); if (error) return error; error = anon_transport_class_register(&spi_device_class); return transport_class_register(&spi_host_class); } static void __exit spi_transport_exit(void) { transport_class_unregister(&spi_transport_class); anon_transport_class_unregister(&spi_device_class); transport_class_unregister(&spi_host_class); scsi_dev_info_remove_list(SCSI_DEVINFO_SPI); } MODULE_AUTHOR("Martin Hicks"); MODULE_DESCRIPTION("SPI Transport Attributes"); MODULE_LICENSE("GPL"); module_init(spi_transport_init); module_exit(spi_transport_exit);
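The message helpers above (spi_populate_ppr_msg(), spi_populate_tag_msg() and spi_print_msg()) are meant to be called from SPI HBA drivers. A minimal usage sketch follows; it is not part of scsi_transport_spi.c, and the function name, the period/offset/option values and the caller-supplied buffer size are illustrative assumptions only.

/* Illustrative sketch only -- not part of scsi_transport_spi.c. */
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_transport_spi.h>

static int example_build_negotiation(struct scsi_cmnd *cmd,
				     unsigned char *msg)
{
	int len;

	/*
	 * Build a Parallel Protocol Request: period factor 0x09 (shown
	 * as FAST-80 by spi_display_xfer_agreement()), offset 62, wide
	 * transfers enabled. The option byte value 0x02 is an assumption
	 * meant to request DT clocking; real drivers use the bit
	 * definitions from their own headers. The caller must supply a
	 * buffer large enough for the 8-byte PPR message plus a 2-byte
	 * tag message.
	 */
	len = spi_populate_ppr_msg(msg, 0x09, 62, 1, 0x02);

	/*
	 * Decode the message just built; with CONFIG_SCSI_CONSTANTS this
	 * prints the message name plus the period, offset and width.
	 */
	spi_print_msg(msg);

	/* Append a SIMPLE_QUEUE_TAG message when the command is tagged. */
	len += spi_populate_tag_msg(msg + len, cmd);

	return len;
}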
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Media entity * * Copyright (C) 2010 Nokia Corporation * * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com> * Sakari Ailus <sakari.ailus@iki.fi> */
#ifndef _MEDIA_ENTITY_H #define _MEDIA_ENTITY_H
#include <linux/bitmap.h> #include <linux/bug.h> #include <linux/container_of.h> #include <linux/fwnode.h> #include <linux/list.h> #include <linux/media.h> #include <linux/minmax.h> #include <linux/types.h>
/* Enums used internally at the media controller to represent graphs */
/** * enum media_gobj_type - type of a graph object * * @MEDIA_GRAPH_ENTITY: Identify a media entity * @MEDIA_GRAPH_PAD: Identify a media pad * @MEDIA_GRAPH_LINK: Identify a media link * @MEDIA_GRAPH_INTF_DEVNODE: Identify a media Kernel API interface via * a device node */
enum media_gobj_type { MEDIA_GRAPH_ENTITY, MEDIA_GRAPH_PAD, MEDIA_GRAPH_LINK, MEDIA_GRAPH_INTF_DEVNODE, };
#define MEDIA_BITS_PER_TYPE 8
#define MEDIA_BITS_PER_ID (32 - MEDIA_BITS_PER_TYPE) #define MEDIA_ID_MASK GENMASK_ULL(MEDIA_BITS_PER_ID - 1, 0) /* Structs to represent the objects that belong to a media graph */ /** * struct media_gobj - Define a graph object. * * @mdev: Pointer to the struct &media_device that owns the object * @id: Non-zero object ID identifier. The ID should be unique * inside a media_device, as it is composed by * %MEDIA_BITS_PER_TYPE to store the type plus * %MEDIA_BITS_PER_ID to store the ID * @list: List entry stored in one of the per-type mdev object lists * * All objects on the media graph should have this struct embedded */ struct media_gobj { struct media_device *mdev; u32 id; struct list_head list; }; #define MEDIA_ENTITY_ENUM_MAX_DEPTH 16 /** * struct media_entity_enum - An enumeration of media entities. * * @bmap: Bit map in which each bit represents one entity at struct * media_entity->internal_idx. * @idx_max: Number of bits in bmap */ struct media_entity_enum { unsigned long *bmap; int idx_max; }; /** * struct media_graph - Media graph traversal state * * @stack: Graph traversal stack; the stack contains information * on the path the media entities to be walked and the * links through which they were reached. * @stack.entity: pointer to &struct media_entity at the graph. * @stack.link: pointer to &struct list_head. * @ent_enum: Visited entities * @top: The top of the stack */ struct media_graph { struct { struct media_entity *entity; struct list_head *link; } stack[MEDIA_ENTITY_ENUM_MAX_DEPTH]; struct media_entity_enum ent_enum; int top; }; /** * struct media_pipeline - Media pipeline related information * * @allocated: Media pipeline allocated and freed by the framework * @mdev: The media device the pipeline is part of * @pads: List of media_pipeline_pad * @start_count: Media pipeline start - stop count */ struct media_pipeline { bool allocated; struct media_device *mdev; struct list_head pads; int start_count; }; /** * struct media_pipeline_pad - A pad part of a media pipeline * * @list: Entry in the media_pad pads list * @pipe: The media_pipeline that the pad is part of * @pad: The media pad * * This structure associate a pad with a media pipeline. Instances of * media_pipeline_pad are created by media_pipeline_start() when it builds the * pipeline, and stored in the &media_pad.pads list. media_pipeline_stop() * removes the entries from the list and deletes them. */ struct media_pipeline_pad { struct list_head list; struct media_pipeline *pipe; struct media_pad *pad; }; /** * struct media_pipeline_pad_iter - Iterator for media_pipeline_for_each_pad * * @cursor: The current element */ struct media_pipeline_pad_iter { struct list_head *cursor; }; /** * struct media_pipeline_entity_iter - Iterator for media_pipeline_for_each_entity * * @ent_enum: The entity enumeration tracker * @cursor: The current element */ struct media_pipeline_entity_iter { struct media_entity_enum ent_enum; struct list_head *cursor; }; /** * struct media_link - A link object part of a media graph. * * @graph_obj: Embedded structure containing the media object common data * @list: Linked list associated with an entity or an interface that * owns the link. * @gobj0: Part of a union. Used to get the pointer for the first * graph_object of the link. * @source: Part of a union. Used only if the first object (gobj0) is * a pad. In that case, it represents the source pad. * @intf: Part of a union. Used only if the first object (gobj0) is * an interface. * @gobj1: Part of a union. 
Used to get the pointer for the second * graph_object of the link. * @sink: Part of a union. Used only if the second object (gobj1) is * a pad. In that case, it represents the sink pad. * @entity: Part of a union. Used only if the second object (gobj1) is * an entity. * @reverse: Pointer to the link for the reverse direction of a pad to pad * link. * @flags: Link flags, as defined in uapi/media.h (MEDIA_LNK_FL_*) * @is_backlink: Indicates whether the link is a backlink. */
struct media_link { struct media_gobj graph_obj; struct list_head list; union { struct media_gobj *gobj0; struct media_pad *source; struct media_interface *intf; }; union { struct media_gobj *gobj1; struct media_pad *sink; struct media_entity *entity; }; struct media_link *reverse; unsigned long flags; bool is_backlink; };
/** * enum media_pad_signal_type - type of the signal inside a media pad * * @PAD_SIGNAL_DEFAULT: * Default signal. Use this when all inputs or all outputs are * uniquely identified by the pad number. * @PAD_SIGNAL_ANALOG: * The pad contains an analog signal. It can be Radio Frequency, * Intermediate Frequency, a baseband signal or sub-carriers. * Tuner inputs, IF-PLL demodulators, composite and s-video signals * should use it. * @PAD_SIGNAL_DV: * Contains a digital video signal, which can be a bitstream of samples * taken from an analog TV video source. In that case, it usually * contains the VBI data as well. * @PAD_SIGNAL_AUDIO: * Contains an Intermediate Frequency analog signal from an audio * sub-carrier or an audio bitstream. IF signals are provided by tuners * and consumed by audio AM/FM decoders. Bitstream audio is provided by * an audio decoder. */
enum media_pad_signal_type { PAD_SIGNAL_DEFAULT = 0, PAD_SIGNAL_ANALOG, PAD_SIGNAL_DV, PAD_SIGNAL_AUDIO, };
/** * struct media_pad - A media pad graph object. * * @graph_obj: Embedded structure containing the media object common data * @entity: Entity this pad belongs to * @index: Pad index in the entity pads array, numbered from 0 to n * @num_links: Number of links connected to this pad * @sig_type: Type of the signal inside a media pad * @flags: Pad flags, as defined in * :ref:`include/uapi/linux/media.h <media_header>` * (seek for ``MEDIA_PAD_FL_*``) * @pipe: Pipeline this pad belongs to. Use media_entity_pipeline() to * access this field. */
struct media_pad { struct media_gobj graph_obj; /* must be first field in struct */ struct media_entity *entity; u16 index; u16 num_links; enum media_pad_signal_type sig_type; unsigned long flags; /* * The fields below are private, and should only be accessed via * appropriate functions. */ struct media_pipeline *pipe; };
/** * struct media_entity_operations - Media entity operations * @get_fwnode_pad: Return the pad number based on a fwnode endpoint or * a negative value on error. This operation can be used * to map a fwnode to a media pad number. Optional. * @link_setup: Notify the entity of link changes. The operation can * return an error, in which case link setup will be * cancelled. Optional. * @link_validate: Return whether a link is valid from the entity's point of * view. The media_pipeline_start() function * validates all links by calling this operation. Optional. * @has_pad_interdep: Return whether two pads of the entity are * interdependent. If two pads are interdependent they are * part of the same pipeline and enabling one of the pads * means that the other pad will become "locked" and * doesn't allow configuration changes. pad0 and pad1 are * guaranteed to not both be sinks or sources.
Never call * the .has_pad_interdep() operation directly, always use * media_entity_has_pad_interdep(). * Optional: If the operation isn't implemented all pads * will be considered as interdependent. * * .. note:: * * Those these callbacks are called with struct &media_device.graph_mutex * mutex held. */ struct media_entity_operations { int (*get_fwnode_pad)(struct media_entity *entity, struct fwnode_endpoint *endpoint); int (*link_setup)(struct media_entity *entity, const struct media_pad *local, const struct media_pad *remote, u32 flags); int (*link_validate)(struct media_link *link); bool (*has_pad_interdep)(struct media_entity *entity, unsigned int pad0, unsigned int pad1); }; /** * enum media_entity_type - Media entity type * * @MEDIA_ENTITY_TYPE_BASE: * The entity isn't embedded in another subsystem structure. * @MEDIA_ENTITY_TYPE_VIDEO_DEVICE: * The entity is embedded in a struct video_device instance. * @MEDIA_ENTITY_TYPE_V4L2_SUBDEV: * The entity is embedded in a struct v4l2_subdev instance. * * Media entity objects are often not instantiated directly, but the media * entity structure is inherited by (through embedding) other subsystem-specific * structures. The media entity type identifies the type of the subclass * structure that implements a media entity instance. * * This allows runtime type identification of media entities and safe casting to * the correct object type. For instance, a media entity structure instance * embedded in a v4l2_subdev structure instance will have the type * %MEDIA_ENTITY_TYPE_V4L2_SUBDEV and can safely be cast to a &v4l2_subdev * structure using the container_of() macro. */ enum media_entity_type { MEDIA_ENTITY_TYPE_BASE, MEDIA_ENTITY_TYPE_VIDEO_DEVICE, MEDIA_ENTITY_TYPE_V4L2_SUBDEV, }; /** * struct media_entity - A media entity graph object. * * @graph_obj: Embedded structure containing the media object common data. * @name: Entity name. * @obj_type: Type of the object that implements the media_entity. * @function: Entity main function, as defined in * :ref:`include/uapi/linux/media.h <media_header>` * (seek for ``MEDIA_ENT_F_*``) * @flags: Entity flags, as defined in * :ref:`include/uapi/linux/media.h <media_header>` * (seek for ``MEDIA_ENT_FL_*``) * @num_pads: Number of sink and source pads. * @num_links: Total number of links, forward and back, enabled and disabled. * @num_backlinks: Number of backlinks * @internal_idx: An unique internal entity specific number. The numbers are * re-used if entities are unregistered or registered again. * @pads: Pads array with the size defined by @num_pads. * @links: List of data links. * @ops: Entity operations. * @use_count: Use count for the entity. * @info: Union with devnode information. Kept just for backward * compatibility. * @info.dev: Contains device major and minor info. * @info.dev.major: device node major, if the device is a devnode. * @info.dev.minor: device node minor, if the device is a devnode. * * .. note:: * * The @use_count reference count must never be negative, but is a signed * integer on purpose: a simple ``WARN_ON(<0)`` check can be used to detect * reference count bugs that would make it negative. 
*/ struct media_entity { struct media_gobj graph_obj; /* must be first field in struct */ const char *name; enum media_entity_type obj_type; u32 function; unsigned long flags; u16 num_pads; u16 num_links; u16 num_backlinks; int internal_idx; struct media_pad *pads; struct list_head links; const struct media_entity_operations *ops; int use_count; union { struct { u32 major; u32 minor; } dev; } info; }; /** * media_entity_for_each_pad - Iterate on all pads in an entity * @entity: The entity the pads belong to * @iter: The iterator pad * * Iterate on all pads in a media entity. */ #define media_entity_for_each_pad(entity, iter) \ for (iter = (entity)->pads; \ iter < &(entity)->pads[(entity)->num_pads]; \ ++iter) /** * struct media_interface - A media interface graph object. * * @graph_obj: embedded graph object * @links: List of links pointing to graph entities * @type: Type of the interface as defined in * :ref:`include/uapi/linux/media.h <media_header>` * (seek for ``MEDIA_INTF_T_*``) * @flags: Interface flags as defined in * :ref:`include/uapi/linux/media.h <media_header>` * (seek for ``MEDIA_INTF_FL_*``) * * .. note:: * * Currently, no flags for &media_interface is defined. */ struct media_interface { struct media_gobj graph_obj; struct list_head links; u32 type; u32 flags; }; /** * struct media_intf_devnode - A media interface via a device node. * * @intf: embedded interface object * @major: Major number of a device node * @minor: Minor number of a device node */ struct media_intf_devnode { struct media_interface intf; /* Should match the fields at media_v2_intf_devnode */ u32 major; u32 minor; }; /** * media_entity_id() - return the media entity graph object id * * @entity: pointer to &media_entity */ static inline u32 media_entity_id(struct media_entity *entity) { return entity->graph_obj.id; } /** * media_type() - return the media object type * * @gobj: Pointer to the struct &media_gobj graph object */ static inline enum media_gobj_type media_type(struct media_gobj *gobj) { return gobj->id >> MEDIA_BITS_PER_ID; } /** * media_id() - return the media object ID * * @gobj: Pointer to the struct &media_gobj graph object */ static inline u32 media_id(struct media_gobj *gobj) { return gobj->id & MEDIA_ID_MASK; } /** * media_gobj_gen_id() - encapsulates type and ID on at the object ID * * @type: object type as define at enum &media_gobj_type. * @local_id: next ID, from struct &media_device.id. */ static inline u32 media_gobj_gen_id(enum media_gobj_type type, u64 local_id) { u32 id; id = type << MEDIA_BITS_PER_ID; id |= local_id & MEDIA_ID_MASK; return id; } /** * is_media_entity_v4l2_video_device() - Check if the entity is a video_device * @entity: pointer to entity * * Return: %true if the entity is an instance of a video_device object and can * safely be cast to a struct video_device using the container_of() macro, or * %false otherwise. */ static inline bool is_media_entity_v4l2_video_device(struct media_entity *entity) { return entity && entity->obj_type == MEDIA_ENTITY_TYPE_VIDEO_DEVICE; } /** * is_media_entity_v4l2_subdev() - Check if the entity is a v4l2_subdev * @entity: pointer to entity * * Return: %true if the entity is an instance of a &v4l2_subdev object and can * safely be cast to a struct &v4l2_subdev using the container_of() macro, or * %false otherwise. 
*/ static inline bool is_media_entity_v4l2_subdev(struct media_entity *entity) { return entity && entity->obj_type == MEDIA_ENTITY_TYPE_V4L2_SUBDEV; } /** * media_entity_enum_init - Initialise an entity enumeration * * @ent_enum: Entity enumeration to be initialised * @mdev: The related media device * * Return: zero on success or a negative error code. */ __must_check int media_entity_enum_init(struct media_entity_enum *ent_enum, struct media_device *mdev); /** * media_entity_enum_cleanup - Release resources of an entity enumeration * * @ent_enum: Entity enumeration to be released */ void media_entity_enum_cleanup(struct media_entity_enum *ent_enum); /** * media_entity_enum_zero - Clear the entire enum * * @ent_enum: Entity enumeration to be cleared */ static inline void media_entity_enum_zero(struct media_entity_enum *ent_enum) { bitmap_zero(ent_enum->bmap, ent_enum->idx_max); } /** * media_entity_enum_set - Mark a single entity in the enum * * @ent_enum: Entity enumeration * @entity: Entity to be marked */ static inline void media_entity_enum_set(struct media_entity_enum *ent_enum, struct media_entity *entity) { if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) return; __set_bit(entity->internal_idx, ent_enum->bmap); } /** * media_entity_enum_clear - Unmark a single entity in the enum * * @ent_enum: Entity enumeration * @entity: Entity to be unmarked */ static inline void media_entity_enum_clear(struct media_entity_enum *ent_enum, struct media_entity *entity) { if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) return; __clear_bit(entity->internal_idx, ent_enum->bmap); } /** * media_entity_enum_test - Test whether the entity is marked * * @ent_enum: Entity enumeration * @entity: Entity to be tested * * Returns %true if the entity was marked. */ static inline bool media_entity_enum_test(struct media_entity_enum *ent_enum, struct media_entity *entity) { if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) return true; return test_bit(entity->internal_idx, ent_enum->bmap); } /** * media_entity_enum_test_and_set - Test whether the entity is marked, * and mark it * * @ent_enum: Entity enumeration * @entity: Entity to be tested * * Returns %true if the entity was marked, and mark it before doing so. */ static inline bool media_entity_enum_test_and_set(struct media_entity_enum *ent_enum, struct media_entity *entity) { if (WARN_ON(entity->internal_idx >= ent_enum->idx_max)) return true; return __test_and_set_bit(entity->internal_idx, ent_enum->bmap); } /** * media_entity_enum_empty - Test whether the entire enum is empty * * @ent_enum: Entity enumeration * * Return: %true if the entity was empty. */ static inline bool media_entity_enum_empty(struct media_entity_enum *ent_enum) { return bitmap_empty(ent_enum->bmap, ent_enum->idx_max); } /** * media_entity_enum_intersects - Test whether two enums intersect * * @ent_enum1: First entity enumeration * @ent_enum2: Second entity enumeration * * Return: %true if entity enumerations @ent_enum1 and @ent_enum2 intersect, * otherwise %false. */ static inline bool media_entity_enum_intersects( struct media_entity_enum *ent_enum1, struct media_entity_enum *ent_enum2) { WARN_ON(ent_enum1->idx_max != ent_enum2->idx_max); return bitmap_intersects(ent_enum1->bmap, ent_enum2->bmap, min(ent_enum1->idx_max, ent_enum2->idx_max)); } /** * gobj_to_entity - returns the struct &media_entity pointer from the * @gobj contained on it. 
* * @gobj: Pointer to the struct &media_gobj graph object */ #define gobj_to_entity(gobj) \ container_of(gobj, struct media_entity, graph_obj) /** * gobj_to_pad - returns the struct &media_pad pointer from the * @gobj contained on it. * * @gobj: Pointer to the struct &media_gobj graph object */ #define gobj_to_pad(gobj) \ container_of(gobj, struct media_pad, graph_obj) /** * gobj_to_link - returns the struct &media_link pointer from the * @gobj contained on it. * * @gobj: Pointer to the struct &media_gobj graph object */ #define gobj_to_link(gobj) \ container_of(gobj, struct media_link, graph_obj) /** * gobj_to_intf - returns the struct &media_interface pointer from the * @gobj contained on it. * * @gobj: Pointer to the struct &media_gobj graph object */ #define gobj_to_intf(gobj) \ container_of(gobj, struct media_interface, graph_obj) /** * intf_to_devnode - returns the struct media_intf_devnode pointer from the * @intf contained on it. * * @intf: Pointer to struct &media_intf_devnode */ #define intf_to_devnode(intf) \ container_of(intf, struct media_intf_devnode, intf) /** * media_gobj_create - Initialize a graph object * * @mdev: Pointer to the &media_device that contains the object * @type: Type of the object * @gobj: Pointer to the struct &media_gobj graph object * * This routine initializes the embedded struct &media_gobj inside a * media graph object. It is called automatically if ``media_*_create`` * function calls are used. However, if the object (entity, link, pad, * interface) is embedded on some other object, this function should be * called before registering the object at the media controller. */ void media_gobj_create(struct media_device *mdev, enum media_gobj_type type, struct media_gobj *gobj); /** * media_gobj_destroy - Stop using a graph object on a media device * * @gobj: Pointer to the struct &media_gobj graph object * * This should be called by all routines like media_device_unregister() * that remove/destroy media graph objects. */ void media_gobj_destroy(struct media_gobj *gobj); /** * media_entity_pads_init() - Initialize the entity pads * * @entity: entity where the pads belong * @num_pads: total number of sink and source pads * @pads: Array of @num_pads pads. * * The pads array is managed by the entity driver and passed to * media_entity_pads_init() where its pointer will be stored in the * &media_entity structure. * * If no pads are needed, drivers could either directly fill * &media_entity->num_pads with 0 and &media_entity->pads with %NULL or call * this function that will do the same. * * As the number of pads is known in advance, the pads array is not allocated * dynamically but is managed by the entity driver. Most drivers will embed the * pads array in a driver-specific structure, avoiding dynamic allocation. * * Drivers must set the direction of every pad in the pads array before calling * media_entity_pads_init(). The function will initialize the other pads fields. */ int media_entity_pads_init(struct media_entity *entity, u16 num_pads, struct media_pad *pads); /** * media_entity_cleanup() - free resources associated with an entity * * @entity: entity where the pads belong * * This function must be called during the cleanup phase after unregistering * the entity (currently, it does nothing). * * Calling media_entity_cleanup() on a media_entity whose memory has been * zeroed but that has not been initialized with media_entity_pad_init() is * valid and is a no-op. 
*/ #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) static inline void media_entity_cleanup(struct media_entity *entity) {} #else #define media_entity_cleanup(entity) do { } while (false) #endif /** * media_get_pad_index() - retrieves a pad index from an entity * * @entity: entity where the pads belong * @pad_type: the type of the pad, one of MEDIA_PAD_FL_* pad types * @sig_type: type of signal of the pad to be search * * This helper function finds the first pad index inside an entity that * satisfies both @is_sink and @sig_type conditions. * * Return: * * On success, return the pad number. If the pad was not found or the media * entity is a NULL pointer, return -EINVAL. */ int media_get_pad_index(struct media_entity *entity, u32 pad_type, enum media_pad_signal_type sig_type); /** * media_create_pad_link() - creates a link between two entities. * * @source: pointer to &media_entity of the source pad. * @source_pad: number of the source pad in the pads array * @sink: pointer to &media_entity of the sink pad. * @sink_pad: number of the sink pad in the pads array. * @flags: Link flags, as defined in * :ref:`include/uapi/linux/media.h <media_header>` * ( seek for ``MEDIA_LNK_FL_*``) * * Valid values for flags: * * %MEDIA_LNK_FL_ENABLED * Indicates that the link is enabled and can be used to transfer media data. * When two or more links target a sink pad, only one of them can be * enabled at a time. * * %MEDIA_LNK_FL_IMMUTABLE * Indicates that the link enabled state can't be modified at runtime. If * %MEDIA_LNK_FL_IMMUTABLE is set, then %MEDIA_LNK_FL_ENABLED must also be * set, since an immutable link is always enabled. * * .. note:: * * Before calling this function, media_entity_pads_init() and * media_device_register_entity() should be called previously for both ends. */ __must_check int media_create_pad_link(struct media_entity *source, u16 source_pad, struct media_entity *sink, u16 sink_pad, u32 flags); /** * media_create_pad_links() - creates a link between two entities. * * @mdev: Pointer to the media_device that contains the object * @source_function: Function of the source entities. Used only if @source is * NULL. * @source: pointer to &media_entity of the source pad. If NULL, it will use * all entities that matches the @sink_function. * @source_pad: number of the source pad in the pads array * @sink_function: Function of the sink entities. Used only if @sink is NULL. * @sink: pointer to &media_entity of the sink pad. If NULL, it will use * all entities that matches the @sink_function. * @sink_pad: number of the sink pad in the pads array. * @flags: Link flags, as defined in include/uapi/linux/media.h. * @allow_both_undefined: if %true, then both @source and @sink can be NULL. * In such case, it will create a crossbar between all entities that * matches @source_function to all entities that matches @sink_function. * If %false, it will return 0 and won't create any link if both @source * and @sink are NULL. * * Valid values for flags: * * A %MEDIA_LNK_FL_ENABLED flag indicates that the link is enabled and can be * used to transfer media data. If multiple links are created and this * flag is passed as an argument, only the first created link will have * this flag. * * A %MEDIA_LNK_FL_IMMUTABLE flag indicates that the link enabled state can't * be modified at runtime. If %MEDIA_LNK_FL_IMMUTABLE is set, then * %MEDIA_LNK_FL_ENABLED must also be set since an immutable link is * always enabled. 
* * It is common for some devices to have multiple source and/or sink entities * of the same type that should be linked. While media_create_pad_link() * creates link by link, this function is meant to allow 1:n, n:1 and even * cross-bar (n:n) links. * * .. note:: * * Before calling this function, media_entity_pads_init() and * media_device_register_entity() should be called previously for the * entities to be linked. */ int media_create_pad_links(const struct media_device *mdev, const u32 source_function, struct media_entity *source, const u16 source_pad, const u32 sink_function, struct media_entity *sink, const u16 sink_pad, u32 flags, const bool allow_both_undefined); void __media_entity_remove_links(struct media_entity *entity); /** * media_entity_remove_links() - remove all links associated with an entity * * @entity: pointer to &media_entity * * .. note:: * * This is called automatically when an entity is unregistered via * media_device_register_entity(). */ void media_entity_remove_links(struct media_entity *entity); /** * __media_entity_setup_link - Configure a media link without locking * @link: The link being configured * @flags: Link configuration flags * * The bulk of link setup is handled by the two entities connected through the * link. This function notifies both entities of the link configuration change. * * If the link is immutable or if the current and new configuration are * identical, return immediately. * * The user is expected to hold link->source->parent->mutex. If not, * media_entity_setup_link() should be used instead. */ int __media_entity_setup_link(struct media_link *link, u32 flags); /** * media_entity_setup_link() - changes the link flags properties in runtime * * @link: pointer to &media_link * @flags: the requested new link flags * * The only configurable property is the %MEDIA_LNK_FL_ENABLED link flag * to enable/disable a link. Links marked with the * %MEDIA_LNK_FL_IMMUTABLE link flag can not be enabled or disabled. * * When a link is enabled or disabled, the media framework calls the * link_setup operation for the two entities at the source and sink of the * link, in that order. If the second link_setup call fails, another * link_setup call is made on the first entity to restore the original link * flags. * * Media device drivers can be notified of link setup operations by setting the * &media_device.link_notify pointer to a callback function. If provided, the * notification callback will be called before enabling and after disabling * links. * * Entity drivers must implement the link_setup operation if any of their links * is non-immutable. The operation must either configure the hardware or store * the configuration information to be applied later. * * Link configuration must not have any side effect on other links. If an * enabled link at a sink pad prevents another link at the same pad from * being enabled, the link_setup operation must return %-EBUSY and can't * implicitly disable the first enabled link. * * .. note:: * * The valid values of the flags for the link is the same as described * on media_create_pad_link(), for pad to pad links or the same as described * on media_create_intf_link(), for interface to entity links. */ int media_entity_setup_link(struct media_link *link, u32 flags); /** * media_entity_find_link - Find a link between two pads * @source: Source pad * @sink: Sink pad * * Return: returns a pointer to the link between the two entities. If no * such link exists, return %NULL. 
*/ struct media_link *media_entity_find_link(struct media_pad *source, struct media_pad *sink); /** * media_pad_remote_pad_first - Find the first pad at the remote end of a link * @pad: Pad at the local end of the link * * Search for a remote pad connected to the given pad by iterating over all * links originating or terminating at that pad until an enabled link is found. * * Return: returns a pointer to the pad at the remote end of the first found * enabled link, or %NULL if no enabled link has been found. */ struct media_pad *media_pad_remote_pad_first(const struct media_pad *pad); /** * media_pad_remote_pad_unique - Find a remote pad connected to a pad * @pad: The pad * * Search for and return a remote pad connected to @pad through an enabled * link. If multiple (or no) remote pads are found, an error is returned. * * The uniqueness constraint makes this helper function suitable for entities * that support a single active source at a time on a given pad. * * Return: A pointer to the remote pad, or one of the following error pointers * if an error occurs: * * * -ENOTUNIQ - Multiple links are enabled * * -ENOLINK - No connected pad found */ struct media_pad *media_pad_remote_pad_unique(const struct media_pad *pad); /** * media_entity_remote_pad_unique - Find a remote pad connected to an entity * @entity: The entity * @type: The type of pad to find (MEDIA_PAD_FL_SINK or MEDIA_PAD_FL_SOURCE) * * Search for and return a remote pad of @type connected to @entity through an * enabled link. If multiple (or no) remote pads match these criteria, an error * is returned. * * The uniqueness constraint makes this helper function suitable for entities * that support a single active source or sink at a time. * * Return: A pointer to the remote pad, or one of the following error pointers * if an error occurs: * * * -ENOTUNIQ - Multiple links are enabled * * -ENOLINK - No connected pad found */ struct media_pad * media_entity_remote_pad_unique(const struct media_entity *entity, unsigned int type); /** * media_entity_remote_source_pad_unique - Find a remote source pad connected to * an entity * @entity: The entity * * Search for and return a remote source pad connected to @entity through an * enabled link. If multiple (or no) remote pads match these criteria, an error * is returned. * * The uniqueness constraint makes this helper function suitable for entities * that support a single active source at a time. * * Return: A pointer to the remote pad, or one of the following error pointers * if an error occurs: * * * -ENOTUNIQ - Multiple links are enabled * * -ENOLINK - No connected pad found */ static inline struct media_pad * media_entity_remote_source_pad_unique(const struct media_entity *entity) { return media_entity_remote_pad_unique(entity, MEDIA_PAD_FL_SOURCE); } /** * media_pad_is_streaming - Test if a pad is part of a streaming pipeline * @pad: The pad * * Return: True if the pad is part of a pipeline started with the * media_pipeline_start() function, false otherwise. */ static inline bool media_pad_is_streaming(const struct media_pad *pad) { return pad->pipe; } /** * media_entity_is_streaming - Test if an entity is part of a streaming pipeline * @entity: The entity * * Return: True if the entity is part of a pipeline started with the * media_pipeline_start() function, false otherwise. 
*/ static inline bool media_entity_is_streaming(const struct media_entity *entity) { struct media_pad *pad; media_entity_for_each_pad(entity, pad) { if (media_pad_is_streaming(pad)) return true; } return false; } /** * media_entity_pipeline - Get the media pipeline an entity is part of * @entity: The entity * * DEPRECATED: use media_pad_pipeline() instead. * * This function returns the media pipeline that an entity has been associated * with when constructing the pipeline with media_pipeline_start(). The pointer * remains valid until media_pipeline_stop() is called. * * In general, entities can be part of multiple pipelines, when carrying * multiple streams (either on different pads, or on the same pad using * multiplexed streams). This function is to be used only for entities that * do not support multiple pipelines. * * Return: The media_pipeline the entity is part of, or NULL if the entity is * not part of any pipeline. */ struct media_pipeline *media_entity_pipeline(struct media_entity *entity); /** * media_pad_pipeline - Get the media pipeline a pad is part of * @pad: The pad * * This function returns the media pipeline that a pad has been associated * with when constructing the pipeline with media_pipeline_start(). The pointer * remains valid until media_pipeline_stop() is called. * * Return: The media_pipeline the pad is part of, or NULL if the pad is * not part of any pipeline. */ struct media_pipeline *media_pad_pipeline(struct media_pad *pad); /** * media_entity_get_fwnode_pad - Get pad number from fwnode * * @entity: The entity * @fwnode: Pointer to the fwnode_handle which should be used to find the pad * @direction_flags: Expected direction of the pad, as defined in * :ref:`include/uapi/linux/media.h <media_header>` * (seek for ``MEDIA_PAD_FL_*``) * * This function can be used to resolve the media pad number from * a fwnode. This is useful for devices which use more complex * mappings of media pads. * * If the entity does not implement the get_fwnode_pad() operation * then this function searches the entity for the first pad that * matches the @direction_flags. * * Return: returns the pad number on success or a negative error code. */ int media_entity_get_fwnode_pad(struct media_entity *entity, const struct fwnode_handle *fwnode, unsigned long direction_flags); /** * media_graph_walk_init - Allocate resources used by graph walk. * * @graph: Media graph structure that will be used to walk the graph * @mdev: Pointer to the &media_device that contains the object * * This function is deprecated, use media_pipeline_for_each_pad() instead. * * The caller is required to hold the media_device graph_mutex during the graph * walk until the graph state is released. * * Returns zero on success or a negative error code otherwise. */ __must_check int media_graph_walk_init( struct media_graph *graph, struct media_device *mdev); /** * media_graph_walk_cleanup - Release resources used by graph walk. * * @graph: Media graph structure that will be used to walk the graph * * This function is deprecated, use media_pipeline_for_each_pad() instead. */ void media_graph_walk_cleanup(struct media_graph *graph); /** * media_graph_walk_start - Start walking the media graph at a * given entity * * @graph: Media graph structure that will be used to walk the graph * @entity: Starting entity * * This function is deprecated, use media_pipeline_for_each_pad() instead. * * Before using this function, media_graph_walk_init() must be * used to allocate resources used for walking the graph. 
This * function initializes the graph traversal structure to walk the * entities graph starting at the given entity. The traversal * structure must not be modified by the caller during graph * traversal. After the graph walk, the resources must be released * using media_graph_walk_cleanup(). */ void media_graph_walk_start(struct media_graph *graph, struct media_entity *entity); /** * media_graph_walk_next - Get the next entity in the graph * @graph: Media graph structure * * This function is deprecated, use media_pipeline_for_each_pad() instead. * * Perform a depth-first traversal of the given media entities graph. * * The graph structure must have been previously initialized with a call to * media_graph_walk_start(). * * Return: returns the next entity in the graph or %NULL if the whole graph * have been traversed. */ struct media_entity *media_graph_walk_next(struct media_graph *graph); /** * media_pipeline_start - Mark a pipeline as streaming * @origin: Starting pad * @pipe: Media pipeline to be assigned to all pads in the pipeline. * * Mark all pads connected to pad @origin through enabled links, either * directly or indirectly, as streaming. The given pipeline object is assigned * to every pad in the pipeline and stored in the media_pad pipe field. * * Calls to this function can be nested, in which case the same number of * media_pipeline_stop() calls will be required to stop streaming. The * pipeline pointer must be identical for all nested calls to * media_pipeline_start(). */ __must_check int media_pipeline_start(struct media_pad *origin, struct media_pipeline *pipe); /** * __media_pipeline_start - Mark a pipeline as streaming * * @origin: Starting pad * @pipe: Media pipeline to be assigned to all pads in the pipeline. * * ..note:: This is the non-locking version of media_pipeline_start() */ __must_check int __media_pipeline_start(struct media_pad *origin, struct media_pipeline *pipe); /** * media_pipeline_stop - Mark a pipeline as not streaming * @pad: Starting pad * * Mark all pads connected to a given pad through enabled links, either * directly or indirectly, as not streaming. The media_pad pipe field is * reset to %NULL. * * If multiple calls to media_pipeline_start() have been made, the same * number of calls to this function are required to mark the pipeline as not * streaming. */ void media_pipeline_stop(struct media_pad *pad); /** * __media_pipeline_stop - Mark a pipeline as not streaming * * @pad: Starting pad * * .. note:: This is the non-locking version of media_pipeline_stop() */ void __media_pipeline_stop(struct media_pad *pad); struct media_pad * __media_pipeline_pad_iter_next(struct media_pipeline *pipe, struct media_pipeline_pad_iter *iter, struct media_pad *pad); /** * media_pipeline_for_each_pad - Iterate on all pads in a media pipeline * @pipe: The pipeline * @iter: The iterator (struct media_pipeline_pad_iter) * @pad: The iterator pad * * Iterate on all pads in a media pipeline. This is only valid after the * pipeline has been built with media_pipeline_start() and before it gets * destroyed with media_pipeline_stop(). */ #define media_pipeline_for_each_pad(pipe, iter, pad) \ for (pad = __media_pipeline_pad_iter_next((pipe), iter, NULL); \ pad != NULL; \ pad = __media_pipeline_pad_iter_next((pipe), iter, pad)) /** * media_pipeline_entity_iter_init - Initialize a pipeline entity iterator * @pipe: The pipeline * @iter: The iterator * * This function must be called to initialize the iterator before using it in a * media_pipeline_for_each_entity() loop. 
The iterator must be destroyed by a * call to media_pipeline_entity_iter_cleanup() after the loop (including in code * paths that break from the loop). * * The same iterator can be used in multiple consecutive loops without being * destroyed and reinitialized. * * Return: 0 on success or a negative error code otherwise. */ int media_pipeline_entity_iter_init(struct media_pipeline *pipe, struct media_pipeline_entity_iter *iter); /** * media_pipeline_entity_iter_cleanup - Destroy a pipeline entity iterator * @iter: The iterator * * This function must be called to destroy iterators initialized with * media_pipeline_entity_iter_init(). */ void media_pipeline_entity_iter_cleanup(struct media_pipeline_entity_iter *iter); struct media_entity * __media_pipeline_entity_iter_next(struct media_pipeline *pipe, struct media_pipeline_entity_iter *iter, struct media_entity *entity); /** * media_pipeline_for_each_entity - Iterate on all entities in a media pipeline * @pipe: The pipeline * @iter: The iterator (struct media_pipeline_entity_iter) * @entity: The iterator entity * * Iterate on all entities in a media pipeline. This is only valid after the * pipeline has been built with media_pipeline_start() and before it gets * destroyed with media_pipeline_stop(). The iterator must be initialized with * media_pipeline_entity_iter_init() before iteration, and destroyed with * media_pipeline_entity_iter_cleanup() after (including in code paths that * break from the loop). */ #define media_pipeline_for_each_entity(pipe, iter, entity) \ for (entity = __media_pipeline_entity_iter_next((pipe), iter, NULL); \ entity != NULL; \ entity = __media_pipeline_entity_iter_next((pipe), iter, entity)) /** * media_pipeline_alloc_start - Mark a pipeline as streaming * @pad: Starting pad * * media_pipeline_alloc_start() is similar to media_pipeline_start() but instead * of working on a given pipeline the function will use an existing pipeline if * the pad is already part of a pipeline, or allocate a new pipeline. * * Calls to media_pipeline_alloc_start() must be matched with * media_pipeline_stop(). */ __must_check int media_pipeline_alloc_start(struct media_pad *pad); /** * media_devnode_create() - creates and initializes a device node interface * * @mdev: pointer to struct &media_device * @type: type of the interface, as given by * :ref:`include/uapi/linux/media.h <media_header>` * (seek for ``MEDIA_INTF_T_*``) macros. * @flags: Interface flags, as defined in * :ref:`include/uapi/linux/media.h <media_header>` * (seek for ``MEDIA_INTF_FL_*``) * @major: Device node major number. * @minor: Device node minor number. * * Return: on success, returns a pointer to the newly allocated * &media_intf_devnode. * * .. note:: * * Currently, no flags for &media_interface are defined. */ struct media_intf_devnode * __must_check media_devnode_create(struct media_device *mdev, u32 type, u32 flags, u32 major, u32 minor); /** * media_devnode_remove() - removes a device node interface * * @devnode: pointer to &media_intf_devnode to be freed. * * When a device node interface is removed, all links to it are automatically * removed.
*/ void media_devnode_remove(struct media_intf_devnode *devnode); /** * media_create_intf_link() - creates a link between an entity and an interface * * @entity: pointer to %media_entity * @intf: pointer to %media_interface * @flags: Link flags, as defined in * :ref:`include/uapi/linux/media.h <media_header>` * (seek for ``MEDIA_LNK_FL_*``) * * * Valid values for flags: * * %MEDIA_LNK_FL_ENABLED * Indicates that the interface is connected to the entity hardware. * That's the default value for interfaces. An interface may be disabled if * the hardware is busy due to the usage of some other interface that is * currently controlling the hardware. * * A typical example is a hybrid TV device that handles only one type of * stream at a given time. So, when the digital TV is streaming, * the V4L2 interfaces won't be enabled, as such a device is not able to * also stream analog TV or radio. * * .. note:: * * Before calling this function, media_devnode_create() should be called for * the interface and media_device_register_entity() should be called for the * entity that will be part of the link. */ struct media_link * __must_check media_create_intf_link(struct media_entity *entity, struct media_interface *intf, u32 flags); /** * __media_remove_intf_link() - remove a single interface link * * @link: pointer to &media_link. * * .. note:: This is an unlocked version of media_remove_intf_link() */ void __media_remove_intf_link(struct media_link *link); /** * media_remove_intf_link() - remove a single interface link * * @link: pointer to &media_link. * * .. note:: Prefer to use this one, instead of __media_remove_intf_link() */ void media_remove_intf_link(struct media_link *link); /** * __media_remove_intf_links() - remove all links associated with an interface * * @intf: pointer to &media_interface * * .. note:: This is an unlocked version of media_remove_intf_links(). */ void __media_remove_intf_links(struct media_interface *intf); /** * media_remove_intf_links() - remove all links associated with an interface * * @intf: pointer to &media_interface * * .. note:: * * #) This is called automatically when an entity is unregistered via * media_device_unregister_entity() and by media_devnode_remove(). * * #) Prefer to use this one, instead of __media_remove_intf_links(). */ void media_remove_intf_links(struct media_interface *intf); /** * media_entity_call - Calls a struct media_entity_operations operation on * an entity * * @entity: entity where the @operation will be called * @operation: type of the operation. Should be the name of a member of * struct &media_entity_operations. * * This helper function will check if @operation is not %NULL. In that case, * it will issue a call to @operation\(@entity, @args\). */ #define media_entity_call(entity, operation, args...) \ (((entity)->ops && (entity)->ops->operation) ? \ (entity)->ops->operation((entity) , ##args) : -ENOIOCTLCMD) /** * media_create_ancillary_link() - create an ancillary link between two * instances of &media_entity * * @primary: pointer to the primary &media_entity * @ancillary: pointer to the ancillary &media_entity * * Create an ancillary link between two entities, indicating that they * represent two connected pieces of hardware that form a single logical unit. * A typical example is a camera lens controller being linked to the sensor that * it supports. * * The function sets both MEDIA_LNK_FL_ENABLED and MEDIA_LNK_FL_IMMUTABLE for * the new link.
*/ struct media_link * media_create_ancillary_link(struct media_entity *primary, struct media_entity *ancillary); /** * __media_entity_next_link() - Iterate through a &media_entity's links * * @entity: pointer to the &media_entity * @link: pointer to a &media_link to hold the iterated values * @link_type: one of the MEDIA_LNK_FL_LINK_TYPE flags * * Return the next link against an entity matching a specific link type. This * allows iteration through an entity's links whilst guaranteeing all of the * returned links are of the given type. */ struct media_link *__media_entity_next_link(struct media_entity *entity, struct media_link *link, unsigned long link_type); /** * for_each_media_entity_data_link() - Iterate through an entity's data links * * @entity: pointer to the &media_entity * @link: pointer to a &media_link to hold the iterated values * * Iterate over a &media_entity's data links */ #define for_each_media_entity_data_link(entity, link) \ for (link = __media_entity_next_link(entity, NULL, \ MEDIA_LNK_FL_DATA_LINK); \ link; \ link = __media_entity_next_link(entity, link, \ MEDIA_LNK_FL_DATA_LINK)) #endif
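The pipeline helpers documented above are easiest to see in combination. The sketch below is illustrative only and not part of the header: it assumes a hypothetical driver that already owns a starting pad and a struct media_pipeline, and it relies solely on the media_pipeline_*() and media_pad_pipeline() interfaces declared above (the function name and the pr_debug()/WARN_ON() reporting are invented for the example).

#include <linux/bug.h>
#include <linux/printk.h>
#include <media/media-entity.h>

static int example_start_and_walk_pipeline(struct media_pad *origin,
					   struct media_pipeline *pipe)
{
	struct media_pipeline_pad_iter pad_iter;
	struct media_pipeline_entity_iter entity_iter;
	struct media_entity *entity;
	struct media_pad *pad;
	int ret;

	/* Associate @pipe with every pad reachable from @origin over enabled links. */
	ret = media_pipeline_start(origin, pipe);
	if (ret)
		return ret;

	/* Pad iteration is only valid between media_pipeline_start() and _stop(). */
	media_pipeline_for_each_pad(pipe, &pad_iter, pad)
		pr_debug("pad %u of '%s' is streaming\n",
			 pad->index, pad->entity->name);

	/* Unlike the pad iterator, the entity iterator needs explicit init/cleanup. */
	ret = media_pipeline_entity_iter_init(pipe, &entity_iter);
	if (ret)
		goto err_stop;

	media_pipeline_for_each_entity(pipe, &entity_iter, entity)
		pr_debug("entity '%s' is part of the pipeline\n", entity->name);

	media_pipeline_entity_iter_cleanup(&entity_iter);

	/* While started, media_pad_pipeline() reports the pipeline of any member pad. */
	WARN_ON(media_pad_pipeline(origin) != pipe);

	/* Every (possibly nested) media_pipeline_start() needs a matching stop. */
	media_pipeline_stop(origin);
	return 0;

err_stop:
	media_pipeline_stop(origin);
	return ret;
}

Note the asymmetry the documentation calls out: media_pipeline_for_each_pad() needs no setup, while media_pipeline_for_each_entity() must be bracketed by media_pipeline_entity_iter_init() and media_pipeline_entity_iter_cleanup(), including on paths that leave the loop early.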
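The data-link iterator declared just above lends itself to a similarly short example. Again, this is only a sketch (the helper name and the enabled-link policy are invented); it assumes nothing beyond the for_each_media_entity_data_link() macro and the MEDIA_LNK_FL_* flags from the uapi header.

#include <linux/media.h>
#include <media/media-entity.h>

/* Count how many of an entity's data links are currently enabled. */
static unsigned int example_count_enabled_data_links(struct media_entity *entity)
{
	struct media_link *link;
	unsigned int enabled = 0;

	for_each_media_entity_data_link(entity, link) {
		/* Only data links are returned; interface and ancillary links are skipped. */
		if (link->flags & MEDIA_LNK_FL_ENABLED)
			enabled++;
	}

	return enabled;
}

media_entity_call() plays a complementary role for the optional operations in &media_entity_operations: as the macro definition above shows, it only issues the call when the entity implements the operation, passing the entity itself as the first argument and returning -ENOIOCTLCMD otherwise.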
// SPDX-License-Identifier: GPL-2.0 /* * fs/f2fs/file.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/ */ #include <linux/fs.h> #include <linux/f2fs_fs.h> #include <linux/stat.h> #include <linux/writeback.h> #include <linux/blkdev.h> #include <linux/falloc.h> #include <linux/types.h> #include <linux/compat.h> #include <linux/uaccess.h> #include <linux/mount.h> #include <linux/pagevec.h> #include <linux/uio.h> #include <linux/uuid.h> #include <linux/file.h> #include <linux/nls.h> #include <linux/sched/signal.h> #include <linux/fileattr.h> #include <linux/fadvise.h> #include <linux/iomap.h> #include "f2fs.h" #include "node.h" #include "segment.h" #include "xattr.h" #include "acl.h" #include "gc.h" #include "iostat.h" #include <trace/events/f2fs.h> #include <uapi/linux/f2fs.h> static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf) { struct inode *inode = file_inode(vmf->vma->vm_file); vm_flags_t flags = vmf->vma->vm_flags; vm_fault_t ret; ret = filemap_fault(vmf); if (ret & VM_FAULT_LOCKED) f2fs_update_iostat(F2FS_I_SB(inode), inode, APP_MAPPED_READ_IO, F2FS_BLKSIZE); trace_f2fs_filemap_fault(inode, vmf->pgoff, flags, ret); return ret; } static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf) { struct folio *folio = page_folio(vmf->page); struct inode *inode = file_inode(vmf->vma->vm_file); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct dnode_of_data dn; bool need_alloc = !f2fs_is_pinned_file(inode); int err = 0; vm_fault_t ret; if (unlikely(IS_IMMUTABLE(inode))) return VM_FAULT_SIGBUS; if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) { err = -EIO; goto out; } if (unlikely(f2fs_cp_error(sbi))) { err = -EIO; goto out; } if (!f2fs_is_checkpoint_ready(sbi)) { err = -ENOSPC; goto out; } err = f2fs_convert_inline_inode(inode); if (err) goto out; #ifdef CONFIG_F2FS_FS_COMPRESSION if (f2fs_compressed_file(inode)) { int ret = f2fs_is_compressed_cluster(inode, folio->index); if (ret < 0) { err = ret; goto out; } else if (ret) { need_alloc = false; } } #endif /* should do out of any locked page */ if (need_alloc) f2fs_balance_fs(sbi, true); sb_start_pagefault(inode->i_sb); f2fs_bug_on(sbi, f2fs_has_inline_data(inode)); file_update_time(vmf->vma->vm_file); filemap_invalidate_lock_shared(inode->i_mapping); folio_lock(folio); if (unlikely(folio->mapping != inode->i_mapping || folio_pos(folio) > i_size_read(inode) || !folio_test_uptodate(folio))) { folio_unlock(folio); err = -EFAULT; goto out_sem; } set_new_dnode(&dn, inode, NULL, NULL, 0); if (need_alloc) { /* block allocation */ err = f2fs_get_block_locked(&dn, folio->index); } else { err = f2fs_get_dnode_of_data(&dn, folio->index, LOOKUP_NODE); f2fs_put_dnode(&dn); if (f2fs_is_pinned_file(inode) && !__is_valid_data_blkaddr(dn.data_blkaddr)) err = -EIO; } if (err) { folio_unlock(folio); goto out_sem; } f2fs_wait_on_page_writeback(folio_page(folio, 0), DATA, false, true); /* wait for GCed page writeback via META_MAPPING */ f2fs_wait_on_block_writeback(inode, dn.data_blkaddr); /* * check to see if the page is mapped already (no holes) */ if (folio_test_mappedtodisk(folio)) goto out_sem; /* page is wholly or partially inside EOF */ if (((loff_t)(folio->index + 1) << PAGE_SHIFT) > i_size_read(inode)) { loff_t offset; offset = i_size_read(inode) & ~PAGE_MASK; folio_zero_segment(folio, offset, folio_size(folio)); } folio_mark_dirty(folio); f2fs_update_iostat(sbi, inode, APP_MAPPED_IO, F2FS_BLKSIZE); f2fs_update_time(sbi, REQ_TIME); out_sem: filemap_invalidate_unlock_shared(inode->i_mapping); sb_end_pagefault(inode->i_sb); out: ret = vmf_fs_error(err); trace_f2fs_vm_page_mkwrite(inode, folio->index, vmf->vma->vm_flags, 
ret); return ret; } static const struct vm_operations_struct f2fs_file_vm_ops = { .fault = f2fs_filemap_fault, .map_pages = filemap_map_pages, .page_mkwrite = f2fs_vm_page_mkwrite, }; static int get_parent_ino(struct inode *inode, nid_t *pino) { struct dentry *dentry; /* * Make sure to get the non-deleted alias. The alias associated with * the open file descriptor being fsync()'ed may be deleted already. */ dentry = d_find_alias(inode); if (!dentry) return 0; *pino = d_parent_ino(dentry); dput(dentry); return 1; } static inline enum cp_reason_type need_do_checkpoint(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); enum cp_reason_type cp_reason = CP_NO_NEEDED; if (!S_ISREG(inode->i_mode)) cp_reason = CP_NON_REGULAR; else if (f2fs_compressed_file(inode)) cp_reason = CP_COMPRESSED; else if (inode->i_nlink != 1) cp_reason = CP_HARDLINK; else if (is_sbi_flag_set(sbi, SBI_NEED_CP)) cp_reason = CP_SB_NEED_CP; else if (file_wrong_pino(inode)) cp_reason = CP_WRONG_PINO; else if (!f2fs_space_for_roll_forward(sbi)) cp_reason = CP_NO_SPC_ROLL; else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino)) cp_reason = CP_NODE_NEED_CP; else if (test_opt(sbi, FASTBOOT)) cp_reason = CP_FASTBOOT_MODE; else if (F2FS_OPTION(sbi).active_logs == 2) cp_reason = CP_SPEC_LOG_NUM; else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT && f2fs_need_dentry_mark(sbi, inode->i_ino) && f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino, TRANS_DIR_INO)) cp_reason = CP_RECOVER_DIR; else if (f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino, XATTR_DIR_INO)) cp_reason = CP_XATTR_DIR; return cp_reason; } static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino) { struct page *i = find_get_page(NODE_MAPPING(sbi), ino); bool ret = false; /* But we need to avoid that there are some inode updates */ if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino)) ret = true; f2fs_put_page(i, 0); return ret; } static void try_to_fix_pino(struct inode *inode) { struct f2fs_inode_info *fi = F2FS_I(inode); nid_t pino; f2fs_down_write(&fi->i_sem); if (file_wrong_pino(inode) && inode->i_nlink == 1 && get_parent_ino(inode, &pino)) { f2fs_i_pino_write(inode, pino); file_got_pino(inode); } f2fs_up_write(&fi->i_sem); } static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end, int datasync, bool atomic) { struct inode *inode = file->f_mapping->host; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); nid_t ino = inode->i_ino; int ret = 0; enum cp_reason_type cp_reason = 0; struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = LONG_MAX, .for_reclaim = 0, }; unsigned int seq_id = 0; if (unlikely(f2fs_readonly(inode->i_sb))) return 0; trace_f2fs_sync_file_enter(inode); if (S_ISDIR(inode->i_mode)) goto go_write; /* if fdatasync is triggered, let's do in-place-update */ if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks) set_inode_flag(inode, FI_NEED_IPU); ret = file_write_and_wait_range(file, start, end); clear_inode_flag(inode, FI_NEED_IPU); if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) { trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret); return ret; } /* if the inode is dirty, let's recover all the time */ if (!f2fs_skip_inode_update(inode, datasync)) { f2fs_write_inode(inode, NULL); goto go_write; } /* * if there is no written data, don't waste time to write recovery info. 
*/ if (!is_inode_flag_set(inode, FI_APPEND_WRITE) && !f2fs_exist_written_data(sbi, ino, APPEND_INO)) { /* it may call write_inode just prior to fsync */ if (need_inode_page_update(sbi, ino)) goto go_write; if (is_inode_flag_set(inode, FI_UPDATE_WRITE) || f2fs_exist_written_data(sbi, ino, UPDATE_INO)) goto flush_out; goto out; } else { /* * for OPU case, during fsync(), node can be persisted before * data when lower device doesn't support write barrier, result * in data corruption after SPO. * So for strict fsync mode, force to use atomic write semantics * to keep write order in between data/node and last node to * avoid potential data corruption. */ if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT && !atomic) atomic = true; } go_write: /* * Both of fdatasync() and fsync() are able to be recovered from * sudden-power-off. */ f2fs_down_read(&F2FS_I(inode)->i_sem); cp_reason = need_do_checkpoint(inode); f2fs_up_read(&F2FS_I(inode)->i_sem); if (cp_reason) { /* all the dirty node pages should be flushed for POR */ ret = f2fs_sync_fs(inode->i_sb, 1); /* * We've secured consistency through sync_fs. Following pino * will be used only for fsynced inodes after checkpoint. */ try_to_fix_pino(inode); clear_inode_flag(inode, FI_APPEND_WRITE); clear_inode_flag(inode, FI_UPDATE_WRITE); goto out; } sync_nodes: atomic_inc(&sbi->wb_sync_req[NODE]); ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id); atomic_dec(&sbi->wb_sync_req[NODE]); if (ret) goto out; /* if cp_error was enabled, we should avoid infinite loop */ if (unlikely(f2fs_cp_error(sbi))) { ret = -EIO; goto out; } if (f2fs_need_inode_block_update(sbi, ino)) { f2fs_mark_inode_dirty_sync(inode, true); f2fs_write_inode(inode, NULL); goto sync_nodes; } /* * If it's atomic_write, it's just fine to keep write ordering. So * here we don't need to wait for node write completion, since we use * node chain which serializes node blocks. If one of node writes are * reordered, we can see simply broken chain, resulting in stopping * roll-forward recovery. It means we'll recover all or none node blocks * given fsync mark. 
*/ if (!atomic) { ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id); if (ret) goto out; } /* once recovery info is written, don't need to tack this */ f2fs_remove_ino_entry(sbi, ino, APPEND_INO); clear_inode_flag(inode, FI_APPEND_WRITE); flush_out: if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER) ret = f2fs_issue_flush(sbi, inode->i_ino); if (!ret) { f2fs_remove_ino_entry(sbi, ino, UPDATE_INO); clear_inode_flag(inode, FI_UPDATE_WRITE); f2fs_remove_ino_entry(sbi, ino, FLUSH_INO); } f2fs_update_time(sbi, REQ_TIME); out: trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret); return ret; } int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) { if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file))))) return -EIO; return f2fs_do_sync_file(file, start, end, datasync, false); } static bool __found_offset(struct address_space *mapping, struct dnode_of_data *dn, pgoff_t index, int whence) { block_t blkaddr = f2fs_data_blkaddr(dn); struct inode *inode = mapping->host; bool compressed_cluster = false; if (f2fs_compressed_file(inode)) { block_t first_blkaddr = data_blkaddr(dn->inode, dn->node_page, ALIGN_DOWN(dn->ofs_in_node, F2FS_I(inode)->i_cluster_size)); compressed_cluster = first_blkaddr == COMPRESS_ADDR; } switch (whence) { case SEEK_DATA: if (__is_valid_data_blkaddr(blkaddr)) return true; if (blkaddr == NEW_ADDR && xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY)) return true; if (compressed_cluster) return true; break; case SEEK_HOLE: if (compressed_cluster) return false; if (blkaddr == NULL_ADDR) return true; break; } return false; } static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence) { struct inode *inode = file->f_mapping->host; loff_t maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode)); struct dnode_of_data dn; pgoff_t pgofs, end_offset; loff_t data_ofs = offset; loff_t isize; int err = 0; inode_lock_shared(inode); isize = i_size_read(inode); if (offset >= isize) goto fail; /* handle inline data case */ if (f2fs_has_inline_data(inode)) { if (whence == SEEK_HOLE) { data_ofs = isize; goto found; } else if (whence == SEEK_DATA) { data_ofs = offset; goto found; } } pgofs = (pgoff_t)(offset >> PAGE_SHIFT); for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) { set_new_dnode(&dn, inode, NULL, NULL, 0); err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE); if (err && err != -ENOENT) { goto fail; } else if (err == -ENOENT) { /* direct node does not exists */ if (whence == SEEK_DATA) { pgofs = f2fs_get_next_page_offset(&dn, pgofs); continue; } else { goto found; } } end_offset = ADDRS_PER_PAGE(dn.node_page, inode); /* find data/hole in dnode block */ for (; dn.ofs_in_node < end_offset; dn.ofs_in_node++, pgofs++, data_ofs = (loff_t)pgofs << PAGE_SHIFT) { block_t blkaddr; blkaddr = f2fs_data_blkaddr(&dn); if (__is_valid_data_blkaddr(blkaddr) && !f2fs_is_valid_blkaddr(F2FS_I_SB(inode), blkaddr, DATA_GENERIC_ENHANCE)) { f2fs_put_dnode(&dn); goto fail; } if (__found_offset(file->f_mapping, &dn, pgofs, whence)) { f2fs_put_dnode(&dn); goto found; } } f2fs_put_dnode(&dn); } if (whence == SEEK_DATA) goto fail; found: if (whence == SEEK_HOLE && data_ofs > isize) data_ofs = isize; inode_unlock_shared(inode); return vfs_setpos(file, data_ofs, maxbytes); fail: inode_unlock_shared(inode); return -ENXIO; } static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence) { struct inode *inode = file->f_mapping->host; loff_t maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode)); switch (whence) { case SEEK_SET: 
case SEEK_CUR: case SEEK_END: return generic_file_llseek_size(file, offset, whence, maxbytes, i_size_read(inode)); case SEEK_DATA: case SEEK_HOLE: if (offset < 0) return -ENXIO; return f2fs_seek_block(file, offset, whence); } return -EINVAL; } static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma) { struct inode *inode = file_inode(file); if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) return -EIO; if (!f2fs_is_compress_backend_ready(inode)) return -EOPNOTSUPP; file_accessed(file); vma->vm_ops = &f2fs_file_vm_ops; f2fs_down_read(&F2FS_I(inode)->i_sem); set_inode_flag(inode, FI_MMAP_FILE); f2fs_up_read(&F2FS_I(inode)->i_sem); return 0; } static int finish_preallocate_blocks(struct inode *inode) { int ret; inode_lock(inode); if (is_inode_flag_set(inode, FI_OPENED_FILE)) { inode_unlock(inode); return 0; } if (!file_should_truncate(inode)) { set_inode_flag(inode, FI_OPENED_FILE); inode_unlock(inode); return 0; } f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); filemap_invalidate_lock(inode->i_mapping); truncate_setsize(inode, i_size_read(inode)); ret = f2fs_truncate(inode); filemap_invalidate_unlock(inode->i_mapping); f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); if (!ret) set_inode_flag(inode, FI_OPENED_FILE); inode_unlock(inode); if (ret) return ret; file_dont_truncate(inode); return 0; } static int f2fs_file_open(struct inode *inode, struct file *filp) { int err = fscrypt_file_open(inode, filp); if (err) return err; if (!f2fs_is_compress_backend_ready(inode)) return -EOPNOTSUPP; err = fsverity_file_open(inode, filp); if (err) return err; filp->f_mode |= FMODE_NOWAIT; filp->f_mode |= FMODE_CAN_ODIRECT; err = dquot_file_open(inode, filp); if (err) return err; return finish_preallocate_blocks(inode); } void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count) { struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); int nr_free = 0, ofs = dn->ofs_in_node, len = count; __le32 *addr; bool compressed_cluster = false; int cluster_index = 0, valid_blocks = 0; int cluster_size = F2FS_I(dn->inode)->i_cluster_size; bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks); block_t blkstart; int blklen = 0; addr = get_dnode_addr(dn->inode, dn->node_page) + ofs; blkstart = le32_to_cpu(*addr); /* Assumption: truncation starts with cluster */ for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) { block_t blkaddr = le32_to_cpu(*addr); if (f2fs_compressed_file(dn->inode) && !(cluster_index & (cluster_size - 1))) { if (compressed_cluster) f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false); compressed_cluster = (blkaddr == COMPRESS_ADDR); valid_blocks = 0; } if (blkaddr == NULL_ADDR) goto next; f2fs_set_data_blkaddr(dn, NULL_ADDR); if (__is_valid_data_blkaddr(blkaddr)) { if (time_to_inject(sbi, FAULT_BLKADDR_CONSISTENCE)) goto next; if (!f2fs_is_valid_blkaddr_raw(sbi, blkaddr, DATA_GENERIC_ENHANCE)) goto next; if (compressed_cluster) valid_blocks++; } if (blkstart + blklen == blkaddr) { blklen++; } else { f2fs_invalidate_blocks(sbi, blkstart, blklen); blkstart = blkaddr; blklen = 1; } if (!released || blkaddr != COMPRESS_ADDR) nr_free++; continue; next: if (blklen) f2fs_invalidate_blocks(sbi, blkstart, blklen); blkstart = le32_to_cpu(*(addr + 1)); blklen = 0; } if (blklen) f2fs_invalidate_blocks(sbi, blkstart, blklen); if (compressed_cluster) f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false); if (nr_free) { pgoff_t fofs; /* * once we invalidate valid blkaddr in range [ofs, ofs + count], * we will invalidate all blkaddr in the whole 
range. */ fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) + ofs; f2fs_update_read_extent_cache_range(dn, fofs, 0, len); f2fs_update_age_extent_cache_range(dn, fofs, len); dec_valid_block_count(sbi, dn->inode, nr_free); } dn->ofs_in_node = ofs; f2fs_update_time(sbi, REQ_TIME); trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid, dn->ofs_in_node, nr_free); } static int truncate_partial_data_page(struct inode *inode, u64 from, bool cache_only) { loff_t offset = from & (PAGE_SIZE - 1); pgoff_t index = from >> PAGE_SHIFT; struct address_space *mapping = inode->i_mapping; struct page *page; if (!offset && !cache_only) return 0; if (cache_only) { page = find_lock_page(mapping, index); if (page && PageUptodate(page)) goto truncate_out; f2fs_put_page(page, 1); return 0; } page = f2fs_get_lock_data_page(inode, index, true); if (IS_ERR(page)) return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page); truncate_out: f2fs_wait_on_page_writeback(page, DATA, true, true); zero_user(page, offset, PAGE_SIZE - offset); /* An encrypted inode should have a key and truncate the last page. */ f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode)); if (!cache_only) set_page_dirty(page); f2fs_put_page(page, 1); return 0; } int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct dnode_of_data dn; pgoff_t free_from; int count = 0, err = 0; struct page *ipage; bool truncate_page = false; trace_f2fs_truncate_blocks_enter(inode, from); if (IS_DEVICE_ALIASING(inode) && from) { err = -EINVAL; goto out_err; } free_from = (pgoff_t)F2FS_BLK_ALIGN(from); if (free_from >= max_file_blocks(inode)) goto free_partial; if (lock) f2fs_lock_op(sbi); ipage = f2fs_get_node_page(sbi, inode->i_ino); if (IS_ERR(ipage)) { err = PTR_ERR(ipage); goto out; } if (IS_DEVICE_ALIASING(inode)) { struct extent_tree *et = F2FS_I(inode)->extent_tree[EX_READ]; struct extent_info ei = et->largest; f2fs_invalidate_blocks(sbi, ei.blk, ei.len); dec_valid_block_count(sbi, inode, ei.len); f2fs_update_time(sbi, REQ_TIME); f2fs_put_page(ipage, 1); goto out; } if (f2fs_has_inline_data(inode)) { f2fs_truncate_inline_inode(inode, ipage, from); f2fs_put_page(ipage, 1); truncate_page = true; goto out; } set_new_dnode(&dn, inode, ipage, NULL, 0); err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA); if (err) { if (err == -ENOENT) goto free_next; goto out; } count = ADDRS_PER_PAGE(dn.node_page, inode); count -= dn.ofs_in_node; f2fs_bug_on(sbi, count < 0); if (dn.ofs_in_node || IS_INODE(dn.node_page)) { f2fs_truncate_data_blocks_range(&dn, count); free_from += count; } f2fs_put_dnode(&dn); free_next: err = f2fs_truncate_inode_blocks(inode, free_from); out: if (lock) f2fs_unlock_op(sbi); free_partial: /* lastly zero out the first data page */ if (!err) err = truncate_partial_data_page(inode, from, truncate_page); out_err: trace_f2fs_truncate_blocks_exit(inode, err); return err; } int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock) { u64 free_from = from; int err; #ifdef CONFIG_F2FS_FS_COMPRESSION /* * for compressed file, only support cluster size * aligned truncation. */ if (f2fs_compressed_file(inode)) free_from = round_up(from, F2FS_I(inode)->i_cluster_size << PAGE_SHIFT); #endif err = f2fs_do_truncate_blocks(inode, free_from, lock); if (err) return err; #ifdef CONFIG_F2FS_FS_COMPRESSION /* * For compressed file, after release compress blocks, don't allow write * direct, but we should allow write direct after truncate to zero. 
*/ if (f2fs_compressed_file(inode) && !free_from && is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) clear_inode_flag(inode, FI_COMPRESS_RELEASED); if (from != free_from) { err = f2fs_truncate_partial_cluster(inode, from, lock); if (err) return err; } #endif return 0; } int f2fs_truncate(struct inode *inode) { int err; if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) return -EIO; if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) return 0; trace_f2fs_truncate(inode); if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) return -EIO; err = f2fs_dquot_initialize(inode); if (err) return err; /* we should check inline_data size */ if (!f2fs_may_inline_data(inode)) { err = f2fs_convert_inline_inode(inode); if (err) return err; } err = f2fs_truncate_blocks(inode, i_size_read(inode), true); if (err) return err; inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); f2fs_mark_inode_dirty_sync(inode, false); return 0; } static bool f2fs_force_buffered_io(struct inode *inode, int rw) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); if (!fscrypt_dio_supported(inode)) return true; if (fsverity_active(inode)) return true; if (f2fs_compressed_file(inode)) return true; /* * only force direct read to use buffered IO, for direct write, * it expects inline data conversion before committing IO. */ if (f2fs_has_inline_data(inode) && rw == READ) return true; /* disallow direct IO if any of devices has unaligned blksize */ if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize) return true; /* * for blkzoned device, fallback direct IO to buffered IO, so * all IOs can be serialized by log-structured write. */ if (f2fs_sb_has_blkzoned(sbi) && (rw == WRITE) && !f2fs_is_pinned_file(inode)) return true; if (is_sbi_flag_set(sbi, SBI_CP_DISABLED)) return true; return false; } int f2fs_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { struct inode *inode = d_inode(path->dentry); struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_inode *ri = NULL; unsigned int flags; if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) && F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) { stat->result_mask |= STATX_BTIME; stat->btime.tv_sec = fi->i_crtime.tv_sec; stat->btime.tv_nsec = fi->i_crtime.tv_nsec; } /* * Return the DIO alignment restrictions if requested. We only return * this information when requested, since on encrypted files it might * take a fair bit of work to get if the file wasn't opened recently. * * f2fs sometimes supports DIO reads but not DIO writes. STATX_DIOALIGN * cannot represent that, so in that case we report no DIO support. 
*/ if ((request_mask & STATX_DIOALIGN) && S_ISREG(inode->i_mode)) { unsigned int bsize = i_blocksize(inode); stat->result_mask |= STATX_DIOALIGN; if (!f2fs_force_buffered_io(inode, WRITE)) { stat->dio_mem_align = bsize; stat->dio_offset_align = bsize; } } flags = fi->i_flags; if (flags & F2FS_COMPR_FL) stat->attributes |= STATX_ATTR_COMPRESSED; if (flags & F2FS_APPEND_FL) stat->attributes |= STATX_ATTR_APPEND; if (IS_ENCRYPTED(inode)) stat->attributes |= STATX_ATTR_ENCRYPTED; if (flags & F2FS_IMMUTABLE_FL) stat->attributes |= STATX_ATTR_IMMUTABLE; if (flags & F2FS_NODUMP_FL) stat->attributes |= STATX_ATTR_NODUMP; if (IS_VERITY(inode)) stat->attributes |= STATX_ATTR_VERITY; stat->attributes_mask |= (STATX_ATTR_COMPRESSED | STATX_ATTR_APPEND | STATX_ATTR_ENCRYPTED | STATX_ATTR_IMMUTABLE | STATX_ATTR_NODUMP | STATX_ATTR_VERITY); generic_fillattr(idmap, request_mask, inode, stat); /* we need to show initial sectors used for inline_data/dentries */ if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) || f2fs_has_inline_dentry(inode)) stat->blocks += (stat->size + 511) >> 9; return 0; } #ifdef CONFIG_F2FS_FS_POSIX_ACL static void __setattr_copy(struct mnt_idmap *idmap, struct inode *inode, const struct iattr *attr) { unsigned int ia_valid = attr->ia_valid; i_uid_update(idmap, attr, inode); i_gid_update(idmap, attr, inode); if (ia_valid & ATTR_ATIME) inode_set_atime_to_ts(inode, attr->ia_atime); if (ia_valid & ATTR_MTIME) inode_set_mtime_to_ts(inode, attr->ia_mtime); if (ia_valid & ATTR_CTIME) inode_set_ctime_to_ts(inode, attr->ia_ctime); if (ia_valid & ATTR_MODE) { umode_t mode = attr->ia_mode; if (!in_group_or_capable(idmap, inode, i_gid_into_vfsgid(idmap, inode))) mode &= ~S_ISGID; set_acl_inode(inode, mode); } } #else #define __setattr_copy setattr_copy #endif int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); struct f2fs_inode_info *fi = F2FS_I(inode); int err; if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) return -EIO; if (unlikely(IS_IMMUTABLE(inode))) return -EPERM; if (unlikely(IS_APPEND(inode) && (attr->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_TIMES_SET)))) return -EPERM; if ((attr->ia_valid & ATTR_SIZE)) { if (!f2fs_is_compress_backend_ready(inode) || IS_DEVICE_ALIASING(inode)) return -EOPNOTSUPP; if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) && !IS_ALIGNED(attr->ia_size, F2FS_BLK_TO_BYTES(fi->i_cluster_size))) return -EINVAL; } err = setattr_prepare(idmap, dentry, attr); if (err) return err; err = fscrypt_prepare_setattr(dentry, attr); if (err) return err; err = fsverity_prepare_setattr(dentry, attr); if (err) return err; if (is_quota_modification(idmap, inode, attr)) { err = f2fs_dquot_initialize(inode); if (err) return err; } if (i_uid_needs_update(idmap, attr, inode) || i_gid_needs_update(idmap, attr, inode)) { f2fs_lock_op(F2FS_I_SB(inode)); err = dquot_transfer(idmap, inode, attr); if (err) { set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR); f2fs_unlock_op(F2FS_I_SB(inode)); return err; } /* * update uid/gid under lock_op(), so that dquot and inode can * be updated atomically. */ i_uid_update(idmap, attr, inode); i_gid_update(idmap, attr, inode); f2fs_mark_inode_dirty_sync(inode, true); f2fs_unlock_op(F2FS_I_SB(inode)); } if (attr->ia_valid & ATTR_SIZE) { loff_t old_size = i_size_read(inode); if (attr->ia_size > MAX_INLINE_DATA(inode)) { /* * should convert inline inode before i_size_write to * keep smaller than inline_data size with inline flag. 
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				return err;
		}

		/*
		 * wait for inflight dio, blocks should be removed after
		 * IO completion.
		 */
		if (attr->ia_size < old_size)
			inode_dio_wait(inode);

		f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
		filemap_invalidate_lock(inode->i_mapping);

		truncate_setsize(inode, attr->ia_size);

		if (attr->ia_size <= old_size)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		filemap_invalidate_unlock(inode->i_mapping);
		f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		spin_lock(&fi->i_size_lock);
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		fi->last_disk_size = i_size_read(inode);
		spin_unlock(&fi->i_size_lock);
	}

	__setattr_copy(idmap, inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(idmap, dentry, f2fs_get_inode_mode(inode));

		if (is_inode_flag_set(inode, FI_ACL_MODE)) {
			if (!err)
				inode->i_mode = fi->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may have changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_inode_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
	.listxattr	= f2fs_listxattr,
	.fiemap		= f2fs_fiemap,
	.fileattr_get	= f2fs_fileattr_get,
	.fileattr_set	= f2fs_fileattr_set,
};

static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = f2fs_get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start = f2fs_get_next_page_offset(&dn,
								pg_start);
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		f2fs_truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}

	return 0;
}

static int f2fs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;
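			/*
			 * This branch frees the block-aligned middle of the
			 * hole; the partial head/tail pages were already
			 * zeroed by fill_zero() above.  Illustrative userspace
			 * sketch (assuming a regular file on an f2fs mount;
			 * fd, offset and len are hypothetical) of how this
			 * path is reached via f2fs_fallocate() below:
			 *
			 *	#include <fcntl.h>
			 *	int fd = open("/mnt/f2fs/file", O_RDWR);
			 *	// FALLOC_FL_PUNCH_HOLE must be paired with
			 *	// FALLOC_FL_KEEP_SIZE, per fallocate(2)
			 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE |
			 *			FALLOC_FL_KEEP_SIZE, offset, len);
			 */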
f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); filemap_invalidate_lock(inode->i_mapping); truncate_pagecache_range(inode, blk_start, blk_end - 1); f2fs_lock_op(sbi); ret = f2fs_truncate_hole(inode, pg_start, pg_end); f2fs_unlock_op(sbi); filemap_invalidate_unlock(inode->i_mapping); f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); } } return ret; } static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr, int *do_replace, pgoff_t off, pgoff_t len) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct dnode_of_data dn; int ret, done, i; next_dnode: set_new_dnode(&dn, inode, NULL, NULL, 0); ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA); if (ret && ret != -ENOENT) { return ret; } else if (ret == -ENOENT) { if (dn.max_level == 0) return -ENOENT; done = min((pgoff_t)ADDRS_PER_BLOCK(inode) - dn.ofs_in_node, len); blkaddr += done; do_replace += done; goto next; } done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) - dn.ofs_in_node, len); for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) { *blkaddr = f2fs_data_blkaddr(&dn); if (__is_valid_data_blkaddr(*blkaddr) && !f2fs_is_valid_blkaddr(sbi, *blkaddr, DATA_GENERIC_ENHANCE)) { f2fs_put_dnode(&dn); return -EFSCORRUPTED; } if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) { if (f2fs_lfs_mode(sbi)) { f2fs_put_dnode(&dn); return -EOPNOTSUPP; } /* do not invalidate this block address */ f2fs_update_data_blkaddr(&dn, NULL_ADDR); *do_replace = 1; } } f2fs_put_dnode(&dn); next: len -= done; off += done; if (len) goto next_dnode; return 0; } static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr, int *do_replace, pgoff_t off, int len) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct dnode_of_data dn; int ret, i; for (i = 0; i < len; i++, do_replace++, blkaddr++) { if (*do_replace == 0) continue; set_new_dnode(&dn, inode, NULL, NULL, 0); ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA); if (ret) { dec_valid_block_count(sbi, inode, 1); f2fs_invalidate_blocks(sbi, *blkaddr, 1); } else { f2fs_update_data_blkaddr(&dn, *blkaddr); } f2fs_put_dnode(&dn); } return 0; } static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode, block_t *blkaddr, int *do_replace, pgoff_t src, pgoff_t dst, pgoff_t len, bool full) { struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode); pgoff_t i = 0; int ret; while (i < len) { if (blkaddr[i] == NULL_ADDR && !full) { i++; continue; } if (do_replace[i] || blkaddr[i] == NULL_ADDR) { struct dnode_of_data dn; struct node_info ni; size_t new_size; pgoff_t ilen; set_new_dnode(&dn, dst_inode, NULL, NULL, 0); ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE); if (ret) return ret; ret = f2fs_get_node_info(sbi, dn.nid, &ni, false); if (ret) { f2fs_put_dnode(&dn); return ret; } ilen = min((pgoff_t) ADDRS_PER_PAGE(dn.node_page, dst_inode) - dn.ofs_in_node, len - i); do { dn.data_blkaddr = f2fs_data_blkaddr(&dn); f2fs_truncate_data_blocks_range(&dn, 1); if (do_replace[i]) { f2fs_i_blocks_write(src_inode, 1, false, false); f2fs_i_blocks_write(dst_inode, 1, true, false); f2fs_replace_block(sbi, &dn, dn.data_blkaddr, blkaddr[i], ni.version, true, false); do_replace[i] = 0; } dn.ofs_in_node++; i++; new_size = (loff_t)(dst + i) << PAGE_SHIFT; if (dst_inode->i_size < new_size) f2fs_i_size_write(dst_inode, new_size); } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR)); f2fs_put_dnode(&dn); } else { struct page *psrc, *pdst; psrc = f2fs_get_lock_data_page(src_inode, src + i, true); if (IS_ERR(psrc)) return PTR_ERR(psrc); pdst = 
f2fs_get_new_data_page(dst_inode, NULL, dst + i, true); if (IS_ERR(pdst)) { f2fs_put_page(psrc, 1); return PTR_ERR(pdst); } f2fs_wait_on_page_writeback(pdst, DATA, true, true); memcpy_page(pdst, 0, psrc, 0, PAGE_SIZE); set_page_dirty(pdst); set_page_private_gcing(pdst); f2fs_put_page(pdst, 1); f2fs_put_page(psrc, 1); ret = f2fs_truncate_hole(src_inode, src + i, src + i + 1); if (ret) return ret; i++; } } return 0; } static int __exchange_data_block(struct inode *src_inode, struct inode *dst_inode, pgoff_t src, pgoff_t dst, pgoff_t len, bool full) { block_t *src_blkaddr; int *do_replace; pgoff_t olen; int ret; while (len) { olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len); src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode), array_size(olen, sizeof(block_t)), GFP_NOFS); if (!src_blkaddr) return -ENOMEM; do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode), array_size(olen, sizeof(int)), GFP_NOFS); if (!do_replace) { kvfree(src_blkaddr); return -ENOMEM; } ret = __read_out_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen); if (ret) goto roll_back; ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr, do_replace, src, dst, olen, full); if (ret) goto roll_back; src += olen; dst += olen; len -= olen; kvfree(src_blkaddr); kvfree(do_replace); } return 0; roll_back: __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen); kvfree(src_blkaddr); kvfree(do_replace); return ret; } static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); pgoff_t start = offset >> PAGE_SHIFT; pgoff_t end = (offset + len) >> PAGE_SHIFT; int ret; f2fs_balance_fs(sbi, true); /* avoid gc operation during block exchange */ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); filemap_invalidate_lock(inode->i_mapping); f2fs_lock_op(sbi); f2fs_drop_extent_tree(inode); truncate_pagecache(inode, offset); ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true); f2fs_unlock_op(sbi); filemap_invalidate_unlock(inode->i_mapping); f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); return ret; } static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len) { loff_t new_size; int ret; if (offset + len >= i_size_read(inode)) return -EINVAL; /* collapse range should be aligned to block size of f2fs. 
*/ if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1)) return -EINVAL; ret = f2fs_convert_inline_inode(inode); if (ret) return ret; /* write out all dirty pages from offset */ ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX); if (ret) return ret; ret = f2fs_do_collapse(inode, offset, len); if (ret) return ret; /* write out all moved pages, if possible */ filemap_invalidate_lock(inode->i_mapping); filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX); truncate_pagecache(inode, offset); new_size = i_size_read(inode) - len; ret = f2fs_truncate_blocks(inode, new_size, true); filemap_invalidate_unlock(inode->i_mapping); if (!ret) f2fs_i_size_write(inode, new_size); return ret; } static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start, pgoff_t end) { struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); pgoff_t index = start; unsigned int ofs_in_node = dn->ofs_in_node; blkcnt_t count = 0; int ret; for (; index < end; index++, dn->ofs_in_node++) { if (f2fs_data_blkaddr(dn) == NULL_ADDR) count++; } dn->ofs_in_node = ofs_in_node; ret = f2fs_reserve_new_blocks(dn, count); if (ret) return ret; dn->ofs_in_node = ofs_in_node; for (index = start; index < end; index++, dn->ofs_in_node++) { dn->data_blkaddr = f2fs_data_blkaddr(dn); /* * f2fs_reserve_new_blocks will not guarantee entire block * allocation. */ if (dn->data_blkaddr == NULL_ADDR) { ret = -ENOSPC; break; } if (dn->data_blkaddr == NEW_ADDR) continue; if (!f2fs_is_valid_blkaddr(sbi, dn->data_blkaddr, DATA_GENERIC_ENHANCE)) { ret = -EFSCORRUPTED; break; } f2fs_invalidate_blocks(sbi, dn->data_blkaddr, 1); f2fs_set_data_blkaddr(dn, NEW_ADDR); } f2fs_update_read_extent_cache_range(dn, start, 0, index - start); f2fs_update_age_extent_cache_range(dn, start, index - start); return ret; } static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, int mode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct address_space *mapping = inode->i_mapping; pgoff_t index, pg_start, pg_end; loff_t new_size = i_size_read(inode); loff_t off_start, off_end; int ret = 0; ret = inode_newsize_ok(inode, (len + offset)); if (ret) return ret; ret = f2fs_convert_inline_inode(inode); if (ret) return ret; ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1); if (ret) return ret; pg_start = ((unsigned long long) offset) >> PAGE_SHIFT; pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT; off_start = offset & (PAGE_SIZE - 1); off_end = (offset + len) & (PAGE_SIZE - 1); if (pg_start == pg_end) { ret = fill_zero(inode, pg_start, off_start, off_end - off_start); if (ret) return ret; new_size = max_t(loff_t, new_size, offset + len); } else { if (off_start) { ret = fill_zero(inode, pg_start++, off_start, PAGE_SIZE - off_start); if (ret) return ret; new_size = max_t(loff_t, new_size, (loff_t)pg_start << PAGE_SHIFT); } for (index = pg_start; index < pg_end;) { struct dnode_of_data dn; unsigned int end_offset; pgoff_t end; f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); filemap_invalidate_lock(mapping); truncate_pagecache_range(inode, (loff_t)index << PAGE_SHIFT, ((loff_t)pg_end << PAGE_SHIFT) - 1); f2fs_lock_op(sbi); set_new_dnode(&dn, inode, NULL, NULL, 0); ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE); if (ret) { f2fs_unlock_op(sbi); filemap_invalidate_unlock(mapping); f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); goto out; } end_offset = ADDRS_PER_PAGE(dn.node_page, inode); end = min(pg_end, end_offset - dn.ofs_in_node + index); ret = f2fs_do_zero_range(&dn, index, end); 
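			/*
			 * f2fs_do_zero_range() above reserved blocks for any
			 * holes in [index, end) and replaced already-written
			 * blocks with NEW_ADDR, so the range reads back as
			 * zeroes.  Illustrative userspace sketch (assuming a
			 * regular file on an f2fs mount; fd, offset and len
			 * are hypothetical) of how this path is reached via
			 * f2fs_fallocate():
			 *
			 *	#include <fcntl.h>
			 *	int fd = open("/mnt/f2fs/file", O_RDWR);
			 *	// zero the range; with KEEP_SIZE, i_size is kept
			 *	fallocate(fd, FALLOC_FL_ZERO_RANGE |
			 *			FALLOC_FL_KEEP_SIZE, offset, len);
			 */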
f2fs_put_dnode(&dn); f2fs_unlock_op(sbi); filemap_invalidate_unlock(mapping); f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); f2fs_balance_fs(sbi, dn.node_changed); if (ret) goto out; index = end; new_size = max_t(loff_t, new_size, (loff_t)index << PAGE_SHIFT); } if (off_end) { ret = fill_zero(inode, pg_end, 0, off_end); if (ret) goto out; new_size = max_t(loff_t, new_size, offset + len); } } out: if (new_size > i_size_read(inode)) { if (mode & FALLOC_FL_KEEP_SIZE) file_set_keep_isize(inode); else f2fs_i_size_write(inode, new_size); } return ret; } static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct address_space *mapping = inode->i_mapping; pgoff_t nr, pg_start, pg_end, delta, idx; loff_t new_size; int ret = 0; new_size = i_size_read(inode) + len; ret = inode_newsize_ok(inode, new_size); if (ret) return ret; if (offset >= i_size_read(inode)) return -EINVAL; /* insert range should be aligned to block size of f2fs. */ if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1)) return -EINVAL; ret = f2fs_convert_inline_inode(inode); if (ret) return ret; f2fs_balance_fs(sbi, true); filemap_invalidate_lock(mapping); ret = f2fs_truncate_blocks(inode, i_size_read(inode), true); filemap_invalidate_unlock(mapping); if (ret) return ret; /* write out all dirty pages from offset */ ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX); if (ret) return ret; pg_start = offset >> PAGE_SHIFT; pg_end = (offset + len) >> PAGE_SHIFT; delta = pg_end - pg_start; idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); /* avoid gc operation during block exchange */ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); filemap_invalidate_lock(mapping); truncate_pagecache(inode, offset); while (!ret && idx > pg_start) { nr = idx - pg_start; if (nr > delta) nr = delta; idx -= nr; f2fs_lock_op(sbi); f2fs_drop_extent_tree(inode); ret = __exchange_data_block(inode, inode, idx, idx + delta, nr, false); f2fs_unlock_op(sbi); } filemap_invalidate_unlock(mapping); f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); if (ret) return ret; /* write out all moved pages, if possible */ filemap_invalidate_lock(mapping); ret = filemap_write_and_wait_range(mapping, offset, LLONG_MAX); truncate_pagecache(inode, offset); filemap_invalidate_unlock(mapping); if (!ret) f2fs_i_size_write(inode, new_size); return ret; } static int f2fs_expand_inode_data(struct inode *inode, loff_t offset, loff_t len, int mode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_map_blocks map = { .m_next_pgofs = NULL, .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE, .m_may_create = true }; struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO, .init_gc_type = FG_GC, .should_migrate_blocks = false, .err_gc_skipped = true, .nr_free_secs = 0 }; pgoff_t pg_start, pg_end; loff_t new_size; loff_t off_end; block_t expanded = 0; int err; err = inode_newsize_ok(inode, (len + offset)); if (err) return err; err = f2fs_convert_inline_inode(inode); if (err) return err; f2fs_balance_fs(sbi, true); pg_start = ((unsigned long long)offset) >> PAGE_SHIFT; pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT; off_end = (offset + len) & (PAGE_SIZE - 1); map.m_lblk = pg_start; map.m_len = pg_end - pg_start; if (off_end) map.m_len++; if (!map.m_len) return 0; if (f2fs_is_pinned_file(inode)) { block_t sec_blks = CAP_BLKS_PER_SEC(sbi); block_t sec_len = roundup(map.m_len, sec_blks); map.m_len = sec_blks; next_alloc: if (has_not_enough_free_secs(sbi, 0, 
				f2fs_sb_has_blkzoned(sbi) ?
				ZONED_PIN_SEC_REQUIRED_COUNT :
				GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
			f2fs_down_write(&sbi->gc_lock);
			stat_inc_gc_call_count(sbi, FOREGROUND);
			err = f2fs_gc(sbi, &gc_control);
			if (err && err != -ENODATA)
				goto out_err;
		}

		f2fs_down_write(&sbi->pin_sem);

		err = f2fs_allocate_pinning_section(sbi);
		if (err) {
			f2fs_up_write(&sbi->pin_sem);
			goto out_err;
		}

		map.m_seg_type = CURSEG_COLD_DATA_PINNED;
		err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_DIO);
		file_dont_truncate(inode);

		f2fs_up_write(&sbi->pin_sem);

		expanded += map.m_len;
		sec_len -= map.m_len;
		map.m_lblk += map.m_len;
		if (!err && sec_len)
			goto next_alloc;

		map.m_len = expanded;
	} else {
		err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRE_AIO);
		expanded = map.m_len;
	}
out_err:
	if (err) {
		pgoff_t last_off;

		if (!expanded)
			return err;

		last_off = pg_start + expanded - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (new_size > i_size_read(inode)) {
		if (mode & FALLOC_FL_KEEP_SIZE)
			file_set_keep_isize(inode);
		else
			f2fs_i_size_write(inode, new_size);
	}

	return err;
}

static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
		return -ENOSPC;
	if (!f2fs_is_compress_backend_ready(inode) || IS_DEVICE_ALIASING(inode))
		return -EOPNOTSUPP;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	/*
	 * Pinned files should not support partial truncation since their
	 * blocks can be used by applications.
	 */
	if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
		(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
			FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE))) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = file_modified(file);
	if (ret)
		goto out;

	/*
	 * wait for inflight dio, blocks should be removed after IO
	 * completion.
	 */
	inode_dio_wait(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = f2fs_punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = f2fs_expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		f2fs_mark_inode_dirty_sync(inode, false);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);

	return ret;
}

static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called on every close call. So we should
	 * not drop any in-memory pages on a close issued by another process.
*/ if (!(filp->f_mode & FMODE_WRITE) || atomic_read(&inode->i_writecount) != 1) return 0; inode_lock(inode); f2fs_abort_atomic_write(inode, true); inode_unlock(inode); return 0; } static int f2fs_file_flush(struct file *file, fl_owner_t id) { struct inode *inode = file_inode(file); /* * If the process doing a transaction is crashed, we should do * roll-back. Otherwise, other reader/write can see corrupted database * until all the writers close its file. Since this should be done * before dropping file lock, it needs to do in ->flush. */ if (F2FS_I(inode)->atomic_write_task == current && (current->flags & PF_EXITING)) { inode_lock(inode); f2fs_abort_atomic_write(inode, true); inode_unlock(inode); } return 0; } static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask) { struct f2fs_inode_info *fi = F2FS_I(inode); u32 masked_flags = fi->i_flags & mask; /* mask can be shrunk by flags_valid selector */ iflags &= mask; /* Is it quota file? Do not allow user to mess with it */ if (IS_NOQUOTA(inode)) return -EPERM; if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) { if (!f2fs_sb_has_casefold(F2FS_I_SB(inode))) return -EOPNOTSUPP; if (!f2fs_empty_dir(inode)) return -ENOTEMPTY; } if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) { if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) return -EOPNOTSUPP; if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL)) return -EINVAL; } if ((iflags ^ masked_flags) & F2FS_COMPR_FL) { if (masked_flags & F2FS_COMPR_FL) { if (!f2fs_disable_compressed_file(inode)) return -EINVAL; } else { /* try to convert inline_data to support compression */ int err = f2fs_convert_inline_inode(inode); if (err) return err; f2fs_down_write(&fi->i_sem); if (!f2fs_may_compress(inode) || (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))) { f2fs_up_write(&fi->i_sem); return -EINVAL; } err = set_compress_context(inode); f2fs_up_write(&fi->i_sem); if (err) return err; } } fi->i_flags = iflags | (fi->i_flags & ~mask); f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) && (fi->i_flags & F2FS_NOCOMP_FL)); if (fi->i_flags & F2FS_PROJINHERIT_FL) set_inode_flag(inode, FI_PROJ_INHERIT); else clear_inode_flag(inode, FI_PROJ_INHERIT); inode_set_ctime_current(inode); f2fs_set_inode_flags(inode); f2fs_mark_inode_dirty_sync(inode, true); return 0; } /* FS_IOC_[GS]ETFLAGS and FS_IOC_FS[GS]ETXATTR support */ /* * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL. * * Translating flags to fsx_flags value used by FS_IOC_FSGETXATTR and * FS_IOC_FSSETXATTR is done by the VFS. 
*/ static const struct { u32 iflag; u32 fsflag; } f2fs_fsflags_map[] = { { F2FS_COMPR_FL, FS_COMPR_FL }, { F2FS_SYNC_FL, FS_SYNC_FL }, { F2FS_IMMUTABLE_FL, FS_IMMUTABLE_FL }, { F2FS_APPEND_FL, FS_APPEND_FL }, { F2FS_NODUMP_FL, FS_NODUMP_FL }, { F2FS_NOATIME_FL, FS_NOATIME_FL }, { F2FS_NOCOMP_FL, FS_NOCOMP_FL }, { F2FS_INDEX_FL, FS_INDEX_FL }, { F2FS_DIRSYNC_FL, FS_DIRSYNC_FL }, { F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL }, { F2FS_CASEFOLD_FL, FS_CASEFOLD_FL }, }; #define F2FS_GETTABLE_FS_FL ( \ FS_COMPR_FL | \ FS_SYNC_FL | \ FS_IMMUTABLE_FL | \ FS_APPEND_FL | \ FS_NODUMP_FL | \ FS_NOATIME_FL | \ FS_NOCOMP_FL | \ FS_INDEX_FL | \ FS_DIRSYNC_FL | \ FS_PROJINHERIT_FL | \ FS_ENCRYPT_FL | \ FS_INLINE_DATA_FL | \ FS_NOCOW_FL | \ FS_VERITY_FL | \ FS_CASEFOLD_FL) #define F2FS_SETTABLE_FS_FL ( \ FS_COMPR_FL | \ FS_SYNC_FL | \ FS_IMMUTABLE_FL | \ FS_APPEND_FL | \ FS_NODUMP_FL | \ FS_NOATIME_FL | \ FS_NOCOMP_FL | \ FS_DIRSYNC_FL | \ FS_PROJINHERIT_FL | \ FS_CASEFOLD_FL) /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */ static inline u32 f2fs_iflags_to_fsflags(u32 iflags) { u32 fsflags = 0; int i; for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++) if (iflags & f2fs_fsflags_map[i].iflag) fsflags |= f2fs_fsflags_map[i].fsflag; return fsflags; } /* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */ static inline u32 f2fs_fsflags_to_iflags(u32 fsflags) { u32 iflags = 0; int i; for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++) if (fsflags & f2fs_fsflags_map[i].fsflag) iflags |= f2fs_fsflags_map[i].iflag; return iflags; } static int f2fs_ioc_getversion(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); return put_user(inode->i_generation, (int __user *)arg); } static int f2fs_ioc_start_atomic_write(struct file *filp, bool truncate) { struct inode *inode = file_inode(filp); struct mnt_idmap *idmap = file_mnt_idmap(filp); struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); loff_t isize; int ret; if (!(filp->f_mode & FMODE_WRITE)) return -EBADF; if (!inode_owner_or_capable(idmap, inode)) return -EACCES; if (!S_ISREG(inode->i_mode)) return -EINVAL; if (filp->f_flags & O_DIRECT) return -EINVAL; ret = mnt_want_write_file(filp); if (ret) return ret; inode_lock(inode); if (!f2fs_disable_compressed_file(inode) || f2fs_is_pinned_file(inode)) { ret = -EINVAL; goto out; } if (f2fs_is_atomic_file(inode)) goto out; ret = f2fs_convert_inline_inode(inode); if (ret) goto out; f2fs_down_write(&fi->i_gc_rwsem[WRITE]); f2fs_down_write(&fi->i_gc_rwsem[READ]); /* * Should wait end_io to count F2FS_WB_CP_DATA correctly by * f2fs_is_atomic_file. 
*/ if (get_dirty_pages(inode)) f2fs_warn(sbi, "Unexpected flush for atomic writes: ino=%lu, npages=%u", inode->i_ino, get_dirty_pages(inode)); ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); if (ret) goto out_unlock; /* Check if the inode already has a COW inode */ if (fi->cow_inode == NULL) { /* Create a COW inode for atomic write */ struct dentry *dentry = file_dentry(filp); struct inode *dir = d_inode(dentry->d_parent); ret = f2fs_get_tmpfile(idmap, dir, &fi->cow_inode); if (ret) goto out_unlock; set_inode_flag(fi->cow_inode, FI_COW_FILE); clear_inode_flag(fi->cow_inode, FI_INLINE_DATA); /* Set the COW inode's atomic_inode to the atomic inode */ F2FS_I(fi->cow_inode)->atomic_inode = inode; } else { /* Reuse the already created COW inode */ f2fs_bug_on(sbi, get_dirty_pages(fi->cow_inode)); invalidate_mapping_pages(fi->cow_inode->i_mapping, 0, -1); ret = f2fs_do_truncate_blocks(fi->cow_inode, 0, true); if (ret) goto out_unlock; } f2fs_write_inode(inode, NULL); stat_inc_atomic_inode(inode); set_inode_flag(inode, FI_ATOMIC_FILE); isize = i_size_read(inode); fi->original_i_size = isize; if (truncate) { set_inode_flag(inode, FI_ATOMIC_REPLACE); truncate_inode_pages_final(inode->i_mapping); f2fs_i_size_write(inode, 0); isize = 0; } f2fs_i_size_write(fi->cow_inode, isize); out_unlock: f2fs_up_write(&fi->i_gc_rwsem[READ]); f2fs_up_write(&fi->i_gc_rwsem[WRITE]); if (ret) goto out; f2fs_update_time(sbi, REQ_TIME); fi->atomic_write_task = current; stat_update_max_atomic_write(inode); fi->atomic_write_cnt = 0; out: inode_unlock(inode); mnt_drop_write_file(filp); return ret; } static int f2fs_ioc_commit_atomic_write(struct file *filp) { struct inode *inode = file_inode(filp); struct mnt_idmap *idmap = file_mnt_idmap(filp); int ret; if (!(filp->f_mode & FMODE_WRITE)) return -EBADF; if (!inode_owner_or_capable(idmap, inode)) return -EACCES; ret = mnt_want_write_file(filp); if (ret) return ret; f2fs_balance_fs(F2FS_I_SB(inode), true); inode_lock(inode); if (f2fs_is_atomic_file(inode)) { ret = f2fs_commit_atomic_write(inode); if (!ret) ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true); f2fs_abort_atomic_write(inode, ret); } else { ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false); } inode_unlock(inode); mnt_drop_write_file(filp); return ret; } static int f2fs_ioc_abort_atomic_write(struct file *filp) { struct inode *inode = file_inode(filp); struct mnt_idmap *idmap = file_mnt_idmap(filp); int ret; if (!(filp->f_mode & FMODE_WRITE)) return -EBADF; if (!inode_owner_or_capable(idmap, inode)) return -EACCES; ret = mnt_want_write_file(filp); if (ret) return ret; inode_lock(inode); f2fs_abort_atomic_write(inode, true); inode_unlock(inode); mnt_drop_write_file(filp); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); return ret; } int f2fs_do_shutdown(struct f2fs_sb_info *sbi, unsigned int flag, bool readonly, bool need_lock) { struct super_block *sb = sbi->sb; int ret = 0; switch (flag) { case F2FS_GOING_DOWN_FULLSYNC: ret = bdev_freeze(sb->s_bdev); if (ret) goto out; f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN); bdev_thaw(sb->s_bdev); break; case F2FS_GOING_DOWN_METASYNC: /* do checkpoint only */ ret = f2fs_sync_fs(sb, 1); if (ret) { if (ret == -EIO) ret = 0; goto out; } f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN); break; case F2FS_GOING_DOWN_NOSYNC: f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN); break; case F2FS_GOING_DOWN_METAFLUSH: f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO); f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_SHUTDOWN); 
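		/*
		 * F2FS_GOING_DOWN_METAFLUSH writes out dirty meta pages
		 * without issuing a full checkpoint before stopping further
		 * checkpoints.  The F2FS_GOING_DOWN_* modes are passed in
		 * from the F2FS_IOC_SHUTDOWN ioctl handled below.
		 * Illustrative userspace sketch (assuming the uapi header
		 * <linux/f2fs.h>, which defines F2FS_IOC_SHUTDOWN and the
		 * F2FS_GOING_DOWN_* values, and a hypothetical fd opened on
		 * the target f2fs mount):
		 *
		 *	#include <sys/ioctl.h>
		 *	#include <linux/f2fs.h>
		 *	__u32 flag = F2FS_GOING_DOWN_METAFLUSH;
		 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &flag); // needs CAP_SYS_ADMIN
		 */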
break; case F2FS_GOING_DOWN_NEED_FSCK: set_sbi_flag(sbi, SBI_NEED_FSCK); set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK); set_sbi_flag(sbi, SBI_IS_DIRTY); /* do checkpoint only */ ret = f2fs_sync_fs(sb, 1); if (ret == -EIO) ret = 0; goto out; default: ret = -EINVAL; goto out; } if (readonly) goto out; /* * grab sb->s_umount to avoid racing w/ remount() and other shutdown * paths. */ if (need_lock) down_write(&sbi->sb->s_umount); f2fs_stop_gc_thread(sbi); f2fs_stop_discard_thread(sbi); f2fs_drop_discard_cmd(sbi); clear_opt(sbi, DISCARD); if (need_lock) up_write(&sbi->sb->s_umount); f2fs_update_time(sbi, REQ_TIME); out: trace_f2fs_shutdown(sbi, flag, ret); return ret; } static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); __u32 in; int ret; bool need_drop = false, readonly = false; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (get_user(in, (__u32 __user *)arg)) return -EFAULT; if (in != F2FS_GOING_DOWN_FULLSYNC) { ret = mnt_want_write_file(filp); if (ret) { if (ret != -EROFS) return ret; /* fallback to nosync shutdown for readonly fs */ in = F2FS_GOING_DOWN_NOSYNC; readonly = true; } else { need_drop = true; } } ret = f2fs_do_shutdown(sbi, in, readonly, true); if (need_drop) mnt_drop_write_file(filp); return ret; } static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct super_block *sb = inode->i_sb; struct fstrim_range range; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!f2fs_hw_support_discard(F2FS_SB(sb))) return -EOPNOTSUPP; if (copy_from_user(&range, (struct fstrim_range __user *)arg, sizeof(range))) return -EFAULT; ret = mnt_want_write_file(filp); if (ret) return ret; range.minlen = max((unsigned int)range.minlen, bdev_discard_granularity(sb->s_bdev)); ret = f2fs_trim_fs(F2FS_SB(sb), &range); mnt_drop_write_file(filp); if (ret < 0) return ret; if (copy_to_user((struct fstrim_range __user *)arg, &range, sizeof(range))) return -EFAULT; f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); return 0; } static bool uuid_is_nonzero(__u8 u[16]) { int i; for (i = 0; i < 16; i++) if (u[i]) return true; return false; } static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); int ret; if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode))) return -EOPNOTSUPP; ret = fscrypt_ioctl_set_policy(filp, (const void __user *)arg); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); return ret; } static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg) { if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) return -EOPNOTSUPP; return fscrypt_ioctl_get_policy(filp, (void __user *)arg); } static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); u8 encrypt_pw_salt[16]; int err; if (!f2fs_sb_has_encrypt(sbi)) return -EOPNOTSUPP; err = mnt_want_write_file(filp); if (err) return err; f2fs_down_write(&sbi->sb_lock); if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt)) goto got_it; /* update superblock with uuid */ generate_random_uuid(sbi->raw_super->encrypt_pw_salt); err = f2fs_commit_super(sbi, false); if (err) { /* undo new data */ memset(sbi->raw_super->encrypt_pw_salt, 0, 16); goto out_err; } got_it: memcpy(encrypt_pw_salt, sbi->raw_super->encrypt_pw_salt, 16); out_err: f2fs_up_write(&sbi->sb_lock); mnt_drop_write_file(filp); if (!err && copy_to_user((__u8 __user *)arg, 
encrypt_pw_salt, 16)) err = -EFAULT; return err; } static int f2fs_ioc_get_encryption_policy_ex(struct file *filp, unsigned long arg) { if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) return -EOPNOTSUPP; return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg); } static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg) { if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) return -EOPNOTSUPP; return fscrypt_ioctl_add_key(filp, (void __user *)arg); } static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg) { if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) return -EOPNOTSUPP; return fscrypt_ioctl_remove_key(filp, (void __user *)arg); } static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp, unsigned long arg) { if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) return -EOPNOTSUPP; return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg); } static int f2fs_ioc_get_encryption_key_status(struct file *filp, unsigned long arg) { if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) return -EOPNOTSUPP; return fscrypt_ioctl_get_key_status(filp, (void __user *)arg); } static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg) { if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp)))) return -EOPNOTSUPP; return fscrypt_ioctl_get_nonce(filp, (void __user *)arg); } static int f2fs_ioc_gc(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_gc_control gc_control = { .victim_segno = NULL_SEGNO, .no_bg_gc = false, .should_migrate_blocks = false, .nr_free_secs = 0 }; __u32 sync; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (get_user(sync, (__u32 __user *)arg)) return -EFAULT; if (f2fs_readonly(sbi->sb)) return -EROFS; ret = mnt_want_write_file(filp); if (ret) return ret; if (!sync) { if (!f2fs_down_write_trylock(&sbi->gc_lock)) { ret = -EBUSY; goto out; } } else { f2fs_down_write(&sbi->gc_lock); } gc_control.init_gc_type = sync ? FG_GC : BG_GC; gc_control.err_gc_skipped = sync; stat_inc_gc_call_count(sbi, FOREGROUND); ret = f2fs_gc(sbi, &gc_control); out: mnt_drop_write_file(filp); return ret; } static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range) { struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp)); struct f2fs_gc_control gc_control = { .init_gc_type = range->sync ? 
FG_GC : BG_GC, .no_bg_gc = false, .should_migrate_blocks = false, .err_gc_skipped = range->sync, .nr_free_secs = 0 }; u64 end; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (f2fs_readonly(sbi->sb)) return -EROFS; end = range->start + range->len; if (end < range->start || range->start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) return -EINVAL; ret = mnt_want_write_file(filp); if (ret) return ret; do_more: if (!range->sync) { if (!f2fs_down_write_trylock(&sbi->gc_lock)) { ret = -EBUSY; goto out; } } else { f2fs_down_write(&sbi->gc_lock); } gc_control.victim_segno = GET_SEGNO(sbi, range->start); stat_inc_gc_call_count(sbi, FOREGROUND); ret = f2fs_gc(sbi, &gc_control); if (ret) { if (ret == -EBUSY) ret = -EAGAIN; goto out; } range->start += CAP_BLKS_PER_SEC(sbi); if (range->start <= end) goto do_more; out: mnt_drop_write_file(filp); return ret; } static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg) { struct f2fs_gc_range range; if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg, sizeof(range))) return -EFAULT; return __f2fs_ioc_gc_range(filp, &range); } static int f2fs_ioc_write_checkpoint(struct file *filp) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (f2fs_readonly(sbi->sb)) return -EROFS; if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled."); return -EINVAL; } ret = mnt_want_write_file(filp); if (ret) return ret; ret = f2fs_sync_fs(sbi->sb, 1); mnt_drop_write_file(filp); return ret; } static int f2fs_defragment_range(struct f2fs_sb_info *sbi, struct file *filp, struct f2fs_defragment *range) { struct inode *inode = file_inode(filp); struct f2fs_map_blocks map = { .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE, .m_may_create = false }; struct extent_info ei = {}; pgoff_t pg_start, pg_end, next_pgofs; unsigned int total = 0, sec_num; block_t blk_end = 0; bool fragmented = false; int err; f2fs_balance_fs(sbi, true); inode_lock(inode); pg_start = range->start >> PAGE_SHIFT; pg_end = min_t(pgoff_t, (range->start + range->len) >> PAGE_SHIFT, DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE)); if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED) || f2fs_is_atomic_file(inode)) { err = -EINVAL; goto unlock_out; } /* if in-place-update policy is enabled, don't waste time here */ set_inode_flag(inode, FI_OPU_WRITE); if (f2fs_should_update_inplace(inode, NULL)) { err = -EINVAL; goto out; } /* writeback all dirty pages in the range */ err = filemap_write_and_wait_range(inode->i_mapping, pg_start << PAGE_SHIFT, (pg_end << PAGE_SHIFT) - 1); if (err) goto out; /* * lookup mapping info in extent cache, skip defragmenting if physical * block addresses are continuous. */ if (f2fs_lookup_read_extent_cache(inode, pg_start, &ei)) { if ((pgoff_t)ei.fofs + ei.len >= pg_end) goto out; } map.m_lblk = pg_start; map.m_next_pgofs = &next_pgofs; /* * lookup mapping info in dnode page cache, skip defragmenting if all * physical block addresses are continuous even if there are hole(s) * in logical blocks. 
*/ while (map.m_lblk < pg_end) { map.m_len = pg_end - map.m_lblk; err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT); if (err) goto out; if (!(map.m_flags & F2FS_MAP_FLAGS)) { map.m_lblk = next_pgofs; continue; } if (blk_end && blk_end != map.m_pblk) fragmented = true; /* record total count of block that we're going to move */ total += map.m_len; blk_end = map.m_pblk + map.m_len; map.m_lblk += map.m_len; } if (!fragmented) { total = 0; goto out; } sec_num = DIV_ROUND_UP(total, CAP_BLKS_PER_SEC(sbi)); /* * make sure there are enough free section for LFS allocation, this can * avoid defragment running in SSR mode when free section are allocated * intensively */ if (has_not_enough_free_secs(sbi, 0, sec_num)) { err = -EAGAIN; goto out; } map.m_lblk = pg_start; map.m_len = pg_end - pg_start; total = 0; while (map.m_lblk < pg_end) { pgoff_t idx; int cnt = 0; do_map: map.m_len = pg_end - map.m_lblk; err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT); if (err) goto clear_out; if (!(map.m_flags & F2FS_MAP_FLAGS)) { map.m_lblk = next_pgofs; goto check; } set_inode_flag(inode, FI_SKIP_WRITES); idx = map.m_lblk; while (idx < map.m_lblk + map.m_len && cnt < BLKS_PER_SEG(sbi)) { struct page *page; page = f2fs_get_lock_data_page(inode, idx, true); if (IS_ERR(page)) { err = PTR_ERR(page); goto clear_out; } f2fs_wait_on_page_writeback(page, DATA, true, true); set_page_dirty(page); set_page_private_gcing(page); f2fs_put_page(page, 1); idx++; cnt++; total++; } map.m_lblk = idx; check: if (map.m_lblk < pg_end && cnt < BLKS_PER_SEG(sbi)) goto do_map; clear_inode_flag(inode, FI_SKIP_WRITES); err = filemap_fdatawrite(inode->i_mapping); if (err) goto out; } clear_out: clear_inode_flag(inode, FI_SKIP_WRITES); out: clear_inode_flag(inode, FI_OPU_WRITE); unlock_out: inode_unlock(inode); if (!err) range->len = (u64)total << PAGE_SHIFT; return err; } static int f2fs_ioc_defragment(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_defragment range; int err; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!S_ISREG(inode->i_mode)) return -EINVAL; if (f2fs_readonly(sbi->sb)) return -EROFS; if (copy_from_user(&range, (struct f2fs_defragment __user *)arg, sizeof(range))) return -EFAULT; /* verify alignment of offset & size */ if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1)) return -EINVAL; if (unlikely((range.start + range.len) >> PAGE_SHIFT > max_file_blocks(inode))) return -EINVAL; err = mnt_want_write_file(filp); if (err) return err; err = f2fs_defragment_range(sbi, filp, &range); mnt_drop_write_file(filp); if (range.len) f2fs_update_time(sbi, REQ_TIME); if (err < 0) return err; if (copy_to_user((struct f2fs_defragment __user *)arg, &range, sizeof(range))) return -EFAULT; return 0; } static int f2fs_move_file_range(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, size_t len) { struct inode *src = file_inode(file_in); struct inode *dst = file_inode(file_out); struct f2fs_sb_info *sbi = F2FS_I_SB(src); size_t olen = len, dst_max_i_size = 0; size_t dst_osize; int ret; if (file_in->f_path.mnt != file_out->f_path.mnt || src->i_sb != dst->i_sb) return -EXDEV; if (unlikely(f2fs_readonly(src->i_sb))) return -EROFS; if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode)) return -EINVAL; if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst)) return -EOPNOTSUPP; if (pos_out < 0 || pos_in < 0) return -EINVAL; if (src == dst) { if (pos_in == pos_out) return 0; if (pos_out > pos_in && pos_out < 
pos_in + len) return -EINVAL; } inode_lock(src); if (src != dst) { ret = -EBUSY; if (!inode_trylock(dst)) goto out; } if (f2fs_compressed_file(src) || f2fs_compressed_file(dst) || f2fs_is_pinned_file(src) || f2fs_is_pinned_file(dst)) { ret = -EOPNOTSUPP; goto out_unlock; } if (f2fs_is_atomic_file(src) || f2fs_is_atomic_file(dst)) { ret = -EINVAL; goto out_unlock; } ret = -EINVAL; if (pos_in + len > src->i_size || pos_in + len < pos_in) goto out_unlock; if (len == 0) olen = len = src->i_size - pos_in; if (pos_in + len == src->i_size) len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in; if (len == 0) { ret = 0; goto out_unlock; } dst_osize = dst->i_size; if (pos_out + olen > dst->i_size) dst_max_i_size = pos_out + olen; /* verify the end result is block aligned */ if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) || !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) || !IS_ALIGNED(pos_out, F2FS_BLKSIZE)) goto out_unlock; ret = f2fs_convert_inline_inode(src); if (ret) goto out_unlock; ret = f2fs_convert_inline_inode(dst); if (ret) goto out_unlock; /* write out all dirty pages from offset */ ret = filemap_write_and_wait_range(src->i_mapping, pos_in, pos_in + len); if (ret) goto out_unlock; ret = filemap_write_and_wait_range(dst->i_mapping, pos_out, pos_out + len); if (ret) goto out_unlock; f2fs_balance_fs(sbi, true); f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]); if (src != dst) { ret = -EBUSY; if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE])) goto out_src; } f2fs_lock_op(sbi); ret = __exchange_data_block(src, dst, F2FS_BYTES_TO_BLK(pos_in), F2FS_BYTES_TO_BLK(pos_out), F2FS_BYTES_TO_BLK(len), false); if (!ret) { if (dst_max_i_size) f2fs_i_size_write(dst, dst_max_i_size); else if (dst_osize != dst->i_size) f2fs_i_size_write(dst, dst_osize); } f2fs_unlock_op(sbi); if (src != dst) f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]); out_src: f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]); if (ret) goto out_unlock; inode_set_mtime_to_ts(src, inode_set_ctime_current(src)); f2fs_mark_inode_dirty_sync(src, false); if (src != dst) { inode_set_mtime_to_ts(dst, inode_set_ctime_current(dst)); f2fs_mark_inode_dirty_sync(dst, false); } f2fs_update_time(sbi, REQ_TIME); out_unlock: if (src != dst) inode_unlock(dst); out: inode_unlock(src); return ret; } static int __f2fs_ioc_move_range(struct file *filp, struct f2fs_move_range *range) { int err; if (!(filp->f_mode & FMODE_READ) || !(filp->f_mode & FMODE_WRITE)) return -EBADF; CLASS(fd, dst)(range->dst_fd); if (fd_empty(dst)) return -EBADF; if (!(fd_file(dst)->f_mode & FMODE_WRITE)) return -EBADF; err = mnt_want_write_file(filp); if (err) return err; err = f2fs_move_file_range(filp, range->pos_in, fd_file(dst), range->pos_out, range->len); mnt_drop_write_file(filp); return err; } static int f2fs_ioc_move_range(struct file *filp, unsigned long arg) { struct f2fs_move_range range; if (copy_from_user(&range, (struct f2fs_move_range __user *)arg, sizeof(range))) return -EFAULT; return __f2fs_ioc_move_range(filp, &range); } static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct sit_info *sm = SIT_I(sbi); unsigned int start_segno = 0, end_segno = 0; unsigned int dev_start_segno = 0, dev_end_segno = 0; struct f2fs_flush_device range; struct f2fs_gc_control gc_control = { .init_gc_type = FG_GC, .should_migrate_blocks = true, .err_gc_skipped = true, .nr_free_secs = 0 }; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (f2fs_readonly(sbi->sb)) return -EROFS; if 
(unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) return -EINVAL; if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg, sizeof(range))) return -EFAULT; if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num || __is_large_section(sbi)) { f2fs_warn(sbi, "Can't flush %u in %d for SEGS_PER_SEC %u != 1", range.dev_num, sbi->s_ndevs, SEGS_PER_SEC(sbi)); return -EINVAL; } ret = mnt_want_write_file(filp); if (ret) return ret; if (range.dev_num != 0) dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk); dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk); start_segno = sm->last_victim[FLUSH_DEVICE]; if (start_segno < dev_start_segno || start_segno >= dev_end_segno) start_segno = dev_start_segno; end_segno = min(start_segno + range.segments, dev_end_segno); while (start_segno < end_segno) { if (!f2fs_down_write_trylock(&sbi->gc_lock)) { ret = -EBUSY; goto out; } sm->last_victim[GC_CB] = end_segno + 1; sm->last_victim[GC_GREEDY] = end_segno + 1; sm->last_victim[ALLOC_NEXT] = end_segno + 1; gc_control.victim_segno = start_segno; stat_inc_gc_call_count(sbi, FOREGROUND); ret = f2fs_gc(sbi, &gc_control); if (ret == -EAGAIN) ret = 0; else if (ret < 0) break; start_segno++; } out: mnt_drop_write_file(filp); return ret; } static int f2fs_ioc_get_features(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature); /* Must validate to set it with SQLite behavior in Android. */ sb_feature |= F2FS_FEATURE_ATOMIC_WRITE; return put_user(sb_feature, (u32 __user *)arg); } #ifdef CONFIG_QUOTA int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid) { struct dquot *transfer_to[MAXQUOTAS] = {}; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct super_block *sb = sbi->sb; int err; transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid)); if (IS_ERR(transfer_to[PRJQUOTA])) return PTR_ERR(transfer_to[PRJQUOTA]); err = __dquot_transfer(inode, transfer_to); if (err) set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); dqput(transfer_to[PRJQUOTA]); return err; } static int f2fs_ioc_setproject(struct inode *inode, __u32 projid) { struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode *ri = NULL; kprojid_t kprojid; int err; if (!f2fs_sb_has_project_quota(sbi)) { if (projid != F2FS_DEF_PROJID) return -EOPNOTSUPP; else return 0; } if (!f2fs_has_extra_attr(inode)) return -EOPNOTSUPP; kprojid = make_kprojid(&init_user_ns, (projid_t)projid); if (projid_eq(kprojid, fi->i_projid)) return 0; err = -EPERM; /* Is it quota file? 
Do not allow user to mess with it */ if (IS_NOQUOTA(inode)) return err; if (!F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid)) return -EOVERFLOW; err = f2fs_dquot_initialize(inode); if (err) return err; f2fs_lock_op(sbi); err = f2fs_transfer_project_quota(inode, kprojid); if (err) goto out_unlock; fi->i_projid = kprojid; inode_set_ctime_current(inode); f2fs_mark_inode_dirty_sync(inode, true); out_unlock: f2fs_unlock_op(sbi); return err; } #else int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid) { return 0; } static int f2fs_ioc_setproject(struct inode *inode, __u32 projid) { if (projid != F2FS_DEF_PROJID) return -EOPNOTSUPP; return 0; } #endif int f2fs_fileattr_get(struct dentry *dentry, struct fileattr *fa) { struct inode *inode = d_inode(dentry); struct f2fs_inode_info *fi = F2FS_I(inode); u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags); if (IS_ENCRYPTED(inode)) fsflags |= FS_ENCRYPT_FL; if (IS_VERITY(inode)) fsflags |= FS_VERITY_FL; if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) fsflags |= FS_INLINE_DATA_FL; if (is_inode_flag_set(inode, FI_PIN_FILE)) fsflags |= FS_NOCOW_FL; fileattr_fill_flags(fa, fsflags & F2FS_GETTABLE_FS_FL); if (f2fs_sb_has_project_quota(F2FS_I_SB(inode))) fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid); return 0; } int f2fs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, struct fileattr *fa) { struct inode *inode = d_inode(dentry); u32 fsflags = fa->flags, mask = F2FS_SETTABLE_FS_FL; u32 iflags; int err; if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) return -EIO; if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode))) return -ENOSPC; if (fsflags & ~F2FS_GETTABLE_FS_FL) return -EOPNOTSUPP; fsflags &= F2FS_SETTABLE_FS_FL; if (!fa->flags_valid) mask &= FS_COMMON_FL; iflags = f2fs_fsflags_to_iflags(fsflags); if (f2fs_mask_flags(inode->i_mode, iflags) != iflags) return -EOPNOTSUPP; err = f2fs_setflags_common(inode, iflags, f2fs_fsflags_to_iflags(mask)); if (!err) err = f2fs_ioc_setproject(inode, fa->fsx_projid); return err; } int f2fs_pin_file_control(struct inode *inode, bool inc) { struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); if (IS_DEVICE_ALIASING(inode)) return -EINVAL; if (fi->i_gc_failures >= sbi->gc_pin_file_threshold) { f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials", __func__, inode->i_ino, fi->i_gc_failures); clear_inode_flag(inode, FI_PIN_FILE); return -EAGAIN; } /* Use i_gc_failures for normal file as a risk signal. */ if (inc) f2fs_i_gc_failures_write(inode, fi->i_gc_failures + 1); return 0; } static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); __u32 pin; int ret = 0; if (get_user(pin, (__u32 __user *)arg)) return -EFAULT; if (!S_ISREG(inode->i_mode)) return -EINVAL; if (f2fs_readonly(sbi->sb)) return -EROFS; if (!pin && IS_DEVICE_ALIASING(inode)) return -EOPNOTSUPP; ret = mnt_want_write_file(filp); if (ret) return ret; inode_lock(inode); if (f2fs_is_atomic_file(inode)) { ret = -EINVAL; goto out; } if (!pin) { clear_inode_flag(inode, FI_PIN_FILE); f2fs_i_gc_failures_write(inode, 0); goto done; } else if (f2fs_is_pinned_file(inode)) { goto done; } if (F2FS_HAS_BLOCKS(inode)) { ret = -EFBIG; goto out; } /* Let's allow file pinning on zoned device. 
*/ if (!f2fs_sb_has_blkzoned(sbi) && f2fs_should_update_outplace(inode, NULL)) { ret = -EINVAL; goto out; } if (f2fs_pin_file_control(inode, false)) { ret = -EAGAIN; goto out; } ret = f2fs_convert_inline_inode(inode); if (ret) goto out; if (!f2fs_disable_compressed_file(inode)) { ret = -EOPNOTSUPP; goto out; } set_inode_flag(inode, FI_PIN_FILE); ret = F2FS_I(inode)->i_gc_failures; done: f2fs_update_time(sbi, REQ_TIME); out: inode_unlock(inode); mnt_drop_write_file(filp); return ret; } static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); __u32 pin = 0; if (is_inode_flag_set(inode, FI_PIN_FILE)) pin = F2FS_I(inode)->i_gc_failures; return put_user(pin, (u32 __user *)arg); } static int f2fs_ioc_get_dev_alias_file(struct file *filp, unsigned long arg) { return put_user(IS_DEVICE_ALIASING(file_inode(filp)) ? 1 : 0, (u32 __user *)arg); } int f2fs_precache_extents(struct inode *inode) { struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_map_blocks map; pgoff_t m_next_extent; loff_t end; int err; if (is_inode_flag_set(inode, FI_NO_EXTENT)) return -EOPNOTSUPP; map.m_lblk = 0; map.m_pblk = 0; map.m_next_pgofs = NULL; map.m_next_extent = &m_next_extent; map.m_seg_type = NO_CHECK_TYPE; map.m_may_create = false; end = F2FS_BLK_ALIGN(i_size_read(inode)); while (map.m_lblk < end) { map.m_len = end - map.m_lblk; f2fs_down_write(&fi->i_gc_rwsem[WRITE]); err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRECACHE); f2fs_up_write(&fi->i_gc_rwsem[WRITE]); if (err || !map.m_len) return err; map.m_lblk = m_next_extent; } return 0; } static int f2fs_ioc_precache_extents(struct file *filp) { return f2fs_precache_extents(file_inode(filp)); } static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg) { struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp)); __u64 block_count; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (f2fs_readonly(sbi->sb)) return -EROFS; if (copy_from_user(&block_count, (void __user *)arg, sizeof(block_count))) return -EFAULT; return f2fs_resize_fs(filp, block_count); } static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) { f2fs_warn(F2FS_I_SB(inode), "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem", inode->i_ino); return -EOPNOTSUPP; } return fsverity_ioctl_enable(filp, (const void __user *)arg); } static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg) { if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp)))) return -EOPNOTSUPP; return fsverity_ioctl_measure(filp, (void __user *)arg); } static int f2fs_ioc_read_verity_metadata(struct file *filp, unsigned long arg) { if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp)))) return -EOPNOTSUPP; return fsverity_ioctl_read_metadata(filp, (const void __user *)arg); } static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); char *vbuf; int count; int err = 0; vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL); if (!vbuf) return -ENOMEM; f2fs_down_read(&sbi->sb_lock); count = utf16s_to_utf8s(sbi->raw_super->volume_name, ARRAY_SIZE(sbi->raw_super->volume_name), UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME); f2fs_up_read(&sbi->sb_lock); if (copy_to_user((char __user *)arg, vbuf, min(FSLABEL_MAX, count))) err = -EFAULT; kfree(vbuf); return err; } static int 
f2fs_ioc_setfslabel(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); char *vbuf; int err = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX); if (IS_ERR(vbuf)) return PTR_ERR(vbuf); err = mnt_want_write_file(filp); if (err) goto out; f2fs_down_write(&sbi->sb_lock); memset(sbi->raw_super->volume_name, 0, sizeof(sbi->raw_super->volume_name)); utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN, sbi->raw_super->volume_name, ARRAY_SIZE(sbi->raw_super->volume_name)); err = f2fs_commit_super(sbi, false); f2fs_up_write(&sbi->sb_lock); mnt_drop_write_file(filp); out: kfree(vbuf); return err; } static int f2fs_get_compress_blocks(struct inode *inode, __u64 *blocks) { if (!f2fs_sb_has_compression(F2FS_I_SB(inode))) return -EOPNOTSUPP; if (!f2fs_compressed_file(inode)) return -EINVAL; *blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks); return 0; } static int f2fs_ioc_get_compress_blocks(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); __u64 blocks; int ret; ret = f2fs_get_compress_blocks(inode, &blocks); if (ret < 0) return ret; return put_user(blocks, (u64 __user *)arg); } static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count) { struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); unsigned int released_blocks = 0; int cluster_size = F2FS_I(dn->inode)->i_cluster_size; block_t blkaddr; int i; for (i = 0; i < count; i++) { blkaddr = data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node + i); if (!__is_valid_data_blkaddr(blkaddr)) continue; if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))) return -EFSCORRUPTED; } while (count) { int compr_blocks = 0; for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) { blkaddr = f2fs_data_blkaddr(dn); if (i == 0) { if (blkaddr == COMPRESS_ADDR) continue; dn->ofs_in_node += cluster_size; goto next; } if (__is_valid_data_blkaddr(blkaddr)) compr_blocks++; if (blkaddr != NEW_ADDR) continue; f2fs_set_data_blkaddr(dn, NULL_ADDR); } f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false); dec_valid_block_count(sbi, dn->inode, cluster_size - compr_blocks); released_blocks += cluster_size - compr_blocks; next: count -= cluster_size; } return released_blocks; } static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); pgoff_t page_idx = 0, last_idx; unsigned int released_blocks = 0; int ret; int writecount; if (!f2fs_sb_has_compression(sbi)) return -EOPNOTSUPP; if (f2fs_readonly(sbi->sb)) return -EROFS; ret = mnt_want_write_file(filp); if (ret) return ret; f2fs_balance_fs(sbi, true); inode_lock(inode); writecount = atomic_read(&inode->i_writecount); if ((filp->f_mode & FMODE_WRITE && writecount != 1) || (!(filp->f_mode & FMODE_WRITE) && writecount)) { ret = -EBUSY; goto out; } if (!f2fs_compressed_file(inode) || is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) { ret = -EINVAL; goto out; } ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); if (ret) goto out; if (!atomic_read(&fi->i_compr_blocks)) { ret = -EPERM; goto out; } set_inode_flag(inode, FI_COMPRESS_RELEASED); inode_set_ctime_current(inode); f2fs_mark_inode_dirty_sync(inode, true); f2fs_down_write(&fi->i_gc_rwsem[WRITE]); filemap_invalidate_lock(inode->i_mapping); last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); while (page_idx < 
last_idx) { struct dnode_of_data dn; pgoff_t end_offset, count; f2fs_lock_op(sbi); set_new_dnode(&dn, inode, NULL, NULL, 0); ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE); if (ret) { f2fs_unlock_op(sbi); if (ret == -ENOENT) { page_idx = f2fs_get_next_page_offset(&dn, page_idx); ret = 0; continue; } break; } end_offset = ADDRS_PER_PAGE(dn.node_page, inode); count = min(end_offset - dn.ofs_in_node, last_idx - page_idx); count = round_up(count, fi->i_cluster_size); ret = release_compress_blocks(&dn, count); f2fs_put_dnode(&dn); f2fs_unlock_op(sbi); if (ret < 0) break; page_idx += count; released_blocks += ret; } filemap_invalidate_unlock(inode->i_mapping); f2fs_up_write(&fi->i_gc_rwsem[WRITE]); out: if (released_blocks) f2fs_update_time(sbi, REQ_TIME); inode_unlock(inode); mnt_drop_write_file(filp); if (ret >= 0) { ret = put_user(released_blocks, (u64 __user *)arg); } else if (released_blocks && atomic_read(&fi->i_compr_blocks)) { set_sbi_flag(sbi, SBI_NEED_FSCK); f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx " "iblocks=%llu, released=%u, compr_blocks=%u, " "run fsck to fix.", __func__, inode->i_ino, inode->i_blocks, released_blocks, atomic_read(&fi->i_compr_blocks)); } return ret; } static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count, unsigned int *reserved_blocks) { struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); int cluster_size = F2FS_I(dn->inode)->i_cluster_size; block_t blkaddr; int i; for (i = 0; i < count; i++) { blkaddr = data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node + i); if (!__is_valid_data_blkaddr(blkaddr)) continue; if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))) return -EFSCORRUPTED; } while (count) { int compr_blocks = 0; blkcnt_t reserved = 0; blkcnt_t to_reserved; int ret; for (i = 0; i < cluster_size; i++) { blkaddr = data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node + i); if (i == 0) { if (blkaddr != COMPRESS_ADDR) { dn->ofs_in_node += cluster_size; goto next; } continue; } /* * compressed cluster was not released due to it * fails in release_compress_blocks(), so NEW_ADDR * is a possible case. 
*/ if (blkaddr == NEW_ADDR) { reserved++; continue; } if (__is_valid_data_blkaddr(blkaddr)) { compr_blocks++; continue; } } to_reserved = cluster_size - compr_blocks - reserved; /* for the case all blocks in cluster were reserved */ if (reserved && to_reserved == 1) { dn->ofs_in_node += cluster_size; goto next; } ret = inc_valid_block_count(sbi, dn->inode, &to_reserved, false); if (unlikely(ret)) return ret; for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) { if (f2fs_data_blkaddr(dn) == NULL_ADDR) f2fs_set_data_blkaddr(dn, NEW_ADDR); } f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true); *reserved_blocks += to_reserved; next: count -= cluster_size; } return 0; } static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); pgoff_t page_idx = 0, last_idx; unsigned int reserved_blocks = 0; int ret; if (!f2fs_sb_has_compression(sbi)) return -EOPNOTSUPP; if (f2fs_readonly(sbi->sb)) return -EROFS; ret = mnt_want_write_file(filp); if (ret) return ret; f2fs_balance_fs(sbi, true); inode_lock(inode); if (!f2fs_compressed_file(inode) || !is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) { ret = -EINVAL; goto unlock_inode; } if (atomic_read(&fi->i_compr_blocks)) goto unlock_inode; f2fs_down_write(&fi->i_gc_rwsem[WRITE]); filemap_invalidate_lock(inode->i_mapping); last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); while (page_idx < last_idx) { struct dnode_of_data dn; pgoff_t end_offset, count; f2fs_lock_op(sbi); set_new_dnode(&dn, inode, NULL, NULL, 0); ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE); if (ret) { f2fs_unlock_op(sbi); if (ret == -ENOENT) { page_idx = f2fs_get_next_page_offset(&dn, page_idx); ret = 0; continue; } break; } end_offset = ADDRS_PER_PAGE(dn.node_page, inode); count = min(end_offset - dn.ofs_in_node, last_idx - page_idx); count = round_up(count, fi->i_cluster_size); ret = reserve_compress_blocks(&dn, count, &reserved_blocks); f2fs_put_dnode(&dn); f2fs_unlock_op(sbi); if (ret < 0) break; page_idx += count; } filemap_invalidate_unlock(inode->i_mapping); f2fs_up_write(&fi->i_gc_rwsem[WRITE]); if (!ret) { clear_inode_flag(inode, FI_COMPRESS_RELEASED); inode_set_ctime_current(inode); f2fs_mark_inode_dirty_sync(inode, true); } unlock_inode: if (reserved_blocks) f2fs_update_time(sbi, REQ_TIME); inode_unlock(inode); mnt_drop_write_file(filp); if (!ret) { ret = put_user(reserved_blocks, (u64 __user *)arg); } else if (reserved_blocks && atomic_read(&fi->i_compr_blocks)) { set_sbi_flag(sbi, SBI_NEED_FSCK); f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx " "iblocks=%llu, reserved=%u, compr_blocks=%u, " "run fsck to fix.", __func__, inode->i_ino, inode->i_blocks, reserved_blocks, atomic_read(&fi->i_compr_blocks)); } return ret; } static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode, pgoff_t off, block_t block, block_t len, u32 flags) { sector_t sector = SECTOR_FROM_BLOCK(block); sector_t nr_sects = SECTOR_FROM_BLOCK(len); int ret = 0; if (flags & F2FS_TRIM_FILE_DISCARD) { if (bdev_max_secure_erase_sectors(bdev)) ret = blkdev_issue_secure_erase(bdev, sector, nr_sects, GFP_NOFS); else ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS); } if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) { if (IS_ENCRYPTED(inode)) ret = fscrypt_zeroout_range(inode, off, block, len); else ret = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_NOFS, 0); } return ret; } static int 
f2fs_sec_trim_file(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct address_space *mapping = inode->i_mapping; struct block_device *prev_bdev = NULL; struct f2fs_sectrim_range range; pgoff_t index, pg_end, prev_index = 0; block_t prev_block = 0, len = 0; loff_t end_addr; bool to_end = false; int ret = 0; if (!(filp->f_mode & FMODE_WRITE)) return -EBADF; if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg, sizeof(range))) return -EFAULT; if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) || !S_ISREG(inode->i_mode)) return -EINVAL; if (((range.flags & F2FS_TRIM_FILE_DISCARD) && !f2fs_hw_support_discard(sbi)) || ((range.flags & F2FS_TRIM_FILE_ZEROOUT) && IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi))) return -EOPNOTSUPP; ret = mnt_want_write_file(filp); if (ret) return ret; inode_lock(inode); if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) || range.start >= inode->i_size) { ret = -EINVAL; goto err; } if (range.len == 0) goto err; if (inode->i_size - range.start > range.len) { end_addr = range.start + range.len; } else { end_addr = range.len == (u64)-1 ? sbi->sb->s_maxbytes : inode->i_size; to_end = true; } if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) || (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) { ret = -EINVAL; goto err; } index = F2FS_BYTES_TO_BLK(range.start); pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE); ret = f2fs_convert_inline_inode(inode); if (ret) goto err; f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); filemap_invalidate_lock(mapping); ret = filemap_write_and_wait_range(mapping, range.start, to_end ? LLONG_MAX : end_addr - 1); if (ret) goto out; truncate_inode_pages_range(mapping, range.start, to_end ? -1 : end_addr - 1); while (index < pg_end) { struct dnode_of_data dn; pgoff_t end_offset, count; int i; set_new_dnode(&dn, inode, NULL, NULL, 0); ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE); if (ret) { if (ret == -ENOENT) { index = f2fs_get_next_page_offset(&dn, index); continue; } goto out; } end_offset = ADDRS_PER_PAGE(dn.node_page, inode); count = min(end_offset - dn.ofs_in_node, pg_end - index); for (i = 0; i < count; i++, index++, dn.ofs_in_node++) { struct block_device *cur_bdev; block_t blkaddr = f2fs_data_blkaddr(&dn); if (!__is_valid_data_blkaddr(blkaddr)) continue; if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) { ret = -EFSCORRUPTED; f2fs_put_dnode(&dn); goto out; } cur_bdev = f2fs_target_device(sbi, blkaddr, NULL); if (f2fs_is_multi_device(sbi)) { int di = f2fs_target_device_index(sbi, blkaddr); blkaddr -= FDEV(di).start_blk; } if (len) { if (prev_bdev == cur_bdev && index == prev_index + len && blkaddr == prev_block + len) { len++; } else { ret = f2fs_secure_erase(prev_bdev, inode, prev_index, prev_block, len, range.flags); if (ret) { f2fs_put_dnode(&dn); goto out; } len = 0; } } if (!len) { prev_bdev = cur_bdev; prev_index = index; prev_block = blkaddr; len = 1; } } f2fs_put_dnode(&dn); if (fatal_signal_pending(current)) { ret = -EINTR; goto out; } cond_resched(); } if (len) ret = f2fs_secure_erase(prev_bdev, inode, prev_index, prev_block, len, range.flags); f2fs_update_time(sbi, REQ_TIME); out: filemap_invalidate_unlock(mapping); f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); err: inode_unlock(inode); mnt_drop_write_file(filp); return ret; } static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_comp_option option; if 
(!f2fs_sb_has_compression(F2FS_I_SB(inode))) return -EOPNOTSUPP; inode_lock_shared(inode); if (!f2fs_compressed_file(inode)) { inode_unlock_shared(inode); return -ENODATA; } option.algorithm = F2FS_I(inode)->i_compress_algorithm; option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size; inode_unlock_shared(inode); if (copy_to_user((struct f2fs_comp_option __user *)arg, &option, sizeof(option))) return -EFAULT; return 0; } static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_comp_option option; int ret = 0; if (!f2fs_sb_has_compression(sbi)) return -EOPNOTSUPP; if (!(filp->f_mode & FMODE_WRITE)) return -EBADF; if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg, sizeof(option))) return -EFAULT; if (option.log_cluster_size < MIN_COMPRESS_LOG_SIZE || option.log_cluster_size > MAX_COMPRESS_LOG_SIZE || option.algorithm >= COMPRESS_MAX) return -EINVAL; ret = mnt_want_write_file(filp); if (ret) return ret; inode_lock(inode); f2fs_down_write(&F2FS_I(inode)->i_sem); if (!f2fs_compressed_file(inode)) { ret = -EINVAL; goto out; } if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) { ret = -EBUSY; goto out; } if (F2FS_HAS_BLOCKS(inode)) { ret = -EFBIG; goto out; } fi->i_compress_algorithm = option.algorithm; fi->i_log_cluster_size = option.log_cluster_size; fi->i_cluster_size = BIT(option.log_cluster_size); /* Set default level */ if (fi->i_compress_algorithm == COMPRESS_ZSTD) fi->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL; else fi->i_compress_level = 0; /* Adjust mount option level */ if (option.algorithm == F2FS_OPTION(sbi).compress_algorithm && F2FS_OPTION(sbi).compress_level) fi->i_compress_level = F2FS_OPTION(sbi).compress_level; f2fs_mark_inode_dirty_sync(inode, true); if (!f2fs_is_compress_backend_ready(inode)) f2fs_warn(sbi, "compression algorithm is successfully set, " "but current kernel doesn't support this algorithm."); out: f2fs_up_write(&fi->i_sem); inode_unlock(inode); mnt_drop_write_file(filp); return ret; } static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len) { DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, page_idx); struct address_space *mapping = inode->i_mapping; struct page *page; pgoff_t redirty_idx = page_idx; int i, page_len = 0, ret = 0; page_cache_ra_unbounded(&ractl, len, 0); for (i = 0; i < len; i++, page_idx++) { page = read_cache_page(mapping, page_idx, NULL, NULL); if (IS_ERR(page)) { ret = PTR_ERR(page); break; } page_len++; } for (i = 0; i < page_len; i++, redirty_idx++) { page = find_lock_page(mapping, redirty_idx); /* It will never fail, when page has pinned above */ f2fs_bug_on(F2FS_I_SB(inode), !page); f2fs_wait_on_page_writeback(page, DATA, true, true); set_page_dirty(page); set_page_private_gcing(page); f2fs_put_page(page, 1); f2fs_put_page(page, 0); } return ret; } static int f2fs_ioc_decompress_file(struct file *filp) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode_info *fi = F2FS_I(inode); pgoff_t page_idx = 0, last_idx, cluster_idx; int ret; if (!f2fs_sb_has_compression(sbi) || F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER) return -EOPNOTSUPP; if (!(filp->f_mode & FMODE_WRITE)) return -EBADF; f2fs_balance_fs(sbi, true); ret = mnt_want_write_file(filp); if (ret) return ret; inode_lock(inode); if (!f2fs_is_compress_backend_ready(inode)) { ret = -EOPNOTSUPP; goto out; } if 
(!f2fs_compressed_file(inode) || is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) { ret = -EINVAL; goto out; } ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); if (ret) goto out; if (!atomic_read(&fi->i_compr_blocks)) goto out; last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); last_idx >>= fi->i_log_cluster_size; for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) { page_idx = cluster_idx << fi->i_log_cluster_size; if (!f2fs_is_compressed_cluster(inode, page_idx)) continue; ret = redirty_blocks(inode, page_idx, fi->i_cluster_size); if (ret < 0) break; if (get_dirty_pages(inode) >= BLKS_PER_SEG(sbi)) { ret = filemap_fdatawrite(inode->i_mapping); if (ret < 0) break; } cond_resched(); if (fatal_signal_pending(current)) { ret = -EINTR; break; } } if (!ret) ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); if (ret) f2fs_warn(sbi, "%s: The file might be partially decompressed (errno=%d). Please delete the file.", __func__, ret); f2fs_update_time(sbi, REQ_TIME); out: inode_unlock(inode); mnt_drop_write_file(filp); return ret; } static int f2fs_ioc_compress_file(struct file *filp) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode_info *fi = F2FS_I(inode); pgoff_t page_idx = 0, last_idx, cluster_idx; int ret; if (!f2fs_sb_has_compression(sbi) || F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER) return -EOPNOTSUPP; if (!(filp->f_mode & FMODE_WRITE)) return -EBADF; f2fs_balance_fs(sbi, true); ret = mnt_want_write_file(filp); if (ret) return ret; inode_lock(inode); if (!f2fs_is_compress_backend_ready(inode)) { ret = -EOPNOTSUPP; goto out; } if (!f2fs_compressed_file(inode) || is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) { ret = -EINVAL; goto out; } ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); if (ret) goto out; set_inode_flag(inode, FI_ENABLE_COMPRESS); last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); last_idx >>= fi->i_log_cluster_size; for (cluster_idx = 0; cluster_idx < last_idx; cluster_idx++) { page_idx = cluster_idx << fi->i_log_cluster_size; if (f2fs_is_sparse_cluster(inode, page_idx)) continue; ret = redirty_blocks(inode, page_idx, fi->i_cluster_size); if (ret < 0) break; if (get_dirty_pages(inode) >= BLKS_PER_SEG(sbi)) { ret = filemap_fdatawrite(inode->i_mapping); if (ret < 0) break; } cond_resched(); if (fatal_signal_pending(current)) { ret = -EINTR; break; } } if (!ret) ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); clear_inode_flag(inode, FI_ENABLE_COMPRESS); if (ret) f2fs_warn(sbi, "%s: The file might be partially compressed (errno=%d). 
Please delete the file.", __func__, ret); f2fs_update_time(sbi, REQ_TIME); out: inode_unlock(inode); mnt_drop_write_file(filp); return ret; } static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { switch (cmd) { case FS_IOC_GETVERSION: return f2fs_ioc_getversion(filp, arg); case F2FS_IOC_START_ATOMIC_WRITE: return f2fs_ioc_start_atomic_write(filp, false); case F2FS_IOC_START_ATOMIC_REPLACE: return f2fs_ioc_start_atomic_write(filp, true); case F2FS_IOC_COMMIT_ATOMIC_WRITE: return f2fs_ioc_commit_atomic_write(filp); case F2FS_IOC_ABORT_ATOMIC_WRITE: return f2fs_ioc_abort_atomic_write(filp); case F2FS_IOC_START_VOLATILE_WRITE: case F2FS_IOC_RELEASE_VOLATILE_WRITE: return -EOPNOTSUPP; case F2FS_IOC_SHUTDOWN: return f2fs_ioc_shutdown(filp, arg); case FITRIM: return f2fs_ioc_fitrim(filp, arg); case FS_IOC_SET_ENCRYPTION_POLICY: return f2fs_ioc_set_encryption_policy(filp, arg); case FS_IOC_GET_ENCRYPTION_POLICY: return f2fs_ioc_get_encryption_policy(filp, arg); case FS_IOC_GET_ENCRYPTION_PWSALT: return f2fs_ioc_get_encryption_pwsalt(filp, arg); case FS_IOC_GET_ENCRYPTION_POLICY_EX: return f2fs_ioc_get_encryption_policy_ex(filp, arg); case FS_IOC_ADD_ENCRYPTION_KEY: return f2fs_ioc_add_encryption_key(filp, arg); case FS_IOC_REMOVE_ENCRYPTION_KEY: return f2fs_ioc_remove_encryption_key(filp, arg); case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS: return f2fs_ioc_remove_encryption_key_all_users(filp, arg); case FS_IOC_GET_ENCRYPTION_KEY_STATUS: return f2fs_ioc_get_encryption_key_status(filp, arg); case FS_IOC_GET_ENCRYPTION_NONCE: return f2fs_ioc_get_encryption_nonce(filp, arg); case F2FS_IOC_GARBAGE_COLLECT: return f2fs_ioc_gc(filp, arg); case F2FS_IOC_GARBAGE_COLLECT_RANGE: return f2fs_ioc_gc_range(filp, arg); case F2FS_IOC_WRITE_CHECKPOINT: return f2fs_ioc_write_checkpoint(filp); case F2FS_IOC_DEFRAGMENT: return f2fs_ioc_defragment(filp, arg); case F2FS_IOC_MOVE_RANGE: return f2fs_ioc_move_range(filp, arg); case F2FS_IOC_FLUSH_DEVICE: return f2fs_ioc_flush_device(filp, arg); case F2FS_IOC_GET_FEATURES: return f2fs_ioc_get_features(filp, arg); case F2FS_IOC_GET_PIN_FILE: return f2fs_ioc_get_pin_file(filp, arg); case F2FS_IOC_SET_PIN_FILE: return f2fs_ioc_set_pin_file(filp, arg); case F2FS_IOC_PRECACHE_EXTENTS: return f2fs_ioc_precache_extents(filp); case F2FS_IOC_RESIZE_FS: return f2fs_ioc_resize_fs(filp, arg); case FS_IOC_ENABLE_VERITY: return f2fs_ioc_enable_verity(filp, arg); case FS_IOC_MEASURE_VERITY: return f2fs_ioc_measure_verity(filp, arg); case FS_IOC_READ_VERITY_METADATA: return f2fs_ioc_read_verity_metadata(filp, arg); case FS_IOC_GETFSLABEL: return f2fs_ioc_getfslabel(filp, arg); case FS_IOC_SETFSLABEL: return f2fs_ioc_setfslabel(filp, arg); case F2FS_IOC_GET_COMPRESS_BLOCKS: return f2fs_ioc_get_compress_blocks(filp, arg); case F2FS_IOC_RELEASE_COMPRESS_BLOCKS: return f2fs_release_compress_blocks(filp, arg); case F2FS_IOC_RESERVE_COMPRESS_BLOCKS: return f2fs_reserve_compress_blocks(filp, arg); case F2FS_IOC_SEC_TRIM_FILE: return f2fs_sec_trim_file(filp, arg); case F2FS_IOC_GET_COMPRESS_OPTION: return f2fs_ioc_get_compress_option(filp, arg); case F2FS_IOC_SET_COMPRESS_OPTION: return f2fs_ioc_set_compress_option(filp, arg); case F2FS_IOC_DECOMPRESS_FILE: return f2fs_ioc_decompress_file(filp); case F2FS_IOC_COMPRESS_FILE: return f2fs_ioc_compress_file(filp); case F2FS_IOC_GET_DEV_ALIAS_FILE: return f2fs_ioc_get_dev_alias_file(filp, arg); default: return -ENOTTY; } } long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { if 
(unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp))))) return -EIO; if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp)))) return -ENOSPC; return __f2fs_ioctl(filp, cmd, arg); } /* * Return %true if the given read or write request should use direct I/O, or * %false if it should use buffered I/O. */ static bool f2fs_should_use_dio(struct inode *inode, struct kiocb *iocb, struct iov_iter *iter) { unsigned int align; if (!(iocb->ki_flags & IOCB_DIRECT)) return false; if (f2fs_force_buffered_io(inode, iov_iter_rw(iter))) return false; /* * Direct I/O not aligned to the disk's logical_block_size will be * attempted, but will fail with -EINVAL. * * f2fs additionally requires that direct I/O be aligned to the * filesystem block size, which is often a stricter requirement. * However, f2fs traditionally falls back to buffered I/O on requests * that are logical_block_size-aligned but not fs-block aligned. * * The below logic implements this behavior. */ align = iocb->ki_pos | iov_iter_alignment(iter); if (!IS_ALIGNED(align, i_blocksize(inode)) && IS_ALIGNED(align, bdev_logical_block_size(inode->i_sb->s_bdev))) return false; return true; } static int f2fs_dio_read_end_io(struct kiocb *iocb, ssize_t size, int error, unsigned int flags) { struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp)); dec_page_count(sbi, F2FS_DIO_READ); if (error) return error; f2fs_update_iostat(sbi, NULL, APP_DIRECT_READ_IO, size); return 0; } static const struct iomap_dio_ops f2fs_iomap_dio_read_ops = { .end_io = f2fs_dio_read_end_io, }; static ssize_t f2fs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode_info *fi = F2FS_I(inode); const loff_t pos = iocb->ki_pos; const size_t count = iov_iter_count(to); struct iomap_dio *dio; ssize_t ret; if (count == 0) return 0; /* skip atime update */ trace_f2fs_direct_IO_enter(inode, iocb, count, READ); if (iocb->ki_flags & IOCB_NOWAIT) { if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) { ret = -EAGAIN; goto out; } } else { f2fs_down_read(&fi->i_gc_rwsem[READ]); } /* dio is not compatible w/ atomic file */ if (f2fs_is_atomic_file(inode)) { f2fs_up_read(&fi->i_gc_rwsem[READ]); ret = -EOPNOTSUPP; goto out; } /* * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of * the higher-level function iomap_dio_rw() in order to ensure that the * F2FS_DIO_READ counter will be decremented correctly in all cases. 
*/ inc_page_count(sbi, F2FS_DIO_READ); dio = __iomap_dio_rw(iocb, to, &f2fs_iomap_ops, &f2fs_iomap_dio_read_ops, 0, NULL, 0); if (IS_ERR_OR_NULL(dio)) { ret = PTR_ERR_OR_ZERO(dio); if (ret != -EIOCBQUEUED) dec_page_count(sbi, F2FS_DIO_READ); } else { ret = iomap_dio_complete(dio); } f2fs_up_read(&fi->i_gc_rwsem[READ]); file_accessed(file); out: trace_f2fs_direct_IO_exit(inode, pos, count, READ, ret); return ret; } static void f2fs_trace_rw_file_path(struct file *file, loff_t pos, size_t count, int rw) { struct inode *inode = file_inode(file); char *buf, *path; buf = f2fs_getname(F2FS_I_SB(inode)); if (!buf) return; path = dentry_path_raw(file_dentry(file), buf, PATH_MAX); if (IS_ERR(path)) goto free_buf; if (rw == WRITE) trace_f2fs_datawrite_start(inode, pos, count, current->pid, path, current->comm); else trace_f2fs_dataread_start(inode, pos, count, current->pid, path, current->comm); free_buf: f2fs_putname(buf); } static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct inode *inode = file_inode(iocb->ki_filp); const loff_t pos = iocb->ki_pos; ssize_t ret; if (!f2fs_is_compress_backend_ready(inode)) return -EOPNOTSUPP; if (trace_f2fs_dataread_start_enabled()) f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos, iov_iter_count(to), READ); /* In LFS mode, if there is inflight dio, wait for its completion */ if (f2fs_lfs_mode(F2FS_I_SB(inode)) && get_pages(F2FS_I_SB(inode), F2FS_DIO_WRITE)) inode_dio_wait(inode); if (f2fs_should_use_dio(inode, iocb, to)) { ret = f2fs_dio_read_iter(iocb, to); } else { ret = filemap_read(iocb, to, 0); if (ret > 0) f2fs_update_iostat(F2FS_I_SB(inode), inode, APP_BUFFERED_READ_IO, ret); } if (trace_f2fs_dataread_end_enabled()) trace_f2fs_dataread_end(inode, pos, ret); return ret; } static ssize_t f2fs_file_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct inode *inode = file_inode(in); const loff_t pos = *ppos; ssize_t ret; if (!f2fs_is_compress_backend_ready(inode)) return -EOPNOTSUPP; if (trace_f2fs_dataread_start_enabled()) f2fs_trace_rw_file_path(in, pos, len, READ); ret = filemap_splice_read(in, ppos, pipe, len, flags); if (ret > 0) f2fs_update_iostat(F2FS_I_SB(inode), inode, APP_BUFFERED_READ_IO, ret); if (trace_f2fs_dataread_end_enabled()) trace_f2fs_dataread_end(inode, pos, ret); return ret; } static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); ssize_t count; int err; if (IS_IMMUTABLE(inode)) return -EPERM; if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) return -EPERM; count = generic_write_checks(iocb, from); if (count <= 0) return count; err = file_modified(file); if (err) return err; return count; } /* * Preallocate blocks for a write request, if it is possible and helpful to do * so. Returns a positive number if blocks may have been preallocated, 0 if no * blocks were preallocated, or a negative errno value if something went * seriously wrong. Also sets FI_PREALLOCATED_ALL on the inode if *all* the * requested blocks (not just some of them) have been allocated. */ static int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *iter, bool dio) { struct inode *inode = file_inode(iocb->ki_filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); const loff_t pos = iocb->ki_pos; const size_t count = iov_iter_count(iter); struct f2fs_map_blocks map = {}; int flag; int ret; /* If it will be an out-of-place direct write, don't bother. 
*/ if (dio && f2fs_lfs_mode(sbi)) return 0; /* * Don't preallocate holes aligned to DIO_SKIP_HOLES which turns into * buffered IO, if DIO meets any holes. */ if (dio && i_size_read(inode) && (F2FS_BYTES_TO_BLK(pos) < F2FS_BLK_ALIGN(i_size_read(inode)))) return 0; /* No-wait I/O can't allocate blocks. */ if (iocb->ki_flags & IOCB_NOWAIT) return 0; /* If it will be a short write, don't bother. */ if (fault_in_iov_iter_readable(iter, count)) return 0; if (f2fs_has_inline_data(inode)) { /* If the data will fit inline, don't bother. */ if (pos + count <= MAX_INLINE_DATA(inode)) return 0; ret = f2fs_convert_inline_inode(inode); if (ret) return ret; } /* Do not preallocate blocks that will be written partially in 4KB. */ map.m_lblk = F2FS_BLK_ALIGN(pos); map.m_len = F2FS_BYTES_TO_BLK(pos + count); if (map.m_len > map.m_lblk) map.m_len -= map.m_lblk; else return 0; if (!IS_DEVICE_ALIASING(inode)) map.m_may_create = true; if (dio) { map.m_seg_type = f2fs_rw_hint_to_seg_type(sbi, inode->i_write_hint); flag = F2FS_GET_BLOCK_PRE_DIO; } else { map.m_seg_type = NO_CHECK_TYPE; flag = F2FS_GET_BLOCK_PRE_AIO; } ret = f2fs_map_blocks(inode, &map, flag); /* -ENOSPC|-EDQUOT are fine to report the number of allocated blocks. */ if (ret < 0 && !((ret == -ENOSPC || ret == -EDQUOT) && map.m_len > 0)) return ret; if (ret == 0) set_inode_flag(inode, FI_PREALLOCATED_ALL); return map.m_len; } static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); ssize_t ret; if (iocb->ki_flags & IOCB_NOWAIT) return -EOPNOTSUPP; ret = generic_perform_write(iocb, from); if (ret > 0) { f2fs_update_iostat(F2FS_I_SB(inode), inode, APP_BUFFERED_IO, ret); } return ret; } static int f2fs_dio_write_end_io(struct kiocb *iocb, ssize_t size, int error, unsigned int flags) { struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp)); dec_page_count(sbi, F2FS_DIO_WRITE); if (error) return error; f2fs_update_time(sbi, REQ_TIME); f2fs_update_iostat(sbi, NULL, APP_DIRECT_IO, size); return 0; } static void f2fs_dio_write_submit_io(const struct iomap_iter *iter, struct bio *bio, loff_t file_offset) { struct inode *inode = iter->inode; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); enum log_type type = f2fs_rw_hint_to_seg_type(sbi, inode->i_write_hint); enum temp_type temp = f2fs_get_segment_temp(sbi, type); bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, DATA, temp); submit_bio(bio); } static const struct iomap_dio_ops f2fs_iomap_dio_write_ops = { .end_io = f2fs_dio_write_end_io, .submit_io = f2fs_dio_write_submit_io, }; static void f2fs_flush_buffered_write(struct address_space *mapping, loff_t start_pos, loff_t end_pos) { int ret; ret = filemap_write_and_wait_range(mapping, start_pos, end_pos); if (ret < 0) return; invalidate_mapping_pages(mapping, start_pos >> PAGE_SHIFT, end_pos >> PAGE_SHIFT); } static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from, bool *may_need_sync) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); const bool do_opu = f2fs_lfs_mode(sbi); const loff_t pos = iocb->ki_pos; const ssize_t count = iov_iter_count(from); unsigned int dio_flags; struct iomap_dio *dio; ssize_t ret; trace_f2fs_direct_IO_enter(inode, iocb, count, WRITE); if (iocb->ki_flags & IOCB_NOWAIT) { /* f2fs_convert_inline_inode() and block allocation can block */ if (f2fs_has_inline_data(inode) || 
!f2fs_overwrite_io(inode, pos, count)) { ret = -EAGAIN; goto out; } if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[WRITE])) { ret = -EAGAIN; goto out; } if (do_opu && !f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) { f2fs_up_read(&fi->i_gc_rwsem[WRITE]); ret = -EAGAIN; goto out; } } else { ret = f2fs_convert_inline_inode(inode); if (ret) goto out; f2fs_down_read(&fi->i_gc_rwsem[WRITE]); if (do_opu) f2fs_down_read(&fi->i_gc_rwsem[READ]); } /* * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of * the higher-level function iomap_dio_rw() in order to ensure that the * F2FS_DIO_WRITE counter will be decremented correctly in all cases. */ inc_page_count(sbi, F2FS_DIO_WRITE); dio_flags = 0; if (pos + count > inode->i_size) dio_flags |= IOMAP_DIO_FORCE_WAIT; dio = __iomap_dio_rw(iocb, from, &f2fs_iomap_ops, &f2fs_iomap_dio_write_ops, dio_flags, NULL, 0); if (IS_ERR_OR_NULL(dio)) { ret = PTR_ERR_OR_ZERO(dio); if (ret == -ENOTBLK) ret = 0; if (ret != -EIOCBQUEUED) dec_page_count(sbi, F2FS_DIO_WRITE); } else { ret = iomap_dio_complete(dio); } if (do_opu) f2fs_up_read(&fi->i_gc_rwsem[READ]); f2fs_up_read(&fi->i_gc_rwsem[WRITE]); if (ret < 0) goto out; if (pos + ret > inode->i_size) f2fs_i_size_write(inode, pos + ret); if (!do_opu) set_inode_flag(inode, FI_UPDATE_WRITE); if (iov_iter_count(from)) { ssize_t ret2; loff_t bufio_start_pos = iocb->ki_pos; /* * The direct write was partial, so we need to fall back to a * buffered write for the remainder. */ ret2 = f2fs_buffered_write_iter(iocb, from); if (iov_iter_count(from)) f2fs_write_failed(inode, iocb->ki_pos); if (ret2 < 0) goto out; /* * Ensure that the pagecache pages are written to disk and * invalidated to preserve the expected O_DIRECT semantics. */ if (ret2 > 0) { loff_t bufio_end_pos = bufio_start_pos + ret2 - 1; ret += ret2; f2fs_flush_buffered_write(file->f_mapping, bufio_start_pos, bufio_end_pos); } } else { /* iomap_dio_rw() already handled the generic_write_sync(). */ *may_need_sync = false; } out: trace_f2fs_direct_IO_exit(inode, pos, count, WRITE, ret); return ret; } static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct inode *inode = file_inode(iocb->ki_filp); const loff_t orig_pos = iocb->ki_pos; const size_t orig_count = iov_iter_count(from); loff_t target_size; bool dio; bool may_need_sync = true; int preallocated; const loff_t pos = iocb->ki_pos; const ssize_t count = iov_iter_count(from); ssize_t ret; if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) { ret = -EIO; goto out; } if (!f2fs_is_compress_backend_ready(inode)) { ret = -EOPNOTSUPP; goto out; } if (iocb->ki_flags & IOCB_NOWAIT) { if (!inode_trylock(inode)) { ret = -EAGAIN; goto out; } } else { inode_lock(inode); } if (f2fs_is_pinned_file(inode) && !f2fs_overwrite_io(inode, pos, count)) { ret = -EIO; goto out_unlock; } ret = f2fs_write_checks(iocb, from); if (ret <= 0) goto out_unlock; /* Determine whether we will do a direct write or a buffered write. */ dio = f2fs_should_use_dio(inode, iocb, from); /* dio is not compatible w/ atomic write */ if (dio && f2fs_is_atomic_file(inode)) { ret = -EOPNOTSUPP; goto out_unlock; } /* Possibly preallocate the blocks for the write. */ target_size = iocb->ki_pos + iov_iter_count(from); preallocated = f2fs_preallocate_blocks(iocb, from, dio); if (preallocated < 0) { ret = preallocated; } else { if (trace_f2fs_datawrite_start_enabled()) f2fs_trace_rw_file_path(iocb->ki_filp, iocb->ki_pos, orig_count, WRITE); /* Do the actual write. */ ret = dio ? 
f2fs_dio_write_iter(iocb, from, &may_need_sync) : f2fs_buffered_write_iter(iocb, from); if (trace_f2fs_datawrite_end_enabled()) trace_f2fs_datawrite_end(inode, orig_pos, ret); } /* Don't leave any preallocated blocks around past i_size. */ if (preallocated && i_size_read(inode) < target_size) { f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); filemap_invalidate_lock(inode->i_mapping); if (!f2fs_truncate(inode)) file_dont_truncate(inode); filemap_invalidate_unlock(inode->i_mapping); f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); } else { file_dont_truncate(inode); } clear_inode_flag(inode, FI_PREALLOCATED_ALL); out_unlock: inode_unlock(inode); out: trace_f2fs_file_write_iter(inode, orig_pos, orig_count, ret); if (ret > 0 && may_need_sync) ret = generic_write_sync(iocb, ret); /* If buffered IO was forced, flush and drop the data from * the page cache to preserve O_DIRECT semantics */ if (ret > 0 && !dio && (iocb->ki_flags & IOCB_DIRECT)) f2fs_flush_buffered_write(iocb->ki_filp->f_mapping, orig_pos, orig_pos + ret - 1); return ret; } static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len, int advice) { struct address_space *mapping; struct backing_dev_info *bdi; struct inode *inode = file_inode(filp); int err; if (advice == POSIX_FADV_SEQUENTIAL) { if (S_ISFIFO(inode->i_mode)) return -ESPIPE; mapping = filp->f_mapping; if (!mapping || len < 0) return -EINVAL; bdi = inode_to_bdi(mapping->host); filp->f_ra.ra_pages = bdi->ra_pages * F2FS_I_SB(inode)->seq_file_ra_mul; spin_lock(&filp->f_lock); filp->f_mode &= ~FMODE_RANDOM; spin_unlock(&filp->f_lock); return 0; } else if (advice == POSIX_FADV_WILLNEED && offset == 0) { /* Load extent cache at the first readahead. */ f2fs_precache_extents(inode); } err = generic_fadvise(filp, offset, len, advice); if (!err && advice == POSIX_FADV_DONTNEED && test_opt(F2FS_I_SB(inode), COMPRESS_CACHE) && f2fs_compressed_file(inode)) f2fs_invalidate_compress_pages(F2FS_I_SB(inode), inode->i_ino); return err; } #ifdef CONFIG_COMPAT struct compat_f2fs_gc_range { u32 sync; compat_u64 start; compat_u64 len; }; #define F2FS_IOC32_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11,\ struct compat_f2fs_gc_range) static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg) { struct compat_f2fs_gc_range __user *urange; struct f2fs_gc_range range; int err; urange = compat_ptr(arg); err = get_user(range.sync, &urange->sync); err |= get_user(range.start, &urange->start); err |= get_user(range.len, &urange->len); if (err) return -EFAULT; return __f2fs_ioc_gc_range(file, &range); } struct compat_f2fs_move_range { u32 dst_fd; compat_u64 pos_in; compat_u64 pos_out; compat_u64 len; }; #define F2FS_IOC32_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \ struct compat_f2fs_move_range) static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg) { struct compat_f2fs_move_range __user *urange; struct f2fs_move_range range; int err; urange = compat_ptr(arg); err = get_user(range.dst_fd, &urange->dst_fd); err |= get_user(range.pos_in, &urange->pos_in); err |= get_user(range.pos_out, &urange->pos_out); err |= get_user(range.len, &urange->len); if (err) return -EFAULT; return __f2fs_ioc_move_range(file, &range); } long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file))))) return -EIO; if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file)))) return -ENOSPC; switch (cmd) { case FS_IOC32_GETVERSION: cmd = FS_IOC_GETVERSION; break; case F2FS_IOC32_GARBAGE_COLLECT_RANGE: 
return f2fs_compat_ioc_gc_range(file, arg); case F2FS_IOC32_MOVE_RANGE: return f2fs_compat_ioc_move_range(file, arg); case F2FS_IOC_START_ATOMIC_WRITE: case F2FS_IOC_START_ATOMIC_REPLACE: case F2FS_IOC_COMMIT_ATOMIC_WRITE: case F2FS_IOC_START_VOLATILE_WRITE: case F2FS_IOC_RELEASE_VOLATILE_WRITE: case F2FS_IOC_ABORT_ATOMIC_WRITE: case F2FS_IOC_SHUTDOWN: case FITRIM: case FS_IOC_SET_ENCRYPTION_POLICY: case FS_IOC_GET_ENCRYPTION_PWSALT: case FS_IOC_GET_ENCRYPTION_POLICY: case FS_IOC_GET_ENCRYPTION_POLICY_EX: case FS_IOC_ADD_ENCRYPTION_KEY: case FS_IOC_REMOVE_ENCRYPTION_KEY: case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS: case FS_IOC_GET_ENCRYPTION_KEY_STATUS: case FS_IOC_GET_ENCRYPTION_NONCE: case F2FS_IOC_GARBAGE_COLLECT: case F2FS_IOC_WRITE_CHECKPOINT: case F2FS_IOC_DEFRAGMENT: case F2FS_IOC_FLUSH_DEVICE: case F2FS_IOC_GET_FEATURES: case F2FS_IOC_GET_PIN_FILE: case F2FS_IOC_SET_PIN_FILE: case F2FS_IOC_PRECACHE_EXTENTS: case F2FS_IOC_RESIZE_FS: case FS_IOC_ENABLE_VERITY: case FS_IOC_MEASURE_VERITY: case FS_IOC_READ_VERITY_METADATA: case FS_IOC_GETFSLABEL: case FS_IOC_SETFSLABEL: case F2FS_IOC_GET_COMPRESS_BLOCKS: case F2FS_IOC_RELEASE_COMPRESS_BLOCKS: case F2FS_IOC_RESERVE_COMPRESS_BLOCKS: case F2FS_IOC_SEC_TRIM_FILE: case F2FS_IOC_GET_COMPRESS_OPTION: case F2FS_IOC_SET_COMPRESS_OPTION: case F2FS_IOC_DECOMPRESS_FILE: case F2FS_IOC_COMPRESS_FILE: case F2FS_IOC_GET_DEV_ALIAS_FILE: break; default: return -ENOIOCTLCMD; } return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); } #endif const struct file_operations f2fs_file_operations = { .llseek = f2fs_llseek, .read_iter = f2fs_file_read_iter, .write_iter = f2fs_file_write_iter, .iopoll = iocb_bio_iopoll, .open = f2fs_file_open, .release = f2fs_release_file, .mmap = f2fs_file_mmap, .flush = f2fs_file_flush, .fsync = f2fs_sync_file, .fallocate = f2fs_fallocate, .unlocked_ioctl = f2fs_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = f2fs_compat_ioctl, #endif .splice_read = f2fs_file_splice_read, .splice_write = iter_file_splice_write, .fadvise = f2fs_file_fadvise, .fop_flags = FOP_BUFFER_RASYNC, };
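The ioctl dispatch table above is easier to follow next to a concrete caller. The following user-space sketch is purely illustrative and is not part of the kernel sources in this file: it exercises the pin-file path handled by f2fs_ioc_set_pin_file() and f2fs_ioc_get_pin_file(), assuming the uapi header <linux/f2fs.h> that carries the F2FS_IOC_SET_PIN_FILE / F2FS_IOC_GET_PIN_FILE definitions is available; error handling is reduced to perror() for brevity.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/f2fs.h>

int main(int argc, char **argv)
{
	__u32 pin = 1;		/* 1 = pin, 0 = unpin */
	__u32 gc_failures = 0;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file-on-f2fs>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Ask f2fs to pin the file so cleaning/GC will not migrate its blocks. */
	if (ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin) < 0)
		perror("F2FS_IOC_SET_PIN_FILE");

	/* For a pinned file this reports the accumulated GC failure count. */
	if (ioctl(fd, F2FS_IOC_GET_PIN_FILE, &gc_failures) < 0)
		perror("F2FS_IOC_GET_PIN_FILE");
	else
		printf("pin state read back, gc failures: %u\n", gc_failures);

	close(fd);
	return 0;
}

As f2fs_ioc_set_pin_file() above shows, pinning only succeeds for a regular, non-atomic file on a writable filesystem that has no blocks allocated yet (otherwise -EFBIG), so an application would normally issue the ioctl right after creating the file and before writing to it.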
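A second, equally illustrative sketch: f2fs_should_use_dio() above folds the file position and the iov_iter alignment bits into a single value (align = iocb->ki_pos | iov_iter_alignment(iter)) before testing it. The stand-alone program below is plain C, not kernel code; is_aligned() is a local helper standing in for IS_ALIGNED(). It shows why OR-ing the two values and testing the result once is equivalent to testing each value against a power-of-two alignment, and reproduces the case in which f2fs falls back to buffered I/O.

#include <assert.h>
#include <stdint.h>

/* a must be a power of two, as the logical block size and fs block size are */
static int is_aligned(uint64_t v, uint64_t a)
{
	return (v & (a - 1)) == 0;
}

int main(void)
{
	uint64_t pos = 4096;		/* file offset of the request */
	uint64_t iov_align = 512;	/* stand-in for iov_iter_alignment(iter) */
	uint64_t fs_blocksize = 4096;
	uint64_t logical_block_size = 512;
	uint64_t align = pos | iov_align;	/* same folding as in f2fs_should_use_dio() */

	/*
	 * OR-ing keeps every low-order bit that is set in either value, so the
	 * folded value is X-aligned iff both inputs are X-aligned.
	 */
	assert(is_aligned(align, fs_blocksize) ==
	       (is_aligned(pos, fs_blocksize) && is_aligned(iov_align, fs_blocksize)));

	/*
	 * Here the request is logical_block_size-aligned but not fs-block
	 * aligned: that is the case where the kernel code above returns
	 * false, i.e. falls back to buffered I/O instead of letting the
	 * direct I/O fail with -EINVAL.
	 */
	assert(!is_aligned(align, fs_blocksize));
	assert(is_aligned(align, logical_block_size));
	return 0;
}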
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH * Copyright (C) 2018-2024 Intel Corporation * * element parsing for mac80211 */ #include <net/mac80211.h> #include <linux/netdevice.h> #include <linux/export.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/etherdevice.h> #include <linux/if_arp.h> #include <linux/bitmap.h> #include <linux/crc32.h> #include <net/net_namespace.h> #include <net/cfg80211.h> #include <net/rtnetlink.h> #include <kunit/visibility.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "rate.h" #include "mesh.h" #include "wme.h" #include "led.h" #include "wep.h" struct ieee80211_elems_parse { /* must be first for kfree to work */ struct ieee802_11_elems elems; /* The basic Multi-Link element in the original elements */ const struct element *ml_basic_elem; /* The reconfiguration Multi-Link element in the original elements */ const struct element *ml_reconf_elem; /* The EPCS Multi-Link element in the original elements */ const struct element *ml_epcs_elem; /* * scratch buffer that can be used for various element parsing related * tasks, e.g., element de-fragmentation etc. 
*/ size_t scratch_len; u8 *scratch_pos; u8 scratch[] __counted_by(scratch_len); }; static void ieee80211_parse_extension_element(u32 *crc, const struct element *elem, struct ieee80211_elems_parse *elems_parse, struct ieee80211_elems_parse_params *params) { struct ieee802_11_elems *elems = &elems_parse->elems; const void *data = elem->data + 1; bool calc_crc = false; u8 len; if (!elem->datalen) return; len = elem->datalen - 1; switch (elem->data[0]) { case WLAN_EID_EXT_HE_MU_EDCA: if (params->mode < IEEE80211_CONN_MODE_HE) break; calc_crc = true; if (len >= sizeof(*elems->mu_edca_param_set)) elems->mu_edca_param_set = data; break; case WLAN_EID_EXT_HE_CAPABILITY: if (params->mode < IEEE80211_CONN_MODE_HE) break; if (ieee80211_he_capa_size_ok(data, len)) { elems->he_cap = data; elems->he_cap_len = len; } break; case WLAN_EID_EXT_HE_OPERATION: if (params->mode < IEEE80211_CONN_MODE_HE) break; calc_crc = true; if (len >= sizeof(*elems->he_operation) && len >= ieee80211_he_oper_size(data) - 1) elems->he_operation = data; break; case WLAN_EID_EXT_UORA: if (params->mode < IEEE80211_CONN_MODE_HE) break; if (len >= 1) elems->uora_element = data; break; case WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME: if (len == 3) elems->max_channel_switch_time = data; break; case WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION: if (len >= sizeof(*elems->mbssid_config_ie)) elems->mbssid_config_ie = data; break; case WLAN_EID_EXT_HE_SPR: if (params->mode < IEEE80211_CONN_MODE_HE) break; if (len >= sizeof(*elems->he_spr) && len >= ieee80211_he_spr_size(data) - 1) elems->he_spr = data; break; case WLAN_EID_EXT_HE_6GHZ_CAPA: if (params->mode < IEEE80211_CONN_MODE_HE) break; if (len >= sizeof(*elems->he_6ghz_capa)) elems->he_6ghz_capa = data; break; case WLAN_EID_EXT_EHT_CAPABILITY: if (params->mode < IEEE80211_CONN_MODE_EHT) break; if (ieee80211_eht_capa_size_ok(elems->he_cap, data, len, params->from_ap)) { elems->eht_cap = data; elems->eht_cap_len = len; } break; case WLAN_EID_EXT_EHT_OPERATION: if (params->mode < IEEE80211_CONN_MODE_EHT) break; if (ieee80211_eht_oper_size_ok(data, len)) elems->eht_operation = data; calc_crc = true; break; case WLAN_EID_EXT_EHT_MULTI_LINK: if (params->mode < IEEE80211_CONN_MODE_EHT) break; calc_crc = true; if (ieee80211_mle_size_ok(data, len)) { const struct ieee80211_multi_link_elem *mle = (void *)data; switch (le16_get_bits(mle->control, IEEE80211_ML_CONTROL_TYPE)) { case IEEE80211_ML_CONTROL_TYPE_BASIC: if (elems_parse->ml_basic_elem) { elems->parse_error |= IEEE80211_PARSE_ERR_DUP_NEST_ML_BASIC; break; } elems_parse->ml_basic_elem = elem; break; case IEEE80211_ML_CONTROL_TYPE_RECONF: elems_parse->ml_reconf_elem = elem; break; case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS: elems_parse->ml_epcs_elem = elem; break; default: break; } } break; case WLAN_EID_EXT_BANDWIDTH_INDICATION: if (params->mode < IEEE80211_CONN_MODE_EHT) break; if (ieee80211_bandwidth_indication_size_ok(data, len)) elems->bandwidth_indication = data; calc_crc = true; break; case WLAN_EID_EXT_TID_TO_LINK_MAPPING: if (params->mode < IEEE80211_CONN_MODE_EHT) break; calc_crc = true; if (ieee80211_tid_to_link_map_size_ok(data, len) && elems->ttlm_num < ARRAY_SIZE(elems->ttlm)) { elems->ttlm[elems->ttlm_num] = (void *)data; elems->ttlm_num++; } break; } if (crc && calc_crc) *crc = crc32_be(*crc, (void *)elem, elem->datalen + 2); } static void ieee80211_parse_tpe(struct ieee80211_parsed_tpe *tpe, const u8 *data, u8 len) { const struct ieee80211_tx_pwr_env *env = (const void *)data; u8 count, interpret, category; u8 *out, N, *cnt_out = 
NULL, *N_out = NULL; if (!ieee80211_valid_tpe_element(data, len)) return; count = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_COUNT); interpret = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_INTERPRET); category = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_CATEGORY); switch (interpret) { case IEEE80211_TPE_LOCAL_EIRP: out = tpe->max_local[category].power; cnt_out = &tpe->max_local[category].count; tpe->max_local[category].valid = true; break; case IEEE80211_TPE_REG_CLIENT_EIRP: out = tpe->max_reg_client[category].power; cnt_out = &tpe->max_reg_client[category].count; tpe->max_reg_client[category].valid = true; break; case IEEE80211_TPE_LOCAL_EIRP_PSD: out = tpe->psd_local[category].power; cnt_out = &tpe->psd_local[category].count; N_out = &tpe->psd_local[category].n; tpe->psd_local[category].valid = true; break; case IEEE80211_TPE_REG_CLIENT_EIRP_PSD: out = tpe->psd_reg_client[category].power; cnt_out = &tpe->psd_reg_client[category].count; N_out = &tpe->psd_reg_client[category].n; tpe->psd_reg_client[category].valid = true; break; } switch (interpret) { case IEEE80211_TPE_LOCAL_EIRP: case IEEE80211_TPE_REG_CLIENT_EIRP: /* count was validated <= 3, plus 320 MHz */ BUILD_BUG_ON(IEEE80211_TPE_EIRP_ENTRIES_320MHZ < 5); memcpy(out, env->variable, count + 1); *cnt_out = count + 1; /* separately take 320 MHz if present */ if (count == 3 && len > sizeof(*env) + count + 1) { out[4] = env->variable[4]; *cnt_out = 5; } break; case IEEE80211_TPE_LOCAL_EIRP_PSD: case IEEE80211_TPE_REG_CLIENT_EIRP_PSD: if (!count) { memset(out, env->variable[0], IEEE80211_TPE_PSD_ENTRIES_320MHZ); *cnt_out = IEEE80211_TPE_PSD_ENTRIES_320MHZ; break; } N = 1 << (count - 1); memcpy(out, env->variable, N); *cnt_out = N; *N_out = N; if (len > sizeof(*env) + N) { int K = u8_get_bits(env->variable[N], IEEE80211_TX_PWR_ENV_EXT_COUNT); K = min(K, IEEE80211_TPE_PSD_ENTRIES_320MHZ - N); memcpy(out + N, env->variable + N + 1, K); (*cnt_out) += K; } break; } } static u32 _ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params, struct ieee80211_elems_parse *elems_parse, const struct element *check_inherit) { struct ieee802_11_elems *elems = &elems_parse->elems; const struct element *elem; bool calc_crc = params->filter != 0; DECLARE_BITMAP(seen_elems, 256); u32 crc = params->crc; bitmap_zero(seen_elems, 256); for_each_element(elem, params->start, params->len) { const struct element *subelem; u8 elem_parse_failed; u8 id = elem->id; u8 elen = elem->datalen; const u8 *pos = elem->data; if (check_inherit && !cfg80211_is_element_inherited(elem, check_inherit)) continue; switch (id) { case WLAN_EID_SSID: case WLAN_EID_SUPP_RATES: case WLAN_EID_FH_PARAMS: case WLAN_EID_DS_PARAMS: case WLAN_EID_CF_PARAMS: case WLAN_EID_TIM: case WLAN_EID_IBSS_PARAMS: case WLAN_EID_CHALLENGE: case WLAN_EID_RSN: case WLAN_EID_ERP_INFO: case WLAN_EID_EXT_SUPP_RATES: case WLAN_EID_HT_CAPABILITY: case WLAN_EID_HT_OPERATION: case WLAN_EID_VHT_CAPABILITY: case WLAN_EID_VHT_OPERATION: case WLAN_EID_MESH_ID: case WLAN_EID_MESH_CONFIG: case WLAN_EID_PEER_MGMT: case WLAN_EID_PREQ: case WLAN_EID_PREP: case WLAN_EID_PERR: case WLAN_EID_RANN: case WLAN_EID_CHANNEL_SWITCH: case WLAN_EID_EXT_CHANSWITCH_ANN: case WLAN_EID_COUNTRY: case WLAN_EID_PWR_CONSTRAINT: case WLAN_EID_TIMEOUT_INTERVAL: case WLAN_EID_SECONDARY_CHANNEL_OFFSET: case WLAN_EID_WIDE_BW_CHANNEL_SWITCH: case WLAN_EID_CHAN_SWITCH_PARAM: case WLAN_EID_EXT_CAPABILITY: case WLAN_EID_CHAN_SWITCH_TIMING: case WLAN_EID_LINK_ID: case WLAN_EID_BSS_MAX_IDLE_PERIOD: case WLAN_EID_RSNX: 
case WLAN_EID_S1G_BCN_COMPAT: case WLAN_EID_S1G_CAPABILITIES: case WLAN_EID_S1G_OPERATION: case WLAN_EID_AID_RESPONSE: case WLAN_EID_S1G_SHORT_BCN_INTERVAL: /* * not listing WLAN_EID_CHANNEL_SWITCH_WRAPPER -- it seems possible * that if the content gets bigger it might be needed more than once */ if (test_bit(id, seen_elems)) { elems->parse_error |= IEEE80211_PARSE_ERR_DUP_ELEM; continue; } break; } if (calc_crc && id < 64 && (params->filter & (1ULL << id))) crc = crc32_be(crc, pos - 2, elen + 2); elem_parse_failed = 0; switch (id) { case WLAN_EID_LINK_ID: if (elen + 2 < sizeof(struct ieee80211_tdls_lnkie)) { elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; } elems->lnk_id = (void *)(pos - 2); break; case WLAN_EID_CHAN_SWITCH_TIMING: if (elen < sizeof(struct ieee80211_ch_switch_timing)) { elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; } elems->ch_sw_timing = (void *)pos; break; case WLAN_EID_EXT_CAPABILITY: elems->ext_capab = pos; elems->ext_capab_len = elen; break; case WLAN_EID_SSID: elems->ssid = pos; elems->ssid_len = elen; break; case WLAN_EID_SUPP_RATES: elems->supp_rates = pos; elems->supp_rates_len = elen; break; case WLAN_EID_DS_PARAMS: if (elen >= 1) elems->ds_params = pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_TIM: if (elen >= sizeof(struct ieee80211_tim_ie)) { elems->tim = (void *)pos; elems->tim_len = elen; } else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_VENDOR_SPECIFIC: if (elen >= 4 && pos[0] == 0x00 && pos[1] == 0x50 && pos[2] == 0xf2) { /* Microsoft OUI (00:50:F2) */ if (calc_crc) crc = crc32_be(crc, pos - 2, elen + 2); if (elen >= 5 && pos[3] == 2) { /* OUI Type 2 - WMM IE */ if (pos[4] == 0) { elems->wmm_info = pos; elems->wmm_info_len = elen; } else if (pos[4] == 1) { elems->wmm_param = pos; elems->wmm_param_len = elen; } } } break; case WLAN_EID_RSN: elems->rsn = pos; elems->rsn_len = elen; break; case WLAN_EID_ERP_INFO: if (elen >= 1) elems->erp_info = pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_EXT_SUPP_RATES: elems->ext_supp_rates = pos; elems->ext_supp_rates_len = elen; break; case WLAN_EID_HT_CAPABILITY: if (params->mode < IEEE80211_CONN_MODE_HT) break; if (elen >= sizeof(struct ieee80211_ht_cap)) elems->ht_cap_elem = (void *)pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_HT_OPERATION: if (params->mode < IEEE80211_CONN_MODE_HT) break; if (elen >= sizeof(struct ieee80211_ht_operation)) elems->ht_operation = (void *)pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_VHT_CAPABILITY: if (params->mode < IEEE80211_CONN_MODE_VHT) break; if (elen >= sizeof(struct ieee80211_vht_cap)) elems->vht_cap_elem = (void *)pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_VHT_OPERATION: if (params->mode < IEEE80211_CONN_MODE_VHT) break; if (elen >= sizeof(struct ieee80211_vht_operation)) { elems->vht_operation = (void *)pos; if (calc_crc) crc = crc32_be(crc, pos - 2, elen + 2); break; } elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_OPMODE_NOTIF: if (params->mode < IEEE80211_CONN_MODE_VHT) break; if (elen > 0) { elems->opmode_notif = pos; if (calc_crc) crc = crc32_be(crc, pos - 2, elen + 2); break; } elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_MESH_ID: elems->mesh_id = pos; elems->mesh_id_len = elen; break; case WLAN_EID_MESH_CONFIG: if (elen >= sizeof(struct 
ieee80211_meshconf_ie)) elems->mesh_config = (void *)pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_PEER_MGMT: elems->peering = pos; elems->peering_len = elen; break; case WLAN_EID_MESH_AWAKE_WINDOW: if (elen >= 2) elems->awake_window = (void *)pos; break; case WLAN_EID_PREQ: elems->preq = pos; elems->preq_len = elen; break; case WLAN_EID_PREP: elems->prep = pos; elems->prep_len = elen; break; case WLAN_EID_PERR: elems->perr = pos; elems->perr_len = elen; break; case WLAN_EID_RANN: if (elen >= sizeof(struct ieee80211_rann_ie)) elems->rann = (void *)pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_CHANNEL_SWITCH: if (elen != sizeof(struct ieee80211_channel_sw_ie)) { elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; } elems->ch_switch_ie = (void *)pos; break; case WLAN_EID_EXT_CHANSWITCH_ANN: if (elen != sizeof(struct ieee80211_ext_chansw_ie)) { elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; } elems->ext_chansw_ie = (void *)pos; break; case WLAN_EID_SECONDARY_CHANNEL_OFFSET: if (params->mode < IEEE80211_CONN_MODE_HT) break; if (elen != sizeof(struct ieee80211_sec_chan_offs_ie)) { elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; } elems->sec_chan_offs = (void *)pos; break; case WLAN_EID_CHAN_SWITCH_PARAM: if (elen < sizeof(*elems->mesh_chansw_params_ie)) { elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; } elems->mesh_chansw_params_ie = (void *)pos; break; case WLAN_EID_WIDE_BW_CHANNEL_SWITCH: if (params->mode < IEEE80211_CONN_MODE_VHT) break; if (!params->action) { elem_parse_failed = IEEE80211_PARSE_ERR_UNEXPECTED_ELEM; break; } if (elen < sizeof(*elems->wide_bw_chansw_ie)) { elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; } elems->wide_bw_chansw_ie = (void *)pos; break; case WLAN_EID_CHANNEL_SWITCH_WRAPPER: if (params->mode < IEEE80211_CONN_MODE_VHT) break; if (params->action) { elem_parse_failed = IEEE80211_PARSE_ERR_UNEXPECTED_ELEM; break; } /* * This is a bit tricky, but as we only care about * a few elements, parse them out manually. */ subelem = cfg80211_find_elem(WLAN_EID_WIDE_BW_CHANNEL_SWITCH, pos, elen); if (subelem) { if (subelem->datalen >= sizeof(*elems->wide_bw_chansw_ie)) elems->wide_bw_chansw_ie = (void *)subelem->data; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; } if (params->mode < IEEE80211_CONN_MODE_EHT) break; subelem = cfg80211_find_ext_elem(WLAN_EID_EXT_BANDWIDTH_INDICATION, pos, elen); if (subelem) { const void *edata = subelem->data + 1; u8 edatalen = subelem->datalen - 1; if (ieee80211_bandwidth_indication_size_ok(edata, edatalen)) elems->bandwidth_indication = edata; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; } subelem = cfg80211_find_ext_elem(WLAN_EID_TX_POWER_ENVELOPE, pos, elen); if (subelem) ieee80211_parse_tpe(&elems->csa_tpe, subelem->data + 1, subelem->datalen - 1); break; case WLAN_EID_COUNTRY: elems->country_elem = pos; elems->country_elem_len = elen; break; case WLAN_EID_PWR_CONSTRAINT: if (elen != 1) { elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; } elems->pwr_constr_elem = pos; break; case WLAN_EID_CISCO_VENDOR_SPECIFIC: /* Lots of different options exist, but we only care * about the Dynamic Transmit Power Control element. * First check for the Cisco OUI, then for the DTPC * tag (0x00). 
*/ if (elen < 4) { elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; } if (pos[0] != 0x00 || pos[1] != 0x40 || pos[2] != 0x96 || pos[3] != 0x00) break; if (elen != 6) { elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; } if (calc_crc) crc = crc32_be(crc, pos - 2, elen + 2); elems->cisco_dtpc_elem = pos; break; case WLAN_EID_ADDBA_EXT: if (elen < sizeof(struct ieee80211_addba_ext_ie)) { elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; } elems->addba_ext_ie = (void *)pos; break; case WLAN_EID_TIMEOUT_INTERVAL: if (elen >= sizeof(struct ieee80211_timeout_interval_ie)) elems->timeout_int = (void *)pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_BSS_MAX_IDLE_PERIOD: if (elen >= sizeof(*elems->max_idle_period_ie)) elems->max_idle_period_ie = (void *)pos; break; case WLAN_EID_RSNX: elems->rsnx = pos; elems->rsnx_len = elen; break; case WLAN_EID_TX_POWER_ENVELOPE: if (params->mode < IEEE80211_CONN_MODE_HE) break; ieee80211_parse_tpe(&elems->tpe, pos, elen); break; case WLAN_EID_EXTENSION: ieee80211_parse_extension_element(calc_crc ? &crc : NULL, elem, elems_parse, params); break; case WLAN_EID_S1G_CAPABILITIES: if (params->mode != IEEE80211_CONN_MODE_S1G) break; if (elen >= sizeof(*elems->s1g_capab)) elems->s1g_capab = (void *)pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_S1G_OPERATION: if (params->mode != IEEE80211_CONN_MODE_S1G) break; if (elen == sizeof(*elems->s1g_oper)) elems->s1g_oper = (void *)pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_S1G_BCN_COMPAT: if (params->mode != IEEE80211_CONN_MODE_S1G) break; if (elen == sizeof(*elems->s1g_bcn_compat)) elems->s1g_bcn_compat = (void *)pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; case WLAN_EID_AID_RESPONSE: if (params->mode != IEEE80211_CONN_MODE_S1G) break; if (elen == sizeof(struct ieee80211_aid_response_ie)) elems->aid_resp = (void *)pos; else elem_parse_failed = IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; break; default: break; } if (elem_parse_failed) elems->parse_error |= elem_parse_failed; else __set_bit(id, seen_elems); } if (!for_each_element_completed(elem, params->start, params->len)) elems->parse_error |= IEEE80211_PARSE_ERR_INVALID_END; return crc; } static size_t ieee802_11_find_bssid_profile(const u8 *start, size_t len, struct ieee802_11_elems *elems, struct cfg80211_bss *bss, u8 *nontransmitted_profile) { const struct element *elem, *sub; size_t profile_len = 0; bool found = false; if (!bss || !bss->transmitted_bss) return profile_len; for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, len) { if (elem->datalen < 2) continue; if (elem->data[0] < 1 || elem->data[0] > 8) continue; for_each_element(sub, elem->data + 1, elem->datalen - 1) { u8 new_bssid[ETH_ALEN]; const u8 *index; if (sub->id != 0 || sub->datalen < 4) { /* not a valid BSS profile */ continue; } if (sub->data[0] != WLAN_EID_NON_TX_BSSID_CAP || sub->data[1] != 2) { /* The first element of the * Nontransmitted BSSID Profile is not * the Nontransmitted BSSID Capability * element. 
*/ continue; } memset(nontransmitted_profile, 0, len); profile_len = cfg80211_merge_profile(start, len, elem, sub, nontransmitted_profile, len); /* found a Nontransmitted BSSID Profile */ index = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX, nontransmitted_profile, profile_len); if (!index || index[1] < 1 || index[2] == 0) { /* Invalid MBSSID Index element */ continue; } cfg80211_gen_new_bssid(bss->transmitted_bss->bssid, elem->data[0], index[2], new_bssid); if (ether_addr_equal(new_bssid, bss->bssid)) { found = true; elems->bssid_index_len = index[1]; elems->bssid_index = (void *)&index[2]; break; } } } return found ? profile_len : 0; } static void ieee80211_mle_get_sta_prof(struct ieee80211_elems_parse *elems_parse, u8 link_id) { struct ieee802_11_elems *elems = &elems_parse->elems; const struct ieee80211_multi_link_elem *ml = elems->ml_basic; ssize_t ml_len = elems->ml_basic_len; const struct element *sub; for_each_mle_subelement(sub, (u8 *)ml, ml_len) { struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data; ssize_t sta_prof_len; u16 control; if (sub->id != IEEE80211_MLE_SUBELEM_PER_STA_PROFILE) continue; if (!ieee80211_mle_basic_sta_prof_size_ok(sub->data, sub->datalen)) return; control = le16_to_cpu(prof->control); if (link_id != u16_get_bits(control, IEEE80211_MLE_STA_CONTROL_LINK_ID)) continue; if (!(control & IEEE80211_MLE_STA_CONTROL_COMPLETE_PROFILE)) return; /* the sub element can be fragmented */ sta_prof_len = cfg80211_defragment_element(sub, (u8 *)ml, ml_len, elems_parse->scratch_pos, elems_parse->scratch + elems_parse->scratch_len - elems_parse->scratch_pos, IEEE80211_MLE_SUBELEM_FRAGMENT); if (sta_prof_len < 0) return; elems->prof = (void *)elems_parse->scratch_pos; elems->sta_prof_len = sta_prof_len; elems_parse->scratch_pos += sta_prof_len; return; } } static void ieee80211_mle_parse_link(struct ieee80211_elems_parse *elems_parse, struct ieee80211_elems_parse_params *params) { struct ieee802_11_elems *elems = &elems_parse->elems; struct ieee80211_mle_per_sta_profile *prof; struct ieee80211_elems_parse_params sub = { .mode = params->mode, .action = params->action, .from_ap = params->from_ap, .link_id = -1, }; ssize_t ml_len = elems->ml_basic_len; const struct element *non_inherit = NULL; const u8 *end; ml_len = cfg80211_defragment_element(elems_parse->ml_basic_elem, elems->ie_start, elems->total_len, elems_parse->scratch_pos, elems_parse->scratch + elems_parse->scratch_len - elems_parse->scratch_pos, WLAN_EID_FRAGMENT); if (ml_len < 0) return; elems->ml_basic = (const void *)elems_parse->scratch_pos; elems->ml_basic_len = ml_len; elems_parse->scratch_pos += ml_len; if (params->link_id == -1) return; ieee80211_mle_get_sta_prof(elems_parse, params->link_id); prof = elems->prof; if (!prof) return; /* check if we have the 4 bytes for the fixed part in assoc response */ if (elems->sta_prof_len < sizeof(*prof) + prof->sta_info_len - 1 + 4) { elems->prof = NULL; elems->sta_prof_len = 0; return; } /* * Skip the capability information and the status code that are expected * as part of the station profile in association response frames. Note * the -1 is because the 'sta_info_len' is accounted to as part of the * per-STA profile, but not part of the 'u8 variable[]' portion. 
*/ sub.start = prof->variable + prof->sta_info_len - 1 + 4; end = (const u8 *)prof + elems->sta_prof_len; sub.len = end - sub.start; non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, sub.start, sub.len); _ieee802_11_parse_elems_full(&sub, elems_parse, non_inherit); } static void ieee80211_mle_defrag_reconf(struct ieee80211_elems_parse *elems_parse) { struct ieee802_11_elems *elems = &elems_parse->elems; ssize_t ml_len; ml_len = cfg80211_defragment_element(elems_parse->ml_reconf_elem, elems->ie_start, elems->total_len, elems_parse->scratch_pos, elems_parse->scratch + elems_parse->scratch_len - elems_parse->scratch_pos, WLAN_EID_FRAGMENT); if (ml_len < 0) return; elems->ml_reconf = (void *)elems_parse->scratch_pos; elems->ml_reconf_len = ml_len; elems_parse->scratch_pos += ml_len; } static void ieee80211_mle_defrag_epcs(struct ieee80211_elems_parse *elems_parse) { struct ieee802_11_elems *elems = &elems_parse->elems; ssize_t ml_len; ml_len = cfg80211_defragment_element(elems_parse->ml_epcs_elem, elems->ie_start, elems->total_len, elems_parse->scratch_pos, elems_parse->scratch + elems_parse->scratch_len - elems_parse->scratch_pos, WLAN_EID_FRAGMENT); if (ml_len < 0) return; elems->ml_epcs = (void *)elems_parse->scratch_pos; elems->ml_epcs_len = ml_len; elems_parse->scratch_pos += ml_len; } struct ieee802_11_elems * ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params) { struct ieee80211_elems_parse *elems_parse; struct ieee802_11_elems *elems; const struct element *non_inherit = NULL; u8 *nontransmitted_profile; int nontransmitted_profile_len = 0; size_t scratch_len = 3 * params->len; BUILD_BUG_ON(offsetof(typeof(*elems_parse), elems) != 0); elems_parse = kzalloc(struct_size(elems_parse, scratch, scratch_len), GFP_ATOMIC); if (!elems_parse) return NULL; elems_parse->scratch_len = scratch_len; elems_parse->scratch_pos = elems_parse->scratch; elems = &elems_parse->elems; elems->ie_start = params->start; elems->total_len = params->len; /* set all TPE entries to unlimited (but invalid) */ ieee80211_clear_tpe(&elems->tpe); ieee80211_clear_tpe(&elems->csa_tpe); nontransmitted_profile = elems_parse->scratch_pos; nontransmitted_profile_len = ieee802_11_find_bssid_profile(params->start, params->len, elems, params->bss, nontransmitted_profile); elems_parse->scratch_pos += nontransmitted_profile_len; non_inherit = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, nontransmitted_profile, nontransmitted_profile_len); elems->crc = _ieee802_11_parse_elems_full(params, elems_parse, non_inherit); /* Override with nontransmitted profile, if found */ if (nontransmitted_profile_len) { struct ieee80211_elems_parse_params sub = { .mode = params->mode, .start = nontransmitted_profile, .len = nontransmitted_profile_len, .action = params->action, .link_id = params->link_id, }; _ieee802_11_parse_elems_full(&sub, elems_parse, NULL); } ieee80211_mle_parse_link(elems_parse, params); ieee80211_mle_defrag_reconf(elems_parse); ieee80211_mle_defrag_epcs(elems_parse); if (elems->tim && !elems->parse_error) { const struct ieee80211_tim_ie *tim_ie = elems->tim; elems->dtim_period = tim_ie->dtim_period; elems->dtim_count = tim_ie->dtim_count; } /* Override DTIM period and count if needed */ if (elems->bssid_index && elems->bssid_index_len >= offsetofend(struct ieee80211_bssid_index, dtim_period)) elems->dtim_period = elems->bssid_index->dtim_period; if (elems->bssid_index && elems->bssid_index_len >= offsetofend(struct ieee80211_bssid_index, dtim_count)) elems->dtim_count = 
elems->bssid_index->dtim_count; return elems; } EXPORT_SYMBOL_IF_KUNIT(ieee802_11_parse_elems_full); int ieee80211_parse_bitrates(enum nl80211_chan_width width, const struct ieee80211_supported_band *sband, const u8 *srates, int srates_len, u32 *rates) { u32 rate_flags = ieee80211_chanwidth_rate_flags(width); struct ieee80211_rate *br; int brate, rate, i, j, count = 0; *rates = 0; for (i = 0; i < srates_len; i++) { rate = srates[i] & 0x7f; for (j = 0; j < sband->n_bitrates; j++) { br = &sband->bitrates[j]; if ((rate_flags & br->flags) != rate_flags) continue; brate = DIV_ROUND_UP(br->bitrate, 5); if (brate == rate) { *rates |= BIT(j); count++; break; } } } return count; }
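The rate-matching loop in ieee80211_parse_bitrates() above lines up two different unit systems: octets from the (Extended) Supported Rates element carry the rate in 500 kbps units in their low 7 bits (the top bit flags a basic rate), while the band's bitrate table is kept in 100 kbps units, hence the DIV_ROUND_UP(bitrate, 5). A standalone sketch of that comparison follows; it is an illustration only, not kernel code, and the sample rate values are made up.

#include <stdio.h>

/* Local stand-in for the kernel's DIV_ROUND_UP() helper. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Sample band bitrates in 100 kbps units (6 Mbps and 54 Mbps). */
static const int band_bitrates[] = { 60, 540 };

static int rate_matches(unsigned char srate_octet, int bitrate_100kbps)
{
	int rate = srate_octet & 0x7f;			/* 500 kbps units */
	int brate = DIV_ROUND_UP(bitrate_100kbps, 5);	/* also 500 kbps units */

	return rate == brate;
}

int main(void)
{
	/* 0x8c: basic-rate bit set, rate 12 * 500 kbps = 6 Mbps */
	printf("0x8c vs 6 Mbps:  %d\n", rate_matches(0x8c, band_bitrates[0]));
	/* 0x6c: rate 108 * 500 kbps = 54 Mbps */
	printf("0x6c vs 54 Mbps: %d\n", rate_matches(0x6c, band_bitrates[1]));
	return 0;
}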
913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2002 Petko Manolov (petkan@users.sourceforge.net) */ #include <linux/signal.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/usb.h> #include <linux/uaccess.h> /* Version Information */ #define DRIVER_VERSION "v0.6.2 (2004/08/27)" #define DRIVER_AUTHOR "Petko Manolov <petkan@users.sourceforge.net>" #define DRIVER_DESC "rtl8150 based usb-ethernet driver" #define IDR 0x0120 #define MAR 0x0126 #define CR 0x012e #define TCR 0x012f #define RCR 0x0130 #define TSR 0x0132 #define RSR 0x0133 #define CON0 0x0135 #define CON1 0x0136 #define MSR 0x0137 #define PHYADD 0x0138 #define PHYDAT 0x0139 #define PHYCNT 0x013b #define GPPC 0x013d #define BMCR 0x0140 #define BMSR 0x0142 #define ANAR 0x0144 #define ANLP 0x0146 #define AER 0x0148 #define CSCR 0x014C /* This one has the link status */ #define CSCR_LINK_STATUS (1 << 3) #define IDR_EEPROM 0x1202 #define PHY_READ 0 #define PHY_WRITE 0x20 #define PHY_GO 0x40 #define MII_TIMEOUT 10 #define INTBUFSIZE 8 #define RTL8150_REQT_READ 0xc0 #define RTL8150_REQT_WRITE 0x40 #define RTL8150_REQ_GET_REGS 0x05 #define RTL8150_REQ_SET_REGS 0x05 /* Transmit status register errors */ #define TSR_ECOL (1<<5) #define TSR_LCOL (1<<4) #define TSR_LOSS_CRS (1<<3) #define TSR_JBR (1<<2) #define TSR_ERRORS (TSR_ECOL | TSR_LCOL | TSR_LOSS_CRS | TSR_JBR) /* Receive status register errors */ #define RSR_CRC (1<<2) #define RSR_FAE (1<<1) #define RSR_ERRORS (RSR_CRC | RSR_FAE) /* Media status register definitions */ #define MSR_DUPLEX (1<<4) #define MSR_SPEED (1<<3) #define MSR_LINK (1<<2) /* USB endpoints */ enum rtl8150_usb_ep { RTL8150_USB_EP_CONTROL = 0, RTL8150_USB_EP_BULK_IN = 1, RTL8150_USB_EP_BULK_OUT = 2, RTL8150_USB_EP_INT_IN = 3, }; /* Interrupt pipe data */ #define INT_TSR 0x00 #define INT_RSR 0x01 #define INT_MSR 0x02 #define INT_WAKSR 0x03 #define INT_TXOK_CNT 0x04 #define INT_RXLOST_CNT 0x05 #define INT_CRERR_CNT 0x06 #define INT_COL_CNT 0x07 #define RTL8150_MTU 1540 #define RTL8150_TX_TIMEOUT (HZ) #define RX_SKB_POOL_SIZE 4 /* rtl8150 flags */ #define RTL8150_HW_CRC 0 #define RX_REG_SET 1 #define RTL8150_UNPLUG 2 #define RX_URB_FAIL 3 /* Define these values to match your device */ #define VENDOR_ID_REALTEK 0x0bda #define VENDOR_ID_MELCO 0x0411 #define VENDOR_ID_MICRONET 0x3980 #define VENDOR_ID_LONGSHINE 0x07b8 #define VENDOR_ID_OQO 0x1557 #define VENDOR_ID_ZYXEL 0x0586 #define PRODUCT_ID_RTL8150 0x8150 #define PRODUCT_ID_LUAKTX 0x0012 #define PRODUCT_ID_LCS8138TX 0x401a #define PRODUCT_ID_SP128AR 0x0003 #define PRODUCT_ID_PRESTIGE 0x401a #undef EEPROM_WRITE /* table of devices that work with this driver */ static const struct usb_device_id rtl8150_table[] = { {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8150)}, {USB_DEVICE(VENDOR_ID_MELCO, PRODUCT_ID_LUAKTX)}, {USB_DEVICE(VENDOR_ID_MICRONET, PRODUCT_ID_SP128AR)}, {USB_DEVICE(VENDOR_ID_LONGSHINE, PRODUCT_ID_LCS8138TX)}, {USB_DEVICE(VENDOR_ID_OQO, PRODUCT_ID_RTL8150)}, {USB_DEVICE(VENDOR_ID_ZYXEL, PRODUCT_ID_PRESTIGE)}, {} }; MODULE_DEVICE_TABLE(usb, rtl8150_table); struct rtl8150 { unsigned long flags; struct usb_device *udev; struct tasklet_struct tl; struct 
net_device *netdev; struct urb *rx_urb, *tx_urb, *intr_urb; struct sk_buff *tx_skb, *rx_skb; struct sk_buff *rx_skb_pool[RX_SKB_POOL_SIZE]; spinlock_t rx_pool_lock; struct usb_ctrlrequest dr; int intr_interval; u8 *intr_buff; u8 phy; }; typedef struct rtl8150 rtl8150_t; struct async_req { struct usb_ctrlrequest dr; u16 rx_creg; }; static const char driver_name [] = "rtl8150"; /* ** ** device related part of the code ** */ static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) { return usb_control_msg_recv(dev->udev, 0, RTL8150_REQ_GET_REGS, RTL8150_REQT_READ, indx, 0, data, size, 1000, GFP_NOIO); } static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data) { return usb_control_msg_send(dev->udev, 0, RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE, indx, 0, data, size, 1000, GFP_NOIO); } static void async_set_reg_cb(struct urb *urb) { struct async_req *req = (struct async_req *)urb->context; int status = urb->status; if (status < 0) dev_dbg(&urb->dev->dev, "%s failed with %d", __func__, status); kfree(req); usb_free_urb(urb); } static int async_set_registers(rtl8150_t *dev, u16 indx, u16 size, u16 reg) { int res = -ENOMEM; struct urb *async_urb; struct async_req *req; req = kmalloc(sizeof(struct async_req), GFP_ATOMIC); if (req == NULL) return res; async_urb = usb_alloc_urb(0, GFP_ATOMIC); if (async_urb == NULL) { kfree(req); return res; } req->rx_creg = cpu_to_le16(reg); req->dr.bRequestType = RTL8150_REQT_WRITE; req->dr.bRequest = RTL8150_REQ_SET_REGS; req->dr.wIndex = 0; req->dr.wValue = cpu_to_le16(indx); req->dr.wLength = cpu_to_le16(size); usb_fill_control_urb(async_urb, dev->udev, usb_sndctrlpipe(dev->udev, 0), (void *)&req->dr, &req->rx_creg, size, async_set_reg_cb, req); res = usb_submit_urb(async_urb, GFP_ATOMIC); if (res) { if (res == -ENODEV) netif_device_detach(dev->netdev); dev_err(&dev->udev->dev, "%s failed with %d\n", __func__, res); } return res; } static int read_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 * reg) { int i; u8 data[3], tmp; data[0] = phy; data[1] = data[2] = 0; tmp = indx | PHY_READ | PHY_GO; i = 0; set_registers(dev, PHYADD, sizeof(data), data); set_registers(dev, PHYCNT, 1, &tmp); do { get_registers(dev, PHYCNT, 1, data); } while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT)); if (i <= MII_TIMEOUT) { get_registers(dev, PHYDAT, 2, data); *reg = data[0] | (data[1] << 8); return 0; } else return 1; } static int write_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 reg) { int i; u8 data[3], tmp; data[0] = phy; data[1] = reg & 0xff; data[2] = (reg >> 8) & 0xff; tmp = indx | PHY_WRITE | PHY_GO; i = 0; set_registers(dev, PHYADD, sizeof(data), data); set_registers(dev, PHYCNT, 1, &tmp); do { get_registers(dev, PHYCNT, 1, data); } while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT)); if (i <= MII_TIMEOUT) return 0; else return 1; } static void set_ethernet_addr(rtl8150_t *dev) { u8 node_id[ETH_ALEN]; int ret; ret = get_registers(dev, IDR, sizeof(node_id), node_id); if (!ret) { eth_hw_addr_set(dev->netdev, node_id); } else { eth_hw_addr_random(dev->netdev); netdev_notice(dev->netdev, "Assigned a random MAC address: %pM\n", dev->netdev->dev_addr); } } static int rtl8150_set_mac_address(struct net_device *netdev, void *p) { struct sockaddr *addr = p; rtl8150_t *dev = netdev_priv(netdev); if (netif_running(netdev)) return -EBUSY; eth_hw_addr_set(netdev, addr->sa_data); netdev_dbg(netdev, "Setting MAC address to %pM\n", netdev->dev_addr); /* Set the IDR registers. 
*/ set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr); #ifdef EEPROM_WRITE { int i; u8 cr; /* Get the CR contents. */ get_registers(dev, CR, 1, &cr); /* Set the WEPROM bit (eeprom write enable). */ cr |= 0x20; set_registers(dev, CR, 1, &cr); /* Write the MAC address into eeprom. Eeprom writes must be word-sized, so we need to split them up. */ for (i = 0; i * 2 < netdev->addr_len; i++) { set_registers(dev, IDR_EEPROM + (i * 2), 2, netdev->dev_addr + (i * 2)); } /* Clear the WEPROM bit (preventing accidental eeprom writes). */ cr &= 0xdf; set_registers(dev, CR, 1, &cr); } #endif return 0; } static int rtl8150_reset(rtl8150_t * dev) { u8 data = 0x10; int i = HZ; set_registers(dev, CR, 1, &data); do { get_registers(dev, CR, 1, &data); } while ((data & 0x10) && --i); return (i > 0) ? 1 : 0; } static int alloc_all_urbs(rtl8150_t * dev) { dev->rx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->rx_urb) return 0; dev->tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->tx_urb) { usb_free_urb(dev->rx_urb); return 0; } dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->intr_urb) { usb_free_urb(dev->rx_urb); usb_free_urb(dev->tx_urb); return 0; } return 1; } static void free_all_urbs(rtl8150_t * dev) { usb_free_urb(dev->rx_urb); usb_free_urb(dev->tx_urb); usb_free_urb(dev->intr_urb); } static void unlink_all_urbs(rtl8150_t * dev) { usb_kill_urb(dev->rx_urb); usb_kill_urb(dev->tx_urb); usb_kill_urb(dev->intr_urb); } static inline struct sk_buff *pull_skb(rtl8150_t *dev) { struct sk_buff *skb; int i; for (i = 0; i < RX_SKB_POOL_SIZE; i++) { if (dev->rx_skb_pool[i]) { skb = dev->rx_skb_pool[i]; dev->rx_skb_pool[i] = NULL; return skb; } } return NULL; } static void read_bulk_callback(struct urb *urb) { rtl8150_t *dev; unsigned pkt_len, res; struct sk_buff *skb; struct net_device *netdev; int status = urb->status; int result; unsigned long flags; dev = urb->context; if (!dev) return; if (test_bit(RTL8150_UNPLUG, &dev->flags)) return; netdev = dev->netdev; if (!netif_device_present(netdev)) return; switch (status) { case 0: break; case -ENOENT: return; /* the urb is in unlink state */ case -ETIME: if (printk_ratelimit()) dev_warn(&urb->dev->dev, "may be reset is needed?..\n"); goto goon; default: if (printk_ratelimit()) dev_warn(&urb->dev->dev, "Rx status %d\n", status); goto goon; } if (!dev->rx_skb) goto resched; /* protect against short packets (tell me why we got some?!?) 
*/ if (urb->actual_length < 4) goto goon; res = urb->actual_length; pkt_len = res - 4; skb_put(dev->rx_skb, pkt_len); dev->rx_skb->protocol = eth_type_trans(dev->rx_skb, netdev); netif_rx(dev->rx_skb); netdev->stats.rx_packets++; netdev->stats.rx_bytes += pkt_len; spin_lock_irqsave(&dev->rx_pool_lock, flags); skb = pull_skb(dev); spin_unlock_irqrestore(&dev->rx_pool_lock, flags); if (!skb) goto resched; dev->rx_skb = skb; goon: usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, dev); result = usb_submit_urb(dev->rx_urb, GFP_ATOMIC); if (result == -ENODEV) netif_device_detach(dev->netdev); else if (result) { set_bit(RX_URB_FAIL, &dev->flags); goto resched; } else { clear_bit(RX_URB_FAIL, &dev->flags); } return; resched: tasklet_schedule(&dev->tl); } static void write_bulk_callback(struct urb *urb) { rtl8150_t *dev; int status = urb->status; dev = urb->context; if (!dev) return; dev_kfree_skb_irq(dev->tx_skb); if (!netif_device_present(dev->netdev)) return; if (status) dev_info(&urb->dev->dev, "%s: Tx status %d\n", dev->netdev->name, status); netif_trans_update(dev->netdev); netif_wake_queue(dev->netdev); } static void intr_callback(struct urb *urb) { rtl8150_t *dev; __u8 *d; int status = urb->status; int res; dev = urb->context; if (!dev) return; switch (status) { case 0: /* success */ break; case -ECONNRESET: /* unlink */ case -ENOENT: case -ESHUTDOWN: return; /* -EPIPE: should clear the halt */ default: dev_info(&urb->dev->dev, "%s: intr status %d\n", dev->netdev->name, status); goto resubmit; } d = urb->transfer_buffer; if (d[0] & TSR_ERRORS) { dev->netdev->stats.tx_errors++; if (d[INT_TSR] & (TSR_ECOL | TSR_JBR)) dev->netdev->stats.tx_aborted_errors++; if (d[INT_TSR] & TSR_LCOL) dev->netdev->stats.tx_window_errors++; if (d[INT_TSR] & TSR_LOSS_CRS) dev->netdev->stats.tx_carrier_errors++; } /* Report link status changes to the network stack */ if ((d[INT_MSR] & MSR_LINK) == 0) { if (netif_carrier_ok(dev->netdev)) { netif_carrier_off(dev->netdev); netdev_dbg(dev->netdev, "%s: LINK LOST\n", __func__); } } else { if (!netif_carrier_ok(dev->netdev)) { netif_carrier_on(dev->netdev); netdev_dbg(dev->netdev, "%s: LINK CAME BACK\n", __func__); } } resubmit: res = usb_submit_urb (urb, GFP_ATOMIC); if (res == -ENODEV) netif_device_detach(dev->netdev); else if (res) dev_err(&dev->udev->dev, "can't resubmit intr, %s-%s/input0, status %d\n", dev->udev->bus->bus_name, dev->udev->devpath, res); } static int rtl8150_suspend(struct usb_interface *intf, pm_message_t message) { rtl8150_t *dev = usb_get_intfdata(intf); netif_device_detach(dev->netdev); if (netif_running(dev->netdev)) { usb_kill_urb(dev->rx_urb); usb_kill_urb(dev->intr_urb); } return 0; } static int rtl8150_resume(struct usb_interface *intf) { rtl8150_t *dev = usb_get_intfdata(intf); netif_device_attach(dev->netdev); if (netif_running(dev->netdev)) { dev->rx_urb->status = 0; dev->rx_urb->actual_length = 0; read_bulk_callback(dev->rx_urb); dev->intr_urb->status = 0; dev->intr_urb->actual_length = 0; intr_callback(dev->intr_urb); } return 0; } /* ** ** network related part of the code ** */ static void fill_skb_pool(rtl8150_t *dev) { struct sk_buff *skb; int i; for (i = 0; i < RX_SKB_POOL_SIZE; i++) { if (dev->rx_skb_pool[i]) continue; skb = dev_alloc_skb(RTL8150_MTU + 2); if (!skb) { return; } skb_reserve(skb, 2); dev->rx_skb_pool[i] = skb; } } static void free_skb_pool(rtl8150_t *dev) { int i; for (i = 0; i < RX_SKB_POOL_SIZE; i++) dev_kfree_skb(dev->rx_skb_pool[i]); } 
static void rx_fixup(struct tasklet_struct *t) { struct rtl8150 *dev = from_tasklet(dev, t, tl); struct sk_buff *skb; int status; spin_lock_irq(&dev->rx_pool_lock); fill_skb_pool(dev); spin_unlock_irq(&dev->rx_pool_lock); if (test_bit(RX_URB_FAIL, &dev->flags)) if (dev->rx_skb) goto try_again; spin_lock_irq(&dev->rx_pool_lock); skb = pull_skb(dev); spin_unlock_irq(&dev->rx_pool_lock); if (skb == NULL) goto tlsched; dev->rx_skb = skb; usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, dev); try_again: status = usb_submit_urb(dev->rx_urb, GFP_ATOMIC); if (status == -ENODEV) { netif_device_detach(dev->netdev); } else if (status) { set_bit(RX_URB_FAIL, &dev->flags); goto tlsched; } else { clear_bit(RX_URB_FAIL, &dev->flags); } return; tlsched: tasklet_schedule(&dev->tl); } static int enable_net_traffic(rtl8150_t * dev) { u8 cr, tcr, rcr, msr; if (!rtl8150_reset(dev)) { dev_warn(&dev->udev->dev, "device reset failed\n"); } /* RCR bit7=1 attach Rx info at the end; =0 HW CRC (which is broken) */ rcr = 0x9e; tcr = 0xd8; cr = 0x0c; if (!(rcr & 0x80)) set_bit(RTL8150_HW_CRC, &dev->flags); set_registers(dev, RCR, 1, &rcr); set_registers(dev, TCR, 1, &tcr); set_registers(dev, CR, 1, &cr); get_registers(dev, MSR, 1, &msr); return 0; } static void disable_net_traffic(rtl8150_t * dev) { u8 cr; get_registers(dev, CR, 1, &cr); cr &= 0xf3; set_registers(dev, CR, 1, &cr); } static void rtl8150_tx_timeout(struct net_device *netdev, unsigned int txqueue) { rtl8150_t *dev = netdev_priv(netdev); dev_warn(&netdev->dev, "Tx timeout.\n"); usb_unlink_urb(dev->tx_urb); netdev->stats.tx_errors++; } static void rtl8150_set_multicast(struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); u16 rx_creg = 0x9e; netif_stop_queue(netdev); if (netdev->flags & IFF_PROMISC) { rx_creg |= 0x0001; dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name); } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) { rx_creg &= 0xfffe; rx_creg |= 0x0002; dev_dbg(&netdev->dev, "%s: allmulti set\n", netdev->name); } else { /* ~RX_MULTICAST, ~RX_PROMISCUOUS */ rx_creg &= 0x00fc; } async_set_registers(dev, RCR, sizeof(rx_creg), rx_creg); netif_wake_queue(netdev); } static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb, struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); int count, res; netif_stop_queue(netdev); count = (skb->len < 60) ? 60 : skb->len; count = (count & 0x3f) ? count : count + 1; dev->tx_skb = skb; usb_fill_bulk_urb(dev->tx_urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), skb->data, count, write_bulk_callback, dev); if ((res = usb_submit_urb(dev->tx_urb, GFP_ATOMIC))) { /* Can we get/handle EPIPE here? 
*/ if (res == -ENODEV) netif_device_detach(dev->netdev); else { dev_warn(&netdev->dev, "failed tx_urb %d\n", res); netdev->stats.tx_errors++; netif_start_queue(netdev); } } else { netdev->stats.tx_packets++; netdev->stats.tx_bytes += skb->len; netif_trans_update(netdev); } return NETDEV_TX_OK; } static void set_carrier(struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); short tmp; get_registers(dev, CSCR, 2, &tmp); if (tmp & CSCR_LINK_STATUS) netif_carrier_on(netdev); else netif_carrier_off(netdev); } static int rtl8150_open(struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); int res; if (dev->rx_skb == NULL) dev->rx_skb = pull_skb(dev); if (!dev->rx_skb) return -ENOMEM; set_registers(dev, IDR, 6, netdev->dev_addr); usb_fill_bulk_urb(dev->rx_urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), dev->rx_skb->data, RTL8150_MTU, read_bulk_callback, dev); if ((res = usb_submit_urb(dev->rx_urb, GFP_KERNEL))) { if (res == -ENODEV) netif_device_detach(dev->netdev); dev_warn(&netdev->dev, "rx_urb submit failed: %d\n", res); return res; } usb_fill_int_urb(dev->intr_urb, dev->udev, usb_rcvintpipe(dev->udev, 3), dev->intr_buff, INTBUFSIZE, intr_callback, dev, dev->intr_interval); if ((res = usb_submit_urb(dev->intr_urb, GFP_KERNEL))) { if (res == -ENODEV) netif_device_detach(dev->netdev); dev_warn(&netdev->dev, "intr_urb submit failed: %d\n", res); usb_kill_urb(dev->rx_urb); return res; } enable_net_traffic(dev); set_carrier(netdev); netif_start_queue(netdev); return res; } static int rtl8150_close(struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); netif_stop_queue(netdev); if (!test_bit(RTL8150_UNPLUG, &dev->flags)) disable_net_traffic(dev); unlink_all_urbs(dev); return 0; } static void rtl8150_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { rtl8150_t *dev = netdev_priv(netdev); strscpy(info->driver, driver_name, sizeof(info->driver)); strscpy(info->version, DRIVER_VERSION, sizeof(info->version)); usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info)); } static int rtl8150_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *ecmd) { rtl8150_t *dev = netdev_priv(netdev); short lpa = 0; short bmcr = 0; u32 supported; supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); ecmd->base.port = PORT_TP; ecmd->base.phy_address = dev->phy; get_registers(dev, BMCR, 2, &bmcr); get_registers(dev, ANLP, 2, &lpa); if (bmcr & BMCR_ANENABLE) { u32 speed = ((lpa & (LPA_100HALF | LPA_100FULL)) ? SPEED_100 : SPEED_10); ecmd->base.speed = speed; ecmd->base.autoneg = AUTONEG_ENABLE; if (speed == SPEED_100) ecmd->base.duplex = (lpa & LPA_100FULL) ? DUPLEX_FULL : DUPLEX_HALF; else ecmd->base.duplex = (lpa & LPA_10FULL) ? DUPLEX_FULL : DUPLEX_HALF; } else { ecmd->base.autoneg = AUTONEG_DISABLE; ecmd->base.speed = ((bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10); ecmd->base.duplex = (bmcr & BMCR_FULLDPLX) ? 
DUPLEX_FULL : DUPLEX_HALF; } ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported, supported); return 0; } static const struct ethtool_ops ops = { .get_drvinfo = rtl8150_get_drvinfo, .get_link = ethtool_op_get_link, .get_link_ksettings = rtl8150_get_link_ksettings, }; static int rtl8150_siocdevprivate(struct net_device *netdev, struct ifreq *rq, void __user *udata, int cmd) { rtl8150_t *dev = netdev_priv(netdev); u16 *data = (u16 *) & rq->ifr_ifru; int res = 0; switch (cmd) { case SIOCDEVPRIVATE: data[0] = dev->phy; fallthrough; case SIOCDEVPRIVATE + 1: read_mii_word(dev, dev->phy, (data[1] & 0x1f), &data[3]); break; case SIOCDEVPRIVATE + 2: if (!capable(CAP_NET_ADMIN)) return -EPERM; write_mii_word(dev, dev->phy, (data[1] & 0x1f), data[2]); break; default: res = -EOPNOTSUPP; } return res; } static const struct net_device_ops rtl8150_netdev_ops = { .ndo_open = rtl8150_open, .ndo_stop = rtl8150_close, .ndo_siocdevprivate = rtl8150_siocdevprivate, .ndo_start_xmit = rtl8150_start_xmit, .ndo_tx_timeout = rtl8150_tx_timeout, .ndo_set_rx_mode = rtl8150_set_multicast, .ndo_set_mac_address = rtl8150_set_mac_address, .ndo_validate_addr = eth_validate_addr, }; static int rtl8150_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); rtl8150_t *dev; struct net_device *netdev; static const u8 bulk_ep_addr[] = { RTL8150_USB_EP_BULK_IN | USB_DIR_IN, RTL8150_USB_EP_BULK_OUT | USB_DIR_OUT, 0}; static const u8 int_ep_addr[] = { RTL8150_USB_EP_INT_IN | USB_DIR_IN, 0}; netdev = alloc_etherdev(sizeof(rtl8150_t)); if (!netdev) return -ENOMEM; dev = netdev_priv(netdev); dev->intr_buff = kmalloc(INTBUFSIZE, GFP_KERNEL); if (!dev->intr_buff) { free_netdev(netdev); return -ENOMEM; } /* Verify that all required endpoints are present */ if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) || !usb_check_int_endpoints(intf, int_ep_addr)) { dev_err(&intf->dev, "couldn't find required endpoints\n"); goto out; } tasklet_setup(&dev->tl, rx_fixup); spin_lock_init(&dev->rx_pool_lock); dev->udev = udev; dev->netdev = netdev; netdev->netdev_ops = &rtl8150_netdev_ops; netdev->watchdog_timeo = RTL8150_TX_TIMEOUT; netdev->ethtool_ops = &ops; dev->intr_interval = 100; /* 100ms */ if (!alloc_all_urbs(dev)) { dev_err(&intf->dev, "out of memory\n"); goto out; } if (!rtl8150_reset(dev)) { dev_err(&intf->dev, "couldn't reset the device\n"); goto out1; } fill_skb_pool(dev); set_ethernet_addr(dev); usb_set_intfdata(intf, dev); SET_NETDEV_DEV(netdev, &intf->dev); if (register_netdev(netdev) != 0) { dev_err(&intf->dev, "couldn't register the device\n"); goto out2; } dev_info(&intf->dev, "%s: rtl8150 is detected\n", netdev->name); return 0; out2: usb_set_intfdata(intf, NULL); free_skb_pool(dev); out1: free_all_urbs(dev); out: kfree(dev->intr_buff); free_netdev(netdev); return -EIO; } static void rtl8150_disconnect(struct usb_interface *intf) { rtl8150_t *dev = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (dev) { set_bit(RTL8150_UNPLUG, &dev->flags); tasklet_kill(&dev->tl); unregister_netdev(dev->netdev); unlink_all_urbs(dev); free_all_urbs(dev); free_skb_pool(dev); dev_kfree_skb(dev->rx_skb); kfree(dev->intr_buff); free_netdev(dev->netdev); } } static struct usb_driver rtl8150_driver = { .name = driver_name, .probe = rtl8150_probe, .disconnect = rtl8150_disconnect, .id_table = rtl8150_table, .suspend = rtl8150_suspend, .resume = rtl8150_resume, .disable_hub_initiated_lpm = 1, }; module_usb_driver(rtl8150_driver); MODULE_AUTHOR(DRIVER_AUTHOR); 
MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
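A detail of rtl8150_start_xmit() above that is easy to miss is the transfer-length computation: frames are padded to the 60-byte Ethernet minimum, and a length that is an exact multiple of 64 is extended by one byte, presumably so the bulk-out transfer never ends exactly on a max-packet boundary (which would otherwise require a zero-length packet). A minimal standalone sketch of that arithmetic, with made-up sample lengths:

#include <stdio.h>

/* Mirrors the length computation in rtl8150_start_xmit(). */
static unsigned int rtl8150_tx_len(unsigned int skb_len)
{
	unsigned int count = (skb_len < 60) ? 60 : skb_len;

	/* a multiple of 64 bytes is extended by one byte */
	return (count & 0x3f) ? count : count + 1;
}

int main(void)
{
	printf("len 42  -> %u\n", rtl8150_tx_len(42));	/* padded to the 60-byte minimum */
	printf("len 64  -> %u\n", rtl8150_tx_len(64));	/* bumped to 65 */
	printf("len 100 -> %u\n", rtl8150_tx_len(100));	/* sent as-is */
	return 0;
}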
// SPDX-License-Identifier: GPL-2.0-or-later /* * Roccat common functions for device specific drivers * * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net> */ /* */ #include <linux/hid.h> #include <linux/slab.h> #include <linux/module.h> #include "hid-roccat-common.h" static inline uint16_t roccat_common2_feature_report(uint8_t report_id) { return 0x300 | report_id; } int roccat_common2_receive(struct usb_device *usb_dev, uint report_id, void *data, uint size) { char *buf; int len; buf = kmalloc(size, GFP_KERNEL); if (buf == NULL) return -ENOMEM; len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0), HID_REQ_GET_REPORT, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, roccat_common2_feature_report(report_id), 0, buf, size, USB_CTRL_SET_TIMEOUT); memcpy(data, buf, size); kfree(buf); return ((len < 0) ? len : ((len != size) ? -EIO : 0)); } EXPORT_SYMBOL_GPL(roccat_common2_receive); int roccat_common2_send(struct usb_device *usb_dev, uint report_id, void const *data, uint size) { char *buf; int len; buf = kmemdup(data, size, GFP_KERNEL); if (buf == NULL) return -ENOMEM; len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0), HID_REQ_SET_REPORT, USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT, roccat_common2_feature_report(report_id), 0, buf, size, USB_CTRL_SET_TIMEOUT); kfree(buf); return ((len < 0) ? len : ((len != size) ?
-EIO : 0)); } EXPORT_SYMBOL_GPL(roccat_common2_send); enum roccat_common2_control_states { ROCCAT_COMMON_CONTROL_STATUS_CRITICAL = 0, ROCCAT_COMMON_CONTROL_STATUS_OK = 1, ROCCAT_COMMON_CONTROL_STATUS_INVALID = 2, ROCCAT_COMMON_CONTROL_STATUS_BUSY = 3, ROCCAT_COMMON_CONTROL_STATUS_CRITICAL_NEW = 4, }; static int roccat_common2_receive_control_status(struct usb_device *usb_dev) { int retval; struct roccat_common2_control control; do { msleep(50); retval = roccat_common2_receive(usb_dev, ROCCAT_COMMON_COMMAND_CONTROL, &control, sizeof(struct roccat_common2_control)); if (retval) return retval; switch (control.value) { case ROCCAT_COMMON_CONTROL_STATUS_OK: return 0; case ROCCAT_COMMON_CONTROL_STATUS_BUSY: msleep(500); continue; case ROCCAT_COMMON_CONTROL_STATUS_INVALID: case ROCCAT_COMMON_CONTROL_STATUS_CRITICAL: case ROCCAT_COMMON_CONTROL_STATUS_CRITICAL_NEW: return -EINVAL; default: dev_err(&usb_dev->dev, "roccat_common2_receive_control_status: " "unknown response value 0x%x\n", control.value); return -EINVAL; } } while (1); } int roccat_common2_send_with_status(struct usb_device *usb_dev, uint command, void const *buf, uint size) { int retval; retval = roccat_common2_send(usb_dev, command, buf, size); if (retval) return retval; msleep(100); return roccat_common2_receive_control_status(usb_dev); } EXPORT_SYMBOL_GPL(roccat_common2_send_with_status); int roccat_common2_device_init_struct(struct usb_device *usb_dev, struct roccat_common2_device *dev) { mutex_init(&dev->lock); return 0; } EXPORT_SYMBOL_GPL(roccat_common2_device_init_struct); ssize_t roccat_common2_sysfs_read(struct file *fp, struct kobject *kobj, char *buf, loff_t off, size_t count, size_t real_size, uint command) { struct device *dev = kobj_to_dev(kobj)->parent->parent; struct roccat_common2_device *roccat_dev = hid_get_drvdata(dev_get_drvdata(dev)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); int retval; if (off >= real_size) return 0; if (off != 0 || count != real_size) return -EINVAL; mutex_lock(&roccat_dev->lock); retval = roccat_common2_receive(usb_dev, command, buf, real_size); mutex_unlock(&roccat_dev->lock); return retval ? retval : real_size; } EXPORT_SYMBOL_GPL(roccat_common2_sysfs_read); ssize_t roccat_common2_sysfs_write(struct file *fp, struct kobject *kobj, void const *buf, loff_t off, size_t count, size_t real_size, uint command) { struct device *dev = kobj_to_dev(kobj)->parent->parent; struct roccat_common2_device *roccat_dev = hid_get_drvdata(dev_get_drvdata(dev)); struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev)); int retval; if (off != 0 || count != real_size) return -EINVAL; mutex_lock(&roccat_dev->lock); retval = roccat_common2_send_with_status(usb_dev, command, buf, real_size); mutex_unlock(&roccat_dev->lock); return retval ? retval : real_size; } EXPORT_SYMBOL_GPL(roccat_common2_sysfs_write); MODULE_AUTHOR("Stefan Achatz"); MODULE_DESCRIPTION("USB Roccat common driver"); MODULE_LICENSE("GPL v2");
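As a rough usage sketch (not taken from any in-tree driver), a device-specific Roccat driver might wrap the helpers exported above as shown below. The report id 0x06 and the example_settings layout are invented for illustration; real drivers use the report ids and structures defined for their own hardware.

#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/usb.h>
#include "hid-roccat-common.h"

/* Hypothetical settings report id, invented for this sketch. */
#define EXAMPLE_COMMAND_SETTINGS 0x06

struct example_settings {	/* hypothetical layout */
	u8 report_id;
	u8 size;
	u8 profile;
} __packed;

static int example_write_settings(struct usb_device *usb_dev,
				  struct roccat_common2_device *roccat_dev,
				  struct example_settings const *settings)
{
	int retval;

	/* serialize with the sysfs paths above, which take the same lock */
	mutex_lock(&roccat_dev->lock);
	retval = roccat_common2_send_with_status(usb_dev,
						 EXAMPLE_COMMAND_SETTINGS,
						 settings, sizeof(*settings));
	mutex_unlock(&roccat_dev->lock);
	return retval;
}

static int example_read_settings(struct usb_device *usb_dev,
				 struct roccat_common2_device *roccat_dev,
				 struct example_settings *settings)
{
	int retval;

	mutex_lock(&roccat_dev->lock);
	retval = roccat_common2_receive(usb_dev, EXAMPLE_COMMAND_SETTINGS,
					settings, sizeof(*settings));
	mutex_unlock(&roccat_dev->lock);
	return retval;
}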
// SPDX-License-Identifier: GPL-2.0-or-later /* * Handle bridge arp/nd proxy/suppress * * Copyright (C) 2017 Cumulus Networks * Copyright (c) 2017 Roopa Prabhu <roopa@cumulusnetworks.com> * * Authors: * Roopa Prabhu <roopa@cumulusnetworks.com> */ #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/neighbour.h> #include <net/arp.h> #include <linux/if_vlan.h> #include <linux/inetdevice.h> #include <net/addrconf.h> #include <net/ipv6_stubs.h> #if IS_ENABLED(CONFIG_IPV6) #include <net/ip6_checksum.h> #endif #include "br_private.h" void br_recalculate_neigh_suppress_enabled(struct net_bridge *br) { struct net_bridge_port *p; bool neigh_suppress = false; list_for_each_entry(p, &br->port_list, list) { if (p->flags & (BR_NEIGH_SUPPRESS | BR_NEIGH_VLAN_SUPPRESS)) { neigh_suppress = true; break; } } br_opt_toggle(br, BROPT_NEIGH_SUPPRESS_ENABLED, neigh_suppress); } #if IS_ENABLED(CONFIG_INET) static void br_arp_send(struct net_bridge *br, struct net_bridge_port *p, struct net_device *dev, __be32 dest_ip, __be32 src_ip, const unsigned char *dest_hw, const unsigned char *src_hw, const unsigned char *target_hw, __be16 vlan_proto, u16 vlan_tci) { struct net_bridge_vlan_group *vg; struct sk_buff *skb; u16 pvid; netdev_dbg(dev, "arp send dev %s dst %pI4 dst_hw %pM src %pI4 src_hw %pM\n", dev->name, &dest_ip, dest_hw, &src_ip, src_hw); if (!vlan_tci) { arp_send(ARPOP_REPLY, ETH_P_ARP, dest_ip, dev, src_ip, dest_hw, src_hw, target_hw); return; } skb = arp_create(ARPOP_REPLY, ETH_P_ARP, dest_ip, dev, src_ip, dest_hw, src_hw, target_hw); if (!skb) return; if (p) vg =
nbp_vlan_group_rcu(p); else vg = br_vlan_group_rcu(br); pvid = br_get_pvid(vg); if (pvid == (vlan_tci & VLAN_VID_MASK)) vlan_tci = 0; if (vlan_tci) __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); if (p) { arp_xmit(skb); } else { skb_reset_mac_header(skb); __skb_pull(skb, skb_network_offset(skb)); skb->ip_summed = CHECKSUM_UNNECESSARY; skb->pkt_type = PACKET_HOST; netif_rx(skb); } } static int br_chk_addr_ip(struct net_device *dev, struct netdev_nested_priv *priv) { __be32 ip = *(__be32 *)priv->data; struct in_device *in_dev; __be32 addr = 0; in_dev = __in_dev_get_rcu(dev); if (in_dev) addr = inet_confirm_addr(dev_net(dev), in_dev, 0, ip, RT_SCOPE_HOST); if (addr == ip) return 1; return 0; } static bool br_is_local_ip(struct net_device *dev, __be32 ip) { struct netdev_nested_priv priv = { .data = (void *)&ip, }; if (br_chk_addr_ip(dev, &priv)) return true; /* check if ip is configured on upper dev */ if (netdev_walk_all_upper_dev_rcu(dev, br_chk_addr_ip, &priv)) return true; return false; } void br_do_proxy_suppress_arp(struct sk_buff *skb, struct net_bridge *br, u16 vid, struct net_bridge_port *p) { struct net_device *dev = br->dev; struct net_device *vlandev = dev; struct neighbour *n; struct arphdr *parp; u8 *arpptr, *sha; __be32 sip, tip; BR_INPUT_SKB_CB(skb)->proxyarp_replied = 0; if ((dev->flags & IFF_NOARP) || !pskb_may_pull(skb, arp_hdr_len(dev))) return; parp = arp_hdr(skb); if (parp->ar_pro != htons(ETH_P_IP) || parp->ar_hln != dev->addr_len || parp->ar_pln != 4) return; arpptr = (u8 *)parp + sizeof(struct arphdr); sha = arpptr; arpptr += dev->addr_len; /* sha */ memcpy(&sip, arpptr, sizeof(sip)); arpptr += sizeof(sip); arpptr += dev->addr_len; /* tha */ memcpy(&tip, arpptr, sizeof(tip)); if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip)) return; if (br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) { if (br_is_neigh_suppress_enabled(p, vid)) return; if (parp->ar_op != htons(ARPOP_RREQUEST) && parp->ar_op != htons(ARPOP_RREPLY) && (ipv4_is_zeronet(sip) || sip == tip)) { /* prevent flooding to neigh suppress ports */ BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1; return; } } if (parp->ar_op != htons(ARPOP_REQUEST)) return; if (vid != 0) { vlandev = __vlan_find_dev_deep_rcu(br->dev, skb->vlan_proto, vid); if (!vlandev) return; } if (br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) && br_is_local_ip(vlandev, tip)) { /* its our local ip, so don't proxy reply * and don't forward to neigh suppress ports */ BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1; return; } n = neigh_lookup(&arp_tbl, &tip, vlandev); if (n) { struct net_bridge_fdb_entry *f; if (!(READ_ONCE(n->nud_state) & NUD_VALID)) { neigh_release(n); return; } f = br_fdb_find_rcu(br, n->ha, vid); if (f) { bool replied = false; if ((p && (p->flags & BR_PROXYARP)) || (f->dst && (f->dst->flags & BR_PROXYARP_WIFI)) || br_is_neigh_suppress_enabled(f->dst, vid)) { if (!vid) br_arp_send(br, p, skb->dev, sip, tip, sha, n->ha, sha, 0, 0); else br_arp_send(br, p, skb->dev, sip, tip, sha, n->ha, sha, skb->vlan_proto, skb_vlan_tag_get(skb)); replied = true; } /* If we have replied or as long as we know the * mac, indicate to arp replied */ if (replied || br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1; } neigh_release(n); } } #endif #if IS_ENABLED(CONFIG_IPV6) struct nd_msg *br_is_nd_neigh_msg(const struct sk_buff *skb, struct nd_msg *msg) { struct nd_msg *m; m = skb_header_pointer(skb, skb_network_offset(skb) + sizeof(struct ipv6hdr), sizeof(*msg), msg); if (!m) return NULL; if (m->icmph.icmp6_code != 0 || 
(m->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION && m->icmph.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT)) return NULL; return m; } static void br_nd_send(struct net_bridge *br, struct net_bridge_port *p, struct sk_buff *request, struct neighbour *n, __be16 vlan_proto, u16 vlan_tci, struct nd_msg *ns) { struct net_device *dev = request->dev; struct net_bridge_vlan_group *vg; struct sk_buff *reply; struct nd_msg *na; struct ipv6hdr *pip6; int na_olen = 8; /* opt hdr + ETH_ALEN for target */ int ns_olen; int i, len; u8 *daddr; u16 pvid; if (!dev) return; len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) + sizeof(*na) + na_olen + dev->needed_tailroom; reply = alloc_skb(len, GFP_ATOMIC); if (!reply) return; reply->protocol = htons(ETH_P_IPV6); reply->dev = dev; skb_reserve(reply, LL_RESERVED_SPACE(dev)); skb_push(reply, sizeof(struct ethhdr)); skb_set_mac_header(reply, 0); daddr = eth_hdr(request)->h_source; /* Do we need option processing ? */ ns_olen = request->len - (skb_network_offset(request) + sizeof(struct ipv6hdr)) - sizeof(*ns); for (i = 0; i < ns_olen - 1; i += (ns->opt[i + 1] << 3)) { if (!ns->opt[i + 1]) { kfree_skb(reply); return; } if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { daddr = ns->opt + i + sizeof(struct nd_opt_hdr); break; } } /* Ethernet header */ ether_addr_copy(eth_hdr(reply)->h_dest, daddr); ether_addr_copy(eth_hdr(reply)->h_source, n->ha); eth_hdr(reply)->h_proto = htons(ETH_P_IPV6); reply->protocol = htons(ETH_P_IPV6); skb_pull(reply, sizeof(struct ethhdr)); skb_set_network_header(reply, 0); skb_put(reply, sizeof(struct ipv6hdr)); /* IPv6 header */ pip6 = ipv6_hdr(reply); memset(pip6, 0, sizeof(struct ipv6hdr)); pip6->version = 6; pip6->priority = ipv6_hdr(request)->priority; pip6->nexthdr = IPPROTO_ICMPV6; pip6->hop_limit = 255; pip6->daddr = ipv6_hdr(request)->saddr; pip6->saddr = *(struct in6_addr *)n->primary_key; skb_pull(reply, sizeof(struct ipv6hdr)); skb_set_transport_header(reply, 0); na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen); /* Neighbor Advertisement */ memset(na, 0, sizeof(*na) + na_olen); na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT; na->icmph.icmp6_router = (n->flags & NTF_ROUTER) ? 
1 : 0; na->icmph.icmp6_override = 1; na->icmph.icmp6_solicited = 1; na->target = ns->target; ether_addr_copy(&na->opt[2], n->ha); na->opt[0] = ND_OPT_TARGET_LL_ADDR; na->opt[1] = na_olen >> 3; na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, sizeof(*na) + na_olen, IPPROTO_ICMPV6, csum_partial(na, sizeof(*na) + na_olen, 0)); pip6->payload_len = htons(sizeof(*na) + na_olen); skb_push(reply, sizeof(struct ipv6hdr)); skb_push(reply, sizeof(struct ethhdr)); reply->ip_summed = CHECKSUM_UNNECESSARY; if (p) vg = nbp_vlan_group_rcu(p); else vg = br_vlan_group_rcu(br); pvid = br_get_pvid(vg); if (pvid == (vlan_tci & VLAN_VID_MASK)) vlan_tci = 0; if (vlan_tci) __vlan_hwaccel_put_tag(reply, vlan_proto, vlan_tci); netdev_dbg(dev, "nd send dev %s dst %pI6 dst_hw %pM src %pI6 src_hw %pM\n", dev->name, &pip6->daddr, daddr, &pip6->saddr, n->ha); if (p) { dev_queue_xmit(reply); } else { skb_reset_mac_header(reply); __skb_pull(reply, skb_network_offset(reply)); reply->ip_summed = CHECKSUM_UNNECESSARY; reply->pkt_type = PACKET_HOST; netif_rx(reply); } } static int br_chk_addr_ip6(struct net_device *dev, struct netdev_nested_priv *priv) { struct in6_addr *addr = (struct in6_addr *)priv->data; if (ipv6_chk_addr(dev_net(dev), addr, dev, 0)) return 1; return 0; } static bool br_is_local_ip6(struct net_device *dev, struct in6_addr *addr) { struct netdev_nested_priv priv = { .data = (void *)addr, }; if (br_chk_addr_ip6(dev, &priv)) return true; /* check if ip is configured on upper dev */ if (netdev_walk_all_upper_dev_rcu(dev, br_chk_addr_ip6, &priv)) return true; return false; } void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br, u16 vid, struct net_bridge_port *p, struct nd_msg *msg) { struct net_device *dev = br->dev; struct net_device *vlandev = NULL; struct in6_addr *saddr, *daddr; struct ipv6hdr *iphdr; struct neighbour *n; BR_INPUT_SKB_CB(skb)->proxyarp_replied = 0; if (br_is_neigh_suppress_enabled(p, vid)) return; if (msg->icmph.icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT && !msg->icmph.icmp6_solicited) { /* prevent flooding to neigh suppress ports */ BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1; return; } if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION) return; iphdr = ipv6_hdr(skb); saddr = &iphdr->saddr; daddr = &iphdr->daddr; if (ipv6_addr_any(saddr) || !ipv6_addr_cmp(saddr, daddr)) { /* prevent flooding to neigh suppress ports */ BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1; return; } if (vid != 0) { /* build neigh table lookup on the vlan device */ vlandev = __vlan_find_dev_deep_rcu(br->dev, skb->vlan_proto, vid); if (!vlandev) return; } else { vlandev = dev; } if (br_is_local_ip6(vlandev, &msg->target)) { /* its our own ip, so don't proxy reply * and don't forward to arp suppress ports */ BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1; return; } n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, vlandev); if (n) { struct net_bridge_fdb_entry *f; if (!(READ_ONCE(n->nud_state) & NUD_VALID)) { neigh_release(n); return; } f = br_fdb_find_rcu(br, n->ha, vid); if (f) { bool replied = false; if (br_is_neigh_suppress_enabled(f->dst, vid)) { if (vid != 0) br_nd_send(br, p, skb, n, skb->vlan_proto, skb_vlan_tag_get(skb), msg); else br_nd_send(br, p, skb, n, 0, 0, msg); replied = true; } /* If we have replied or as long as we know the * mac, indicate to NEIGH_SUPPRESS ports that we * have replied */ if (replied || br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) BR_INPUT_SKB_CB(skb)->proxyarp_replied = 1; } neigh_release(n); } } #endif bool br_is_neigh_suppress_enabled(const 
struct net_bridge_port *p, u16 vid) { if (!p) return false; if (!vid) return !!(p->flags & BR_NEIGH_SUPPRESS); if (p->flags & BR_NEIGH_VLAN_SUPPRESS) { struct net_bridge_vlan_group *vg = nbp_vlan_group_rcu(p); struct net_bridge_vlan *v; v = br_vlan_find(vg, vid); if (!v) return false; return !!(v->priv_flags & BR_VLFLAG_NEIGH_SUPPRESS_ENABLED); } else { return !!(p->flags & BR_NEIGH_SUPPRESS); } }
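For reference, the ICMPv6 checksum filled in by br_nd_send() above follows the standard pseudo-header pattern: compute a partial checksum over the ICMPv6 message, then fold in the IPv6 pseudo-header with csum_ipv6_magic(). A minimal sketch of that pattern in isolation (the helper name example_icmp6_fill_csum is hypothetical; br_nd_send() itself relies on the earlier memset() to zero the checksum field before summing):

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

static void example_icmp6_fill_csum(const struct ipv6hdr *ip6,
				    struct icmp6hdr *icmp6,
				    unsigned int icmp6_len)
{
	/* checksum field must be zero while the sum is computed */
	icmp6->icmp6_cksum = 0;
	icmp6->icmp6_cksum = csum_ipv6_magic(&ip6->saddr, &ip6->daddr,
					     icmp6_len, IPPROTO_ICMPV6,
					     csum_partial(icmp6, icmp6_len, 0));
}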
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/percpu.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>

#include "rds.h"
#include "tcp.h"

DEFINE_PER_CPU(struct rds_tcp_statistics, rds_tcp_stats)
	____cacheline_aligned;

static const char * const rds_tcp_stat_names[] = {
	"tcp_data_ready_calls",
	"tcp_write_space_calls",
	"tcp_sndbuf_full",
	"tcp_connect_raced",
	"tcp_listen_closed_stale",
};

unsigned int rds_tcp_stats_info_copy(struct rds_info_iterator *iter,
				     unsigned int avail)
{
	struct rds_tcp_statistics stats = {0, };
	uint64_t *src;
	uint64_t *sum;
	size_t i;
	int cpu;

	if (avail < ARRAY_SIZE(rds_tcp_stat_names))
		goto out;

	for_each_online_cpu(cpu) {
		src = (uint64_t *)&(per_cpu(rds_tcp_stats, cpu));
		sum = (uint64_t *)&stats;
		for (i = 0; i < sizeof(stats) / sizeof(uint64_t); i++)
			*(sum++) += *(src++);
	}

	rds_stats_info_copy(iter, (uint64_t *)&stats, rds_tcp_stat_names,
			    ARRAY_SIZE(rds_tcp_stat_names));
out:
	return ARRAY_SIZE(rds_tcp_stat_names);
}
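The file above only implements the read-out side of the per-CPU statistics: each CPU keeps its own copy of the counter struct, and a reader sums all copies when exporting them. A minimal sketch of the update side of that pattern, with hypothetical example_* names (the real RDS increment helpers live in its private headers):

#include <linux/percpu.h>
#include <linux/types.h>

struct example_stats {
	u64	s_events;		/* hypothetical counters */
	u64	s_errors;
};

static DEFINE_PER_CPU(struct example_stats, example_stats);

static inline void example_count_event(void)
{
	/* lock-free update of this CPU's copy; readers sum across CPUs */
	this_cpu_inc(example_stats.s_events);
}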
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _SCSI_SCSI_HOST_H #define _SCSI_SCSI_HOST_H #include <linux/device.h> #include <linux/list.h> #include <linux/types.h> #include <linux/workqueue.h> #include <linux/mutex.h> #include <linux/seq_file.h> #include <linux/blk-mq.h> #include <scsi/scsi.h> struct block_device; struct completion; struct module; struct scsi_cmnd; struct scsi_device; struct scsi_target; struct Scsi_Host; struct scsi_transport_template; #define SG_ALL SG_CHUNK_SIZE #define MODE_UNKNOWN 0x00 #define MODE_INITIATOR 0x01 #define MODE_TARGET 0x02 /** * enum scsi_timeout_action - How to handle a command that timed out. * @SCSI_EH_DONE: The command has already been completed. * @SCSI_EH_RESET_TIMER: Reset the timer and continue waiting for completion. * @SCSI_EH_NOT_HANDLED: The command has not yet finished. Abort the command. */ enum scsi_timeout_action { SCSI_EH_DONE, SCSI_EH_RESET_TIMER, SCSI_EH_NOT_HANDLED, }; struct scsi_host_template { /* * Put fields referenced in IO submission path together in * same cacheline */ /* * Additional per-command data allocated for the driver. */ unsigned int cmd_size; /* * The queuecommand function is used to queue up a scsi * command block to the LLDD. When the driver finished * processing the command the done callback is invoked. * * If queuecommand returns 0, then the driver has accepted the * command. It must also push it to the HBA if the scsi_cmnd * flag SCMD_LAST is set, or if the driver does not implement * commit_rqs. The done() function must be called on the command * when the driver has finished with it. (you may call done on the * command before queuecommand returns, but in this case you * *must* return 0 from queuecommand). * * Queuecommand may also reject the command, in which case it may * not touch the command and must not call done() for it. * * There are two possible rejection returns: * * SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but * allow commands to other devices serviced by this host. * * SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this * host temporarily. * * For compatibility, any other non-zero return is treated the * same as SCSI_MLQUEUE_HOST_BUSY. * * NOTE: "temporarily" means either until the next command for# * this device/host completes, or a period of time determined by * I/O pressure in the system if there are no other outstanding * commands. * * STATUS: REQUIRED */ int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *); /* * The commit_rqs function is used to trigger a hardware * doorbell after some requests have been queued with * queuecommand, when an error is encountered before sending * the request with SCMD_LAST set. * * STATUS: OPTIONAL */ void (*commit_rqs)(struct Scsi_Host *, u16); struct module *module; const char *name; /* * The info function will return whatever useful information the * developer sees fit. If not provided, then the name field will * be used instead. * * Status: OPTIONAL */ const char *(*info)(struct Scsi_Host *); /* * Ioctl interface * * Status: OPTIONAL */ int (*ioctl)(struct scsi_device *dev, unsigned int cmd, void __user *arg); #ifdef CONFIG_COMPAT /* * Compat handler. Handle 32bit ABI. * When unknown ioctl is passed return -ENOIOCTLCMD. * * Status: OPTIONAL */ int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd, void __user *arg); #endif int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd); int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd); /* * This is an error handling strategy routine. 
You don't need to * define one of these if you don't want to - there is a default * routine that is present that should work in most cases. For those * driver authors that have the inclination and ability to write their * own strategy routine, this is where it is specified. Note - the * strategy routine is *ALWAYS* run in the context of the kernel eh * thread. Thus you are guaranteed to *NOT* be in an interrupt * handler when you execute this, and you are also guaranteed to * *NOT* have any other commands being queued while you are in the * strategy routine. When you return from this function, operations * return to normal. * * See scsi_error.c scsi_unjam_host for additional comments about * what this function should and should not be attempting to do. * * Status: REQUIRED (at least one of them) */ int (* eh_abort_handler)(struct scsi_cmnd *); int (* eh_device_reset_handler)(struct scsi_cmnd *); int (* eh_target_reset_handler)(struct scsi_cmnd *); int (* eh_bus_reset_handler)(struct scsi_cmnd *); int (* eh_host_reset_handler)(struct scsi_cmnd *); /* * Before the mid layer attempts to scan for a new device where none * currently exists, it will call this entry in your driver. Should * your driver need to allocate any structs or perform any other init * items in order to send commands to a currently unused target/lun * combo, then this is where you can perform those allocations. This * is specifically so that drivers won't have to perform any kind of * "is this a new device" checks in their queuecommand routine, * thereby making the hot path a bit quicker. * * Return values: 0 on success, non-0 on failure * * Deallocation: If we didn't find any devices at this ID, you will * get an immediate call to sdev_destroy(). If we find something * here then you will get a call to sdev_configure(), then the * device will be used for however long it is kept around, then when * the device is removed from the system (or * possibly at reboot * time), you will then get a call to sdev_destroy(). This is * assuming you implement sdev_configure and sdev_destroy. * However, if you allocate memory and hang it off the device struct, * then you must implement the sdev_destroy() routine at a minimum * in order to avoid leaking memory * each time a device is tore down. * * Status: OPTIONAL */ int (* sdev_init)(struct scsi_device *); /* * Once the device has responded to an INQUIRY and we know the * device is online, we call into the low level driver with the * struct scsi_device *. If the low level device driver implements * this function, it *must* perform the task of setting the queue * depth on the device. All other tasks are optional and depend * on what the driver supports and various implementation details. * * Things currently recommended to be handled at this time include: * * 1. Setting the device queue depth. Proper setting of this is * described in the comments for scsi_change_queue_depth. * 2. Determining if the device supports the various synchronous * negotiation protocols. The device struct will already have * responded to INQUIRY and the results of the standard items * will have been shoved into the various device flag bits, eg. * device->sdtr will be true if the device supports SDTR messages. * 3. Allocating command structs that the device will need. * 4. Setting the default timeout on this device (if needed). * 5. Anything else the low level driver might want to do on a device * specific setup basis... * 6. Return 0 on success, non-0 on error. 
The device will be marked * as offline on error so that no access will occur. If you return * non-0, your sdev_destroy routine will never get called for this * device, so don't leave any loose memory hanging around, clean * up after yourself before returning non-0 * * Status: OPTIONAL */ int (* sdev_configure)(struct scsi_device *, struct queue_limits *lim); /* * Immediately prior to deallocating the device and after all activity * has ceased the mid layer calls this point so that the low level * driver may completely detach itself from the scsi device and vice * versa. The low level driver is responsible for freeing any memory * it allocated in the sdev_init or sdev_configure calls. * * Status: OPTIONAL */ void (* sdev_destroy)(struct scsi_device *); /* * Before the mid layer attempts to scan for a new device attached * to a target where no target currently exists, it will call this * entry in your driver. Should your driver need to allocate any * structs or perform any other init items in order to send commands * to a currently unused target, then this is where you can perform * those allocations. * * Return values: 0 on success, non-0 on failure * * Status: OPTIONAL */ int (* target_alloc)(struct scsi_target *); /* * Immediately prior to deallocating the target structure, and * after all activity to attached scsi devices has ceased, the * midlayer calls this point so that the driver may deallocate * and terminate any references to the target. * * Note: This callback is called with the host lock held and hence * must not sleep. * * Status: OPTIONAL */ void (* target_destroy)(struct scsi_target *); /* * If a host has the ability to discover targets on its own instead * of scanning the entire bus, it can fill in this function and * call scsi_scan_host(). This function will be called periodically * until it returns 1 with the scsi_host and the elapsed time of * the scan in jiffies. * * Status: OPTIONAL */ int (* scan_finished)(struct Scsi_Host *, unsigned long); /* * If the host wants to be called before the scan starts, but * after the midlayer has set up ready for the scan, it can fill * in this function. * * Status: OPTIONAL */ void (* scan_start)(struct Scsi_Host *); /* * Fill in this function to allow the queue depth of this host * to be changeable (on a per device basis). Returns either * the current queue depth setting (may be different from what * was passed in) or an error. An error should only be * returned if the requested depth is legal but the driver was * unable to set it. If the requested depth is illegal, the * driver should set and return the closest legal queue depth. * * Status: OPTIONAL */ int (* change_queue_depth)(struct scsi_device *, int); /* * This functions lets the driver expose the queue mapping * to the block layer. * * Status: OPTIONAL */ void (* map_queues)(struct Scsi_Host *shost); /* * SCSI interface of blk_poll - poll for IO completions. * Only applicable if SCSI LLD exposes multiple h/w queues. * * Return value: Number of completed entries found. * * Status: OPTIONAL */ int (* mq_poll)(struct Scsi_Host *shost, unsigned int queue_num); /* * Check if scatterlists need to be padded for DMA draining. * * Status: OPTIONAL */ bool (* dma_need_drain)(struct request *rq); /* * This function determines the BIOS parameters for a given * harddisk. These tend to be numbers that are made up by * the host adapter. 
Parameters: * size, device, list (heads, sectors, cylinders) * * Status: OPTIONAL */ int (* bios_param)(struct scsi_device *, struct block_device *, sector_t, int []); /* * This function is called when one or more partitions on the * device reach beyond the end of the device. * * Status: OPTIONAL */ void (*unlock_native_capacity)(struct scsi_device *); /* * Can be used to export driver statistics and other infos to the * world outside the kernel ie. userspace and it also provides an * interface to feed the driver with information. * * Status: OBSOLETE */ int (*show_info)(struct seq_file *, struct Scsi_Host *); int (*write_info)(struct Scsi_Host *, char *, int); /* * This is an optional routine that allows the transport to become * involved when a scsi io timer fires. The return value tells the * timer routine how to finish the io timeout handling. * * Status: OPTIONAL */ enum scsi_timeout_action (*eh_timed_out)(struct scsi_cmnd *); /* * Optional routine that allows the transport to decide if a cmd * is retryable. Return true if the transport is in a state the * cmd should be retried on. */ bool (*eh_should_retry_cmd)(struct scsi_cmnd *scmd); /* This is an optional routine that allows transport to initiate * LLD adapter or firmware reset using sysfs attribute. * * Return values: 0 on success, -ve value on failure. * * Status: OPTIONAL */ int (*host_reset)(struct Scsi_Host *shost, int reset_type); #define SCSI_ADAPTER_RESET 1 #define SCSI_FIRMWARE_RESET 2 /* * Name of proc directory */ const char *proc_name; /* * This determines if we will use a non-interrupt driven * or an interrupt driven scheme. It is set to the maximum number * of simultaneous commands a single hw queue in HBA will accept. */ int can_queue; /* * In many instances, especially where disconnect / reconnect are * supported, our host also has an ID on the SCSI bus. If this is * the case, then it must be reserved. Please set this_id to -1 if * your setup is in single initiator mode, and the host lacks an * ID. */ int this_id; /* * This determines the degree to which the host adapter is capable * of scatter-gather. */ unsigned short sg_tablesize; unsigned short sg_prot_tablesize; /* * Set this if the host adapter has limitations beside segment count. */ unsigned int max_sectors; /* * Maximum size in bytes of a single segment. */ unsigned int max_segment_size; unsigned int dma_alignment; /* * DMA scatter gather segment boundary limit. A segment crossing this * boundary will be split in two. */ unsigned long dma_boundary; unsigned long virt_boundary_mask; /* * This specifies "machine infinity" for host templates which don't * limit the transfer size. Note this limit represents an absolute * maximum, and may be over the transfer limits allowed for * individual devices (e.g. 256 for SCSI-1). */ #define SCSI_DEFAULT_MAX_SECTORS 1024 /* * True if this host adapter can make good use of linked commands. * This will allow more than one command to be queued to a given * unit on a given host. Set this to the maximum number of command * blocks to be provided for each device. Set this to 1 for one * command block per lun, 2 for two, etc. Do not set this to 0. * You should make sure that the host adapter will do the right thing * before you try setting this above 1. */ short cmd_per_lun; /* * Allocate tags starting from last allocated tag. */ bool tag_alloc_policy_rr : 1; /* * Track QUEUE_FULL events and reduce queue depth on demand. */ unsigned track_queue_depth:1; /* * This specifies the mode that a LLD supports. 
*/ unsigned supported_mode:2; /* * True for emulated SCSI host adapters (e.g. ATAPI). */ unsigned emulated:1; /* * True if the low-level driver performs its own reset-settle delays. */ unsigned skip_settle_delay:1; /* True if the controller does not support WRITE SAME */ unsigned no_write_same:1; /* True if the host uses host-wide tagspace */ unsigned host_tagset:1; /* The queuecommand callback may block. See also BLK_MQ_F_BLOCKING. */ unsigned queuecommand_may_block:1; /* * Countdown for host blocking with no commands outstanding. */ unsigned int max_host_blocked; /* * Default value for the blocking. If the queue is empty, * host_blocked counts down in the request_fn until it restarts * host operations as zero is reached. * * FIXME: This should probably be a value in the template */ #define SCSI_DEFAULT_HOST_BLOCKED 7 /* * Pointer to the SCSI host sysfs attribute groups, NULL terminated. */ const struct attribute_group **shost_groups; /* * Pointer to the SCSI device attribute groups for this host, * NULL terminated. */ const struct attribute_group **sdev_groups; /* * Vendor Identifier associated with the host * * Note: When specifying vendor_id, be sure to read the * Vendor Type and ID formatting requirements specified in * scsi_netlink.h */ u64 vendor_id; }; /* * Temporary #define for host lock push down. Can be removed when all * drivers have been updated to take advantage of unlocked * queuecommand. * */ #define DEF_SCSI_QCMD(func_name) \ int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd) \ { \ unsigned long irq_flags; \ int rc; \ spin_lock_irqsave(shost->host_lock, irq_flags); \ rc = func_name##_lck(cmd); \ spin_unlock_irqrestore(shost->host_lock, irq_flags); \ return rc; \ } /* * shost state: If you alter this, you also need to alter scsi_sysfs.c * (for the ascii descriptions) and the state model enforcer: * scsi_host_set_state() */ enum scsi_host_state { SHOST_CREATED = 1, SHOST_RUNNING, SHOST_CANCEL, SHOST_DEL, SHOST_RECOVERY, SHOST_CANCEL_RECOVERY, SHOST_DEL_RECOVERY, }; struct Scsi_Host { /* * __devices is protected by the host_lock, but you should * usually use scsi_device_lookup / shost_for_each_device * to access it and don't care about locking yourself. * In the rare case of being in irq context you can use * their __ prefixed variants with the lock held. NEVER * access this list directly from a driver. */ struct list_head __devices; struct list_head __targets; struct list_head starved_list; spinlock_t default_lock; spinlock_t *host_lock; struct mutex scan_mutex;/* serialize scanning activity */ struct list_head eh_abort_list; struct list_head eh_cmd_q; struct task_struct * ehandler; /* Error recovery thread. */ struct completion * eh_action; /* Wait for specific actions on the host. */ wait_queue_head_t host_wait; const struct scsi_host_template *hostt; struct scsi_transport_template *transportt; struct kref tagset_refcnt; struct completion tagset_freed; /* Area to keep a shared tag map */ struct blk_mq_tag_set tag_set; atomic_t host_blocked; unsigned int host_failed; /* commands that failed. protected by host_lock */ unsigned int host_eh_scheduled; /* EH scheduled without command */ unsigned int host_no; /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */ /* next two fields are used to bound the time spent in error handling */ int eh_deadline; unsigned long last_reset; /* * These three parameters can be used to allow for wide scsi, * and for host adapters that support multiple busses * The last two should be set to 1 more than the actual max id * or lun (e.g. 
8 for SCSI parallel systems). */ unsigned int max_channel; unsigned int max_id; u64 max_lun; /* * This is a unique identifier that must be assigned so that we * have some way of identifying each detected host adapter properly * and uniquely. For hosts that do not support more than one card * in the system at one time, this does not need to be set. It is * initialized to 0 in scsi_host_alloc. */ unsigned int unique_id; /* * The maximum length of SCSI commands that this host can accept. * Probably 12 for most host adapters, but could be 16 for others. * or 260 if the driver supports variable length cdbs. * For drivers that don't set this field, a value of 12 is * assumed. */ unsigned short max_cmd_len; int this_id; int can_queue; short cmd_per_lun; short unsigned int sg_tablesize; short unsigned int sg_prot_tablesize; unsigned int max_sectors; unsigned int opt_sectors; unsigned int max_segment_size; unsigned int dma_alignment; unsigned long dma_boundary; unsigned long virt_boundary_mask; /* * In scsi-mq mode, the number of hardware queues supported by the LLD. * * Note: it is assumed that each hardware queue has a queue depth of * can_queue. In other words, the total queue depth per host * is nr_hw_queues * can_queue. However, for when host_tagset is set, * the total queue depth is can_queue. */ unsigned nr_hw_queues; unsigned nr_maps; unsigned active_mode:2; /* * Host has requested that no further requests come through for the * time being. */ unsigned host_self_blocked:1; /* * Host uses correct SCSI ordering not PC ordering. The bit is * set for the minority of drivers whose authors actually read * the spec ;). */ unsigned reverse_ordering:1; /* Task mgmt function in progress */ unsigned tmf_in_progress:1; /* Asynchronous scan in progress */ unsigned async_scan:1; /* Don't resume host in EH */ unsigned eh_noresume:1; /* The controller does not support WRITE SAME */ unsigned no_write_same:1; /* True if the host uses host-wide tagspace */ unsigned host_tagset:1; /* The queuecommand callback may block. See also BLK_MQ_F_BLOCKING. */ unsigned queuecommand_may_block:1; /* Host responded with short (<36 bytes) INQUIRY result */ unsigned short_inquiry:1; /* The transport requires the LUN bits NOT to be stored in CDB[1] */ unsigned no_scsi2_lun_in_cdb:1; unsigned no_highmem:1; /* * Optional work queue to be utilized by the transport */ struct workqueue_struct *work_q; /* * Task management function work queue */ struct workqueue_struct *tmf_work_q; /* * Value host_blocked counts down from */ unsigned int max_host_blocked; /* Protection Information */ unsigned int prot_capabilities; unsigned char prot_guard_type; /* legacy crap */ unsigned long base; unsigned long io_port; unsigned char n_io_port; unsigned char dma_channel; unsigned int irq; enum scsi_host_state shost_state; /* ldm bits */ struct device shost_gendev, shost_dev; /* * Points to the transport data (if any) which is allocated * separately */ void *shost_data; /* * Points to the physical bus device we'd use to do DMA * Needed just in case we have virtual hosts. */ struct device *dma_dev; /* Delay for runtime autosuspend */ int rpm_autosuspend_delay; /* * We should ensure that this is aligned, both for better performance * and also because some compilers (m68k) don't automatically force * alignment to a long boundary. 
*/ unsigned long hostdata[] /* Used for storage of host specific stuff */ __attribute__ ((aligned (sizeof(unsigned long)))); }; #define class_to_shost(d) \ container_of(d, struct Scsi_Host, shost_dev) #define shost_printk(prefix, shost, fmt, a...) \ dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a) static inline void *shost_priv(struct Scsi_Host *shost) { return (void *)shost->hostdata; } int scsi_is_host_device(const struct device *); static inline struct Scsi_Host *dev_to_shost(struct device *dev) { while (!scsi_is_host_device(dev)) { if (!dev->parent) return NULL; dev = dev->parent; } return container_of(dev, struct Scsi_Host, shost_gendev); } static inline int scsi_host_in_recovery(struct Scsi_Host *shost) { return shost->shost_state == SHOST_RECOVERY || shost->shost_state == SHOST_CANCEL_RECOVERY || shost->shost_state == SHOST_DEL_RECOVERY || shost->tmf_in_progress; } extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *); extern void scsi_flush_work(struct Scsi_Host *); extern struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *, int); extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *, struct device *, struct device *); #if defined(CONFIG_SCSI_PROC_FS) struct proc_dir_entry * scsi_template_proc_dir(const struct scsi_host_template *sht); #else #define scsi_template_proc_dir(sht) NULL #endif extern void scsi_scan_host(struct Scsi_Host *); extern int scsi_resume_device(struct scsi_device *sdev); extern int scsi_rescan_device(struct scsi_device *sdev); extern void scsi_remove_host(struct Scsi_Host *); extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *); extern int scsi_host_busy(struct Scsi_Host *shost); extern void scsi_host_put(struct Scsi_Host *t); extern struct Scsi_Host *scsi_host_lookup(unsigned int hostnum); extern const char *scsi_host_state_name(enum scsi_host_state); extern void scsi_host_complete_all_commands(struct Scsi_Host *shost, enum scsi_host_status status); static inline int __must_check scsi_add_host(struct Scsi_Host *host, struct device *dev) { return scsi_add_host_with_dma(host, dev, dev); } static inline struct device *scsi_get_device(struct Scsi_Host *shost) { return shost->shost_gendev.parent; } /** * scsi_host_scan_allowed - Is scanning of this host allowed * @shost: Pointer to Scsi_Host. **/ static inline int scsi_host_scan_allowed(struct Scsi_Host *shost) { return shost->shost_state == SHOST_RUNNING || shost->shost_state == SHOST_RECOVERY; } extern void scsi_unblock_requests(struct Scsi_Host *); extern void scsi_block_requests(struct Scsi_Host *); extern int scsi_host_block(struct Scsi_Host *shost); extern int scsi_host_unblock(struct Scsi_Host *shost, int new_state); void scsi_host_busy_iter(struct Scsi_Host *, bool (*fn)(struct scsi_cmnd *, void *), void *priv); struct class_container; /* * DIF defines the exchange of protection information between * initiator and SBC block device. * * DIX defines the exchange of protection information between OS and * initiator. 
*/ enum scsi_host_prot_capabilities { SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */ SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */ SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */ SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */ SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */ SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */ SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */ }; /* * SCSI hosts which support the Data Integrity Extensions must * indicate their capabilities by setting the prot_capabilities using * this call. */ static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask) { shost->prot_capabilities = mask; } static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost) { return shost->prot_capabilities; } static inline int scsi_host_prot_dma(struct Scsi_Host *shost) { return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION; } static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type) { static unsigned char cap[] = { 0, SHOST_DIF_TYPE1_PROTECTION, SHOST_DIF_TYPE2_PROTECTION, SHOST_DIF_TYPE3_PROTECTION }; if (target_type >= ARRAY_SIZE(cap)) return 0; return shost->prot_capabilities & cap[target_type] ? target_type : 0; } static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type) { #if defined(CONFIG_BLK_DEV_INTEGRITY) static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION, SHOST_DIX_TYPE1_PROTECTION, SHOST_DIX_TYPE2_PROTECTION, SHOST_DIX_TYPE3_PROTECTION }; if (target_type >= ARRAY_SIZE(cap)) return 0; return shost->prot_capabilities & cap[target_type]; #endif return 0; } /* * All DIX-capable initiators must support the T10-mandated CRC * checksum. Controllers can optionally implement the IP checksum * scheme which has much lower impact on system performance. Note * that the main rationale for the checksum is to match integrity * metadata with data. Detecting bit errors are a job for ECC memory * and buses. */ enum scsi_host_guard_type { SHOST_DIX_GUARD_CRC = 1 << 0, SHOST_DIX_GUARD_IP = 1 << 1, }; static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type) { shost->prot_guard_type = type; } static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost) { return shost->prot_guard_type; } extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state); #endif /* _SCSI_SCSI_HOST_H */
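To show how the pieces declared in this header fit together, here is a minimal sketch of a hypothetical driver (all demo_* names are invented): it fills in a scsi_host_template, allocates a host with driver-private data via scsi_host_alloc(), registers it, and triggers a scan. Hardware interaction is only hinted at in comments and error handling is trimmed to the essentials.

#include <linux/module.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct demo_hba {				/* hypothetical driver state */
	void __iomem *regs;
};

static int demo_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct demo_hba *hba = shost_priv(shost);

	if (!hba->regs)				/* pretend the HBA is not ready */
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * A real driver would hand the command to hardware here and call
	 * scsi_done(cmd) from its completion interrupt.  Completing it
	 * immediately keeps the sketch self-contained.
	 */
	set_host_byte(cmd, DID_NO_CONNECT);
	scsi_done(cmd);
	return 0;
}

static const struct scsi_host_template demo_template = {
	.module		= THIS_MODULE,
	.name		= "demo",
	.proc_name	= "demo",
	.queuecommand	= demo_queuecommand,
	.can_queue	= 32,
	.this_id	= -1,
	.sg_tablesize	= SG_ALL,
	.cmd_per_lun	= 8,
};

static int demo_probe(struct device *dev)
{
	struct Scsi_Host *shost;
	int error;

	shost = scsi_host_alloc(&demo_template, sizeof(struct demo_hba));
	if (!shost)
		return -ENOMEM;

	error = scsi_add_host(shost, dev);
	if (error) {
		scsi_host_put(shost);
		return error;
	}
	scsi_scan_host(shost);
	return 0;
}

Teardown would mirror this with scsi_remove_host() followed by scsi_host_put(), as the prototypes above suggest.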
// SPDX-License-Identifier: GPL-2.0-only
/* -*- linux-c -*-
 * sysctl_net.c: sysctl interface to net subsystem.
 *
 * Begun April 1, 1996, Mike Shaver.
 * Added /proc/sys/net directories for each protocol family. [MS]
 *
 * Revision 1.2  1996/05/08  20:24:40  shaver
 * Added bits for NET_BRIDGE and the NET_IPV4_ARP stuff and
 * NET_IPV4_IP_FORWARD.
 *
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/sysctl.h>
#include <linux/nsproxy.h>

#include <net/sock.h>

#ifdef CONFIG_INET
#include <net/ip.h>
#endif

#ifdef CONFIG_NET
#include <linux/if_ether.h>
#endif

static struct ctl_table_set *
net_ctl_header_lookup(struct ctl_table_root *root)
{
	return &current->nsproxy->net_ns->sysctls;
}

static int is_seen(struct ctl_table_set *set)
{
	return &current->nsproxy->net_ns->sysctls == set;
}

/* Return standard mode bits for table entry. */
static int net_ctl_permissions(struct ctl_table_header *head,
			       const struct ctl_table *table)
{
	struct net *net = container_of(head->set, struct net, sysctls);

	/* Allow network administrator to have same access as root. */
	if (ns_capable_noaudit(net->user_ns, CAP_NET_ADMIN)) {
		int mode = (table->mode >> 6) & 7;
		return (mode << 6) | (mode << 3) | mode;
	}

	return table->mode;
}

static void net_ctl_set_ownership(struct ctl_table_header *head,
				  kuid_t *uid, kgid_t *gid)
{
	struct net *net = container_of(head->set, struct net, sysctls);
	kuid_t ns_root_uid;
	kgid_t ns_root_gid;

	ns_root_uid = make_kuid(net->user_ns, 0);
	if (uid_valid(ns_root_uid))
		*uid = ns_root_uid;

	ns_root_gid = make_kgid(net->user_ns, 0);
	if (gid_valid(ns_root_gid))
		*gid = ns_root_gid;
}

static struct ctl_table_root net_sysctl_root = {
	.lookup = net_ctl_header_lookup,
	.permissions = net_ctl_permissions,
	.set_ownership = net_ctl_set_ownership,
};

static int __net_init sysctl_net_init(struct net *net)
{
	setup_sysctl_set(&net->sysctls, &net_sysctl_root, is_seen);
	return 0;
}

static void __net_exit sysctl_net_exit(struct net *net)
{
	retire_sysctl_set(&net->sysctls);
}

static struct pernet_operations sysctl_pernet_ops = {
	.init = sysctl_net_init,
	.exit = sysctl_net_exit,
};

static struct ctl_table_header *net_header;

__init int net_sysctl_init(void)
{
	static struct ctl_table empty[1];
	int ret = -ENOMEM;
	/* Avoid limitations in the sysctl implementation by
	 * registering "/proc/sys/net" as an empty directory not in a
	 * network namespace.
	 */
	net_header = register_sysctl_sz("net", empty, 0);
	if (!net_header)
		goto out;
	ret = register_pernet_subsys(&sysctl_pernet_ops);
	if (ret)
		goto out1;
out:
	return ret;
out1:
	unregister_sysctl_table(net_header);
	net_header = NULL;
	goto out;
}

/* Verify that sysctls for non-init netns are safe by either:
 * 1) being read-only, or
 * 2) having a data pointer which points outside of the global kernel/module
 *    data segment, and rather into the heap where a per-net object was
 *    allocated.
*/ static void ensure_safe_net_sysctl(struct net *net, const char *path, struct ctl_table *table, size_t table_size) { struct ctl_table *ent; pr_debug("Registering net sysctl (net %p): %s\n", net, path); ent = table; for (size_t i = 0; i < table_size; ent++, i++) { unsigned long addr; const char *where; pr_debug(" procname=%s mode=%o proc_handler=%ps data=%p\n", ent->procname, ent->mode, ent->proc_handler, ent->data); /* If it's not writable inside the netns, then it can't hurt. */ if ((ent->mode & 0222) == 0) { pr_debug(" Not writable by anyone\n"); continue; } /* Where does data point? */ addr = (unsigned long)ent->data; if (is_module_address(addr)) where = "module"; else if (is_kernel_core_data(addr)) where = "kernel"; else continue; /* If it is writable and points to kernel/module global * data, then it's probably a netns leak. */ WARN(1, "sysctl %s/%s: data points to %s global data: %ps\n", path, ent->procname, where, ent->data); /* Make it "safe" by dropping writable perms */ ent->mode &= ~0222; } } struct ctl_table_header *register_net_sysctl_sz(struct net *net, const char *path, struct ctl_table *table, size_t table_size) { if (!net_eq(net, &init_net)) ensure_safe_net_sysctl(net, path, table, table_size); return __register_sysctl_table(&net->sysctls, path, table, table_size); } EXPORT_SYMBOL_GPL(register_net_sysctl_sz); void unregister_net_sysctl_table(struct ctl_table_header *header) { unregister_sysctl_table(header); } EXPORT_SYMBOL_GPL(unregister_net_sysctl_table);
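The warning in ensure_safe_net_sysctl() exists because a writable per-netns sysctl whose ->data points at global kernel or module data lets one namespace change state for all of them. A minimal sketch of the intended pattern, using hypothetical demo_* names: duplicate the table for non-init namespaces and point ->data at per-net storage before registering.

#include <linux/slab.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

struct demo_pernet {				/* hypothetical per-netns state */
	int			demo_value;
	struct ctl_table_header	*hdr;
};

static struct ctl_table demo_table[] = {
	{
		.procname	= "demo_value",
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
		/* .data is set per namespace below, never to global data */
	},
};

static int demo_sysctl_register(struct net *net, struct demo_pernet *dp)
{
	struct ctl_table *tbl = demo_table;

	/* non-init namespaces get their own copy of the table */
	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(demo_table, sizeof(demo_table), GFP_KERNEL);
		if (!tbl)
			return -ENOMEM;
	}
	tbl[0].data = &dp->demo_value;

	dp->hdr = register_net_sysctl_sz(net, "net/demo", tbl,
					 ARRAY_SIZE(demo_table));
	if (!dp->hdr) {
		if (tbl != demo_table)
			kfree(tbl);
		return -ENOMEM;
	}
	return 0;
}

Teardown would call unregister_net_sysctl_table(dp->hdr) and free the duplicated table for non-init namespaces.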
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2000-2001 Christoph Hellwig.
 * Copyright (c) 2016 Krzysztof Blaszkowski
 */

/*
 * Veritas filesystem driver - superblock related routines.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/vfs.h>
#include <linux/fs_context.h>

#include "vxfs.h"
#include "vxfs_extern.h"
#include "vxfs_dir.h"
#include "vxfs_inode.h"

MODULE_AUTHOR("Christoph Hellwig, Krzysztof Blaszkowski");
MODULE_DESCRIPTION("Veritas Filesystem (VxFS) driver");
MODULE_LICENSE("Dual BSD/GPL");

static struct kmem_cache *vxfs_inode_cachep;

/**
 * vxfs_put_super - free superblock resources
 * @sbp:	VFS superblock.
 *
 * Description:
 *   vxfs_put_super frees all resources allocated for @sbp
 *   after the last instance of the filesystem is unmounted.
 */
static void
vxfs_put_super(struct super_block *sbp)
{
	struct vxfs_sb_info	*infp = VXFS_SBI(sbp);

	iput(infp->vsi_fship);
	iput(infp->vsi_ilist);
	iput(infp->vsi_stilist);

	brelse(infp->vsi_bp);
	kfree(infp);
}

/**
 * vxfs_statfs - get filesystem information
 * @dentry:	VFS dentry to locate superblock
 * @bufp:	output buffer
 *
 * Description:
 *   vxfs_statfs fills the statfs buffer @bufp with information
 *   about the filesystem described by @dentry.
 *
 * Returns:
 *   Zero.
 *
 * Locking:
 *   No locks held.
 *
 * Notes:
 *   This is everything but complete...
*/ static int vxfs_statfs(struct dentry *dentry, struct kstatfs *bufp) { struct vxfs_sb_info *infp = VXFS_SBI(dentry->d_sb); struct vxfs_sb *raw_sb = infp->vsi_raw; u64 id = huge_encode_dev(dentry->d_sb->s_bdev->bd_dev); bufp->f_type = VXFS_SUPER_MAGIC; bufp->f_bsize = dentry->d_sb->s_blocksize; bufp->f_blocks = fs32_to_cpu(infp, raw_sb->vs_dsize); bufp->f_bfree = fs32_to_cpu(infp, raw_sb->vs_free); bufp->f_bavail = 0; bufp->f_files = 0; bufp->f_ffree = fs32_to_cpu(infp, raw_sb->vs_ifree); bufp->f_fsid = u64_to_fsid(id); bufp->f_namelen = VXFS_NAMELEN; return 0; } static int vxfs_reconfigure(struct fs_context *fc) { sync_filesystem(fc->root->d_sb); fc->sb_flags |= SB_RDONLY; return 0; } static struct inode *vxfs_alloc_inode(struct super_block *sb) { struct vxfs_inode_info *vi; vi = alloc_inode_sb(sb, vxfs_inode_cachep, GFP_KERNEL); if (!vi) return NULL; inode_init_once(&vi->vfs_inode); return &vi->vfs_inode; } static void vxfs_free_inode(struct inode *inode) { kmem_cache_free(vxfs_inode_cachep, VXFS_INO(inode)); } static const struct super_operations vxfs_super_ops = { .alloc_inode = vxfs_alloc_inode, .free_inode = vxfs_free_inode, .evict_inode = vxfs_evict_inode, .put_super = vxfs_put_super, .statfs = vxfs_statfs, }; static int vxfs_try_sb_magic(struct super_block *sbp, struct fs_context *fc, unsigned blk, __fs32 magic) { struct buffer_head *bp; struct vxfs_sb *rsbp; struct vxfs_sb_info *infp = VXFS_SBI(sbp); int silent = fc->sb_flags & SB_SILENT; int rc = -ENOMEM; bp = sb_bread(sbp, blk); do { if (!bp || !buffer_mapped(bp)) { if (!silent) { warnf(fc, "vxfs: unable to read disk superblock at %u", blk); } break; } rc = -EINVAL; rsbp = (struct vxfs_sb *)bp->b_data; if (rsbp->vs_magic != magic) { if (!silent) infof(fc, "vxfs: WRONG superblock magic %08x at %u", rsbp->vs_magic, blk); break; } rc = 0; infp->vsi_raw = rsbp; infp->vsi_bp = bp; } while (0); if (rc) { infp->vsi_raw = NULL; infp->vsi_bp = NULL; brelse(bp); } return rc; } /** * vxfs_fill_super - read superblock into memory and initialize filesystem * @sbp: VFS superblock (to fill) * @fc: filesytem context * * Description: * We are called on the first mount of a filesystem to read the * superblock into memory and do some basic setup. * * Returns: * The superblock on success, else %NULL. * * Locking: * We are under @sbp->s_lock. 
*/ static int vxfs_fill_super(struct super_block *sbp, struct fs_context *fc) { struct vxfs_sb_info *infp; struct vxfs_sb *rsbp; u_long bsize; struct inode *root; int ret = -EINVAL; int silent = fc->sb_flags & SB_SILENT; u32 j; sbp->s_flags |= SB_RDONLY; infp = kzalloc(sizeof(*infp), GFP_KERNEL); if (!infp) { warnf(fc, "vxfs: unable to allocate incore superblock"); return -ENOMEM; } bsize = sb_min_blocksize(sbp, BLOCK_SIZE); if (!bsize) { warnf(fc, "vxfs: unable to set blocksize"); goto out; } sbp->s_op = &vxfs_super_ops; sbp->s_fs_info = infp; sbp->s_time_min = 0; sbp->s_time_max = U32_MAX; if (!vxfs_try_sb_magic(sbp, fc, 1, (__force __fs32)cpu_to_le32(VXFS_SUPER_MAGIC))) { /* Unixware, x86 */ infp->byte_order = VXFS_BO_LE; } else if (!vxfs_try_sb_magic(sbp, fc, 8, (__force __fs32)cpu_to_be32(VXFS_SUPER_MAGIC))) { /* HP-UX, parisc */ infp->byte_order = VXFS_BO_BE; } else { if (!silent) infof(fc, "vxfs: can't find superblock."); goto out; } rsbp = infp->vsi_raw; j = fs32_to_cpu(infp, rsbp->vs_version); if ((j < 2 || j > 4) && !silent) { infof(fc, "vxfs: unsupported VxFS version (%d)", j); goto out; } #ifdef DIAGNOSTIC printk(KERN_DEBUG "vxfs: supported VxFS version (%d)\n", j); printk(KERN_DEBUG "vxfs: blocksize: %d\n", fs32_to_cpu(infp, rsbp->vs_bsize)); #endif sbp->s_magic = fs32_to_cpu(infp, rsbp->vs_magic); infp->vsi_oltext = fs32_to_cpu(infp, rsbp->vs_oltext[0]); infp->vsi_oltsize = fs32_to_cpu(infp, rsbp->vs_oltsize); j = fs32_to_cpu(infp, rsbp->vs_bsize); if (!sb_set_blocksize(sbp, j)) { warnf(fc, "vxfs: unable to set final block size"); goto out; } if (vxfs_read_olt(sbp, bsize)) { warnf(fc, "vxfs: unable to read olt"); goto out; } if (vxfs_read_fshead(sbp)) { warnf(fc, "vxfs: unable to read fshead"); goto out; } root = vxfs_iget(sbp, VXFS_ROOT_INO); if (IS_ERR(root)) { ret = PTR_ERR(root); goto out; } sbp->s_root = d_make_root(root); if (!sbp->s_root) { warnf(fc, "vxfs: unable to get root dentry."); goto out_free_ilist; } return 0; out_free_ilist: iput(infp->vsi_fship); iput(infp->vsi_ilist); iput(infp->vsi_stilist); out: brelse(infp->vsi_bp); kfree(infp); return ret; } /* * The usual module blurb. */ static int vxfs_get_tree(struct fs_context *fc) { return get_tree_bdev(fc, vxfs_fill_super); } static const struct fs_context_operations vxfs_context_ops = { .get_tree = vxfs_get_tree, .reconfigure = vxfs_reconfigure, }; static int vxfs_init_fs_context(struct fs_context *fc) { fc->ops = &vxfs_context_ops; return 0; } static struct file_system_type vxfs_fs_type = { .owner = THIS_MODULE, .name = "vxfs", .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV, .init_fs_context = vxfs_init_fs_context, }; MODULE_ALIAS_FS("vxfs"); /* makes mount -t vxfs autoload the module */ MODULE_ALIAS("vxfs"); static int __init vxfs_init(void) { int rv; vxfs_inode_cachep = kmem_cache_create_usercopy("vxfs_inode", sizeof(struct vxfs_inode_info), 0, SLAB_RECLAIM_ACCOUNT, offsetof(struct vxfs_inode_info, vii_immed.vi_immed), sizeof_field(struct vxfs_inode_info, vii_immed.vi_immed), NULL); if (!vxfs_inode_cachep) return -ENOMEM; rv = register_filesystem(&vxfs_fs_type); if (rv < 0) kmem_cache_destroy(vxfs_inode_cachep); return rv; } static void __exit vxfs_cleanup(void) { unregister_filesystem(&vxfs_fs_type); /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(vxfs_inode_cachep); } module_init(vxfs_init); module_exit(vxfs_cleanup);
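Distilled from the file above, this is the minimum wiring a block-based filesystem needs under the fs_context mount API: an init_fs_context hook installing an ops table whose get_tree callback hands a fill_super function to get_tree_bdev(). A sketch with hypothetical demofs_* names; a real module would also call register_filesystem(), as vxfs_init() does above.

#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/module.h>

static int demofs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	sb->s_flags |= SB_RDONLY;
	/* read the on-disk superblock, set sb->s_op, create the root dentry */
	return -EINVAL;		/* placeholder for the sketch */
}

static int demofs_get_tree(struct fs_context *fc)
{
	return get_tree_bdev(fc, demofs_fill_super);
}

static const struct fs_context_operations demofs_context_ops = {
	.get_tree	= demofs_get_tree,
};

static int demofs_init_fs_context(struct fs_context *fc)
{
	fc->ops = &demofs_context_ops;
	return 0;
}

static struct file_system_type demofs_fs_type = {
	.owner		 = THIS_MODULE,
	.name		 = "demofs",
	.kill_sb	 = kill_block_super,
	.fs_flags	 = FS_REQUIRES_DEV,
	.init_fs_context = demofs_init_fs_context,
};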
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_reflink.h"
#include "xfs_errortag.h"
#include "xfs_error.h"

struct xfs_writepage_ctx {
	struct iomap_writepage_ctx ctx;
	unsigned int		data_seq;
	unsigned int		cow_seq;
};

static inline struct xfs_writepage_ctx *
XFS_WPC(struct iomap_writepage_ctx *ctx)
{
	return container_of(ctx, struct xfs_writepage_ctx, ctx);
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_disk_size;
}

/*
 * Update on-disk file size now that data has been written to disk.
*/ int xfs_setfilesize( struct xfs_inode *ip, xfs_off_t offset, size_t size) { struct xfs_mount *mp = ip->i_mount; struct xfs_trans *tp; xfs_fsize_t isize; int error; error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp); if (error) return error; xfs_ilock(ip, XFS_ILOCK_EXCL); isize = xfs_new_eof(ip, offset + size); if (!isize) { xfs_iunlock(ip, XFS_ILOCK_EXCL); xfs_trans_cancel(tp); return 0; } trace_xfs_setfilesize(ip, offset, size); ip->i_disk_size = isize; xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); return xfs_trans_commit(tp); } /* * IO write completion. */ STATIC void xfs_end_ioend( struct iomap_ioend *ioend) { struct xfs_inode *ip = XFS_I(ioend->io_inode); struct xfs_mount *mp = ip->i_mount; xfs_off_t offset = ioend->io_offset; size_t size = ioend->io_size; unsigned int nofs_flag; int error; /* * We can allocate memory here while doing writeback on behalf of * memory reclaim. To avoid memory allocation deadlocks set the * task-wide nofs context for the following operations. */ nofs_flag = memalloc_nofs_save(); /* * Just clean up the in-memory structures if the fs has been shut down. */ if (xfs_is_shutdown(mp)) { error = -EIO; goto done; } /* * Clean up all COW blocks and underlying data fork delalloc blocks on * I/O error. The delalloc punch is required because this ioend was * mapped to blocks in the COW fork and the associated pages are no * longer dirty. If we don't remove delalloc blocks here, they become * stale and can corrupt free space accounting on unmount. */ error = blk_status_to_errno(ioend->io_bio.bi_status); if (unlikely(error)) { if (ioend->io_flags & IOMAP_F_SHARED) { xfs_reflink_cancel_cow_range(ip, offset, size, true); xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK, offset, offset + size); } goto done; } /* * Success: commit the COW or unwritten blocks if needed. */ if (ioend->io_flags & IOMAP_F_SHARED) error = xfs_reflink_end_cow(ip, offset, size); else if (ioend->io_type == IOMAP_UNWRITTEN) error = xfs_iomap_write_unwritten(ip, offset, size, false); if (!error && xfs_ioend_is_append(ioend)) error = xfs_setfilesize(ip, offset, size); done: iomap_finish_ioends(ioend, error); memalloc_nofs_restore(nofs_flag); } /* * Finish all pending IO completions that require transactional modifications. * * We try to merge physical and logically contiguous ioends before completion to * minimise the number of transactions we need to perform during IO completion. * Both unwritten extent conversion and COW remapping need to iterate and modify * one physical extent at a time, so we gain nothing by merging physically * discontiguous extents here. * * The ioend chain length that we can be processing here is largely unbound in * length and we may have to perform significant amounts of work on each ioend * to complete it. Hence we have to be careful about holding the CPU for too * long in this loop. 
*/ void xfs_end_io( struct work_struct *work) { struct xfs_inode *ip = container_of(work, struct xfs_inode, i_ioend_work); struct iomap_ioend *ioend; struct list_head tmp; unsigned long flags; spin_lock_irqsave(&ip->i_ioend_lock, flags); list_replace_init(&ip->i_ioend_list, &tmp); spin_unlock_irqrestore(&ip->i_ioend_lock, flags); iomap_sort_ioends(&tmp); while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend, io_list))) { list_del_init(&ioend->io_list); iomap_ioend_try_merge(ioend, &tmp); xfs_end_ioend(ioend); cond_resched(); } } STATIC void xfs_end_bio( struct bio *bio) { struct iomap_ioend *ioend = iomap_ioend_from_bio(bio); struct xfs_inode *ip = XFS_I(ioend->io_inode); unsigned long flags; spin_lock_irqsave(&ip->i_ioend_lock, flags); if (list_empty(&ip->i_ioend_list)) WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue, &ip->i_ioend_work)); list_add_tail(&ioend->io_list, &ip->i_ioend_list); spin_unlock_irqrestore(&ip->i_ioend_lock, flags); } /* * Fast revalidation of the cached writeback mapping. Return true if the current * mapping is valid, false otherwise. */ static bool xfs_imap_valid( struct iomap_writepage_ctx *wpc, struct xfs_inode *ip, loff_t offset) { if (offset < wpc->iomap.offset || offset >= wpc->iomap.offset + wpc->iomap.length) return false; /* * If this is a COW mapping, it is sufficient to check that the mapping * covers the offset. Be careful to check this first because the caller * can revalidate a COW mapping without updating the data seqno. */ if (wpc->iomap.flags & IOMAP_F_SHARED) return true; /* * This is not a COW mapping. Check the sequence number of the data fork * because concurrent changes could have invalidated the extent. Check * the COW fork because concurrent changes since the last time we * checked (and found nothing at this offset) could have added * overlapping blocks. */ if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq)) { trace_xfs_wb_data_iomap_invalid(ip, &wpc->iomap, XFS_WPC(wpc)->data_seq, XFS_DATA_FORK); return false; } if (xfs_inode_has_cow_data(ip) && XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq)) { trace_xfs_wb_cow_iomap_invalid(ip, &wpc->iomap, XFS_WPC(wpc)->cow_seq, XFS_COW_FORK); return false; } return true; } static int xfs_map_blocks( struct iomap_writepage_ctx *wpc, struct inode *inode, loff_t offset, unsigned int len) { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; ssize_t count = i_blocksize(inode); xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + count); xfs_fileoff_t cow_fsb; int whichfork; struct xfs_bmbt_irec imap; struct xfs_iext_cursor icur; int retries = 0; int error = 0; unsigned int *seq; if (xfs_is_shutdown(mp)) return -EIO; XFS_ERRORTAG_DELAY(mp, XFS_ERRTAG_WB_DELAY_MS); /* * COW fork blocks can overlap data fork blocks even if the blocks * aren't shared. COW I/O always takes precedent, so we must always * check for overlap on reflink inodes unless the mapping is already a * COW one, or the COW fork hasn't changed from the last time we looked * at it. * * It's safe to check the COW fork if_seq here without the ILOCK because * we've indirectly protected against concurrent updates: writeback has * the page locked, which prevents concurrent invalidations by reflink * and directio and prevents concurrent buffered writes to the same * page. 
Changes to if_seq always happen under i_lock, which protects * against concurrent updates and provides a memory barrier on the way * out that ensures that we always see the current value. */ if (xfs_imap_valid(wpc, ip, offset)) return 0; /* * If we don't have a valid map, now it's time to get a new one for this * offset. This will convert delayed allocations (including COW ones) * into real extents. If we return without a valid map, it means we * landed in a hole and we skip the block. */ retry: cow_fsb = NULLFILEOFF; whichfork = XFS_DATA_FORK; xfs_ilock(ip, XFS_ILOCK_SHARED); ASSERT(!xfs_need_iread_extents(&ip->i_df)); /* * Check if this is offset is covered by a COW extents, and if yes use * it directly instead of looking up anything in the data fork. */ if (xfs_inode_has_cow_data(ip) && xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap)) cow_fsb = imap.br_startoff; if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) { XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq); xfs_iunlock(ip, XFS_ILOCK_SHARED); whichfork = XFS_COW_FORK; goto allocate_blocks; } /* * No COW extent overlap. Revalidate now that we may have updated * ->cow_seq. If the data mapping is still valid, we're done. */ if (xfs_imap_valid(wpc, ip, offset)) { xfs_iunlock(ip, XFS_ILOCK_SHARED); return 0; } /* * If we don't have a valid map, now it's time to get a new one for this * offset. This will convert delayed allocations (including COW ones) * into real extents. */ if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) imap.br_startoff = end_fsb; /* fake a hole past EOF */ XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq); xfs_iunlock(ip, XFS_ILOCK_SHARED); /* landed in a hole or beyond EOF? */ if (imap.br_startoff > offset_fsb) { imap.br_blockcount = imap.br_startoff - offset_fsb; imap.br_startoff = offset_fsb; imap.br_startblock = HOLESTARTBLOCK; imap.br_state = XFS_EXT_NORM; } /* * Truncate to the next COW extent if there is one. This is the only * opportunity to do this because we can skip COW fork lookups for the * subsequent blocks in the mapping; however, the requirement to treat * the COW range separately remains. */ if (cow_fsb != NULLFILEOFF && cow_fsb < imap.br_startoff + imap.br_blockcount) imap.br_blockcount = cow_fsb - imap.br_startoff; /* got a delalloc extent? */ if (imap.br_startblock != HOLESTARTBLOCK && isnullstartblock(imap.br_startblock)) goto allocate_blocks; xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0, 0, XFS_WPC(wpc)->data_seq); trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap); return 0; allocate_blocks: /* * Convert a dellalloc extent to a real one. The current page is held * locked so nothing could have removed the block backing offset_fsb, * although it could have moved from the COW to the data fork by another * thread. */ if (whichfork == XFS_COW_FORK) seq = &XFS_WPC(wpc)->cow_seq; else seq = &XFS_WPC(wpc)->data_seq; error = xfs_bmapi_convert_delalloc(ip, whichfork, offset, &wpc->iomap, seq); if (error) { /* * If we failed to find the extent in the COW fork we might have * raced with a COW to data fork conversion or truncate. * Restart the lookup to catch the extent in the data fork for * the former case, but prevent additional retries to avoid * looping forever for the latter case. */ if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++) goto retry; ASSERT(error != -EAGAIN); return error; } /* * Due to merging the return real extent might be larger than the * original delalloc one. 
Trim the return extent to the next COW * boundary again to force a re-lookup. */ if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) { loff_t cow_offset = XFS_FSB_TO_B(mp, cow_fsb); if (cow_offset < wpc->iomap.offset + wpc->iomap.length) wpc->iomap.length = cow_offset - wpc->iomap.offset; } ASSERT(wpc->iomap.offset <= offset); ASSERT(wpc->iomap.offset + wpc->iomap.length > offset); trace_xfs_map_blocks_alloc(ip, offset, count, whichfork, &imap); return 0; } static int xfs_prepare_ioend( struct iomap_ioend *ioend, int status) { unsigned int nofs_flag; /* * We can allocate memory here while doing writeback on behalf of * memory reclaim. To avoid memory allocation deadlocks set the * task-wide nofs context for the following operations. */ nofs_flag = memalloc_nofs_save(); /* Convert CoW extents to regular */ if (!status && (ioend->io_flags & IOMAP_F_SHARED)) { status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode), ioend->io_offset, ioend->io_size); } memalloc_nofs_restore(nofs_flag); /* send ioends that might require a transaction to the completion wq */ if (xfs_ioend_is_append(ioend) || ioend->io_type == IOMAP_UNWRITTEN || (ioend->io_flags & IOMAP_F_SHARED)) ioend->io_bio.bi_end_io = xfs_end_bio; return status; } /* * If the folio has delalloc blocks on it, the caller is asking us to punch them * out. If we don't, we can leave a stale delalloc mapping covered by a clean * page that needs to be dirtied again before the delalloc mapping can be * converted. This stale delalloc mapping can trip up a later direct I/O read * operation on the same region. * * We prevent this by truncating away the delalloc regions on the folio. Because * they are delalloc, we can do this without needing a transaction. Indeed - if * we get ENOSPC errors, we have to be able to do this truncation without a * transaction as there is no space left for block reservation (typically why * we see a ENOSPC in writeback). */ static void xfs_discard_folio( struct folio *folio, loff_t pos) { struct xfs_inode *ip = XFS_I(folio->mapping->host); struct xfs_mount *mp = ip->i_mount; if (xfs_is_shutdown(mp)) return; xfs_alert_ratelimited(mp, "page discard on page "PTR_FMT", inode 0x%llx, pos %llu.", folio, ip->i_ino, pos); /* * The end of the punch range is always the offset of the first * byte of the next folio. Hence the end offset is only dependent on the * folio itself and not the start offset that is passed in. */ xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK, pos, folio_pos(folio) + folio_size(folio)); } static const struct iomap_writeback_ops xfs_writeback_ops = { .map_blocks = xfs_map_blocks, .prepare_ioend = xfs_prepare_ioend, .discard_folio = xfs_discard_folio, }; STATIC int xfs_vm_writepages( struct address_space *mapping, struct writeback_control *wbc) { struct xfs_writepage_ctx wpc = { }; xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED); return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops); } STATIC int xfs_dax_writepages( struct address_space *mapping, struct writeback_control *wbc) { struct xfs_inode *ip = XFS_I(mapping->host); xfs_iflags_clear(ip, XFS_ITRUNCATED); return dax_writeback_mapping_range(mapping, xfs_inode_buftarg(ip)->bt_daxdev, wbc); } STATIC sector_t xfs_vm_bmap( struct address_space *mapping, sector_t block) { struct xfs_inode *ip = XFS_I(mapping->host); trace_xfs_vm_bmap(ip); /* * The swap code (ab-)uses ->bmap to get a block mapping and then * bypasses the file system for actual I/O. We really can't allow * that on reflinks inodes, so we have to skip out here. 
And yes, * 0 is the magic code for a bmap error. * * Since we don't pass back blockdev info, we can't return bmap * information for rt files either. */ if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip)) return 0; return iomap_bmap(mapping, block, &xfs_read_iomap_ops); } STATIC int xfs_vm_read_folio( struct file *unused, struct folio *folio) { return iomap_read_folio(folio, &xfs_read_iomap_ops); } STATIC void xfs_vm_readahead( struct readahead_control *rac) { iomap_readahead(rac, &xfs_read_iomap_ops); } static int xfs_iomap_swapfile_activate( struct swap_info_struct *sis, struct file *swap_file, sector_t *span) { sis->bdev = xfs_inode_buftarg(XFS_I(file_inode(swap_file)))->bt_bdev; return iomap_swapfile_activate(sis, swap_file, span, &xfs_read_iomap_ops); } const struct address_space_operations xfs_address_space_operations = { .read_folio = xfs_vm_read_folio, .readahead = xfs_vm_readahead, .writepages = xfs_vm_writepages, .dirty_folio = iomap_dirty_folio, .release_folio = iomap_release_folio, .invalidate_folio = iomap_invalidate_folio, .bmap = xfs_vm_bmap, .migrate_folio = filemap_migrate_folio, .is_partially_uptodate = iomap_is_partially_uptodate, .error_remove_folio = generic_error_remove_folio, .swap_activate = xfs_iomap_swapfile_activate, }; const struct address_space_operations xfs_dax_aops = { .writepages = xfs_dax_writepages, .dirty_folio = noop_dirty_folio, .swap_activate = xfs_iomap_swapfile_activate, };
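The comments in xfs_imap_valid() and xfs_map_blocks() above describe how XFS decides whether a cached writeback mapping can be reused: the offset must still fall inside the cached range, and the data/COW fork sequence numbers sampled when the mapping was built must still match the forks' current if_seq values, which are bumped on every extent change. Below is a minimal, hypothetical sketch of that sequence-number pattern, with made-up names (demo_cached_map, demo_map_valid) and assuming kernel-style types and READ_ONCE(); it is an illustration of the idea, not XFS code.

#include <linux/types.h>
#include <linux/compiler.h>

/*
 * Hypothetical, distilled version of the revalidation pattern above: trust a
 * cached mapping only if it still covers the offset and the fork sequence
 * number sampled at lookup time still matches the live one, which extent
 * writers bump on every change.
 */
struct demo_cached_map {
	loff_t		offset;	/* byte range covered by the cached mapping */
	loff_t		length;
	unsigned int	seq;	/* fork sequence number sampled at lookup */
};

static bool demo_map_valid(const struct demo_cached_map *map,
			   const unsigned int *fork_seq, loff_t pos)
{
	if (pos < map->offset || pos >= map->offset + map->length)
		return false;

	/* Any concurrent extent update has bumped *fork_seq. */
	return map->seq == READ_ONCE(*fork_seq);
}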
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * The "hash function" used as the core of the ChaCha stream cipher (RFC7539)
 *
 * Copyright (C) 2015 Martin Willi
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/unaligned.h>
#include <crypto/chacha.h>

static void chacha_permute(u32 *x, int nrounds)
{
	int i;

	/* whitelist the allowed round counts */
	WARN_ON_ONCE(nrounds != 20 && nrounds != 12);

	for (i = 0; i < nrounds; i += 2) {
		x[0]  += x[4];    x[12] = rol32(x[12] ^ x[0],  16);
		x[1]  += x[5];    x[13] = rol32(x[13] ^ x[1],  16);
		x[2]  += x[6];    x[14] = rol32(x[14] ^ x[2],  16);
		x[3]  += x[7];    x[15] = rol32(x[15] ^ x[3],  16);

		x[8]  += x[12];   x[4]  = rol32(x[4]  ^ x[8],  12);
		x[9]  += x[13];   x[5]  = rol32(x[5]  ^ x[9],  12);
		x[10] += x[14];   x[6]  = rol32(x[6]  ^ x[10], 12);
		x[11] += x[15];   x[7]  = rol32(x[7]  ^ x[11], 12);

		x[0]  += x[4];    x[12] = rol32(x[12] ^ x[0],   8);
		x[1]  += x[5];    x[13] = rol32(x[13] ^ x[1],   8);
		x[2]  += x[6];    x[14] = rol32(x[14] ^ x[2],   8);
		x[3]  += x[7];    x[15] = rol32(x[15] ^ x[3],   8);

		x[8]  += x[12];   x[4]  = rol32(x[4]  ^ x[8],   7);
		x[9]  += x[13];   x[5]  = rol32(x[5]  ^ x[9],   7);
		x[10] += x[14];   x[6]  = rol32(x[6]  ^ x[10],  7);
		x[11] += x[15];   x[7]  = rol32(x[7]  ^ x[11],  7);

		x[0]  += x[5];    x[15] = rol32(x[15] ^ x[0],  16);
		x[1]  += x[6];    x[12] = rol32(x[12] ^ x[1],  16);
		x[2]  += x[7];    x[13] = rol32(x[13] ^ x[2],  16);
		x[3]  += x[4];    x[14] = rol32(x[14] ^ x[3],  16);

		x[10] += x[15];   x[5]  = rol32(x[5]  ^ x[10], 12);
		x[11] += x[12];   x[6]  = rol32(x[6]  ^ x[11], 12);
		x[8]  += x[13];   x[7]  = rol32(x[7]  ^ x[8],  12);
		x[9]  += x[14];   x[4]  = rol32(x[4]  ^ x[9],  12);

		x[0]  += x[5];    x[15] = rol32(x[15] ^ x[0],   8);
		x[1]  += x[6];    x[12] = rol32(x[12] ^ x[1],   8);
		x[2]  += x[7];    x[13] = rol32(x[13] ^ x[2],   8);
		x[3]  += x[4];    x[14] = rol32(x[14] ^ x[3],   8);

		x[10] += x[15];   x[5]  = rol32(x[5]  ^ x[10],  7);
		x[11] += x[12];   x[6]  = rol32(x[6]  ^ x[11],  7);
		x[8]  += x[13];   x[7]  = rol32(x[7]  ^ x[8],   7);
		x[9]  += x[14];   x[4]  = rol32(x[4]  ^ x[9],   7);
	}
}

/**
 * chacha_block_generic - generate one keystream block and increment block counter
 * @state: input state matrix (16 32-bit words)
 * @stream: output keystream block (64 bytes)
 * @nrounds: number of rounds (20 or 12; 20 is recommended)
 *
 * This is the ChaCha core, a function from 64-byte strings to 64-byte strings.
 * The caller has already converted the endianness of the input. This function
 * also handles incrementing the block counter in the input matrix.
 */
void chacha_block_generic(u32 *state, u8 *stream, int nrounds)
{
	u32 x[16];
	int i;

	memcpy(x, state, 64);

	chacha_permute(x, nrounds);

	for (i = 0; i < ARRAY_SIZE(x); i++)
		put_unaligned_le32(x[i] + state[i], &stream[i * sizeof(u32)]);

	state[12]++;
}
EXPORT_SYMBOL(chacha_block_generic);

/**
 * hchacha_block_generic - abbreviated ChaCha core, for XChaCha
 * @state: input state matrix (16 32-bit words)
 * @stream: output (8 32-bit words)
 * @nrounds: number of rounds (20 or 12; 20 is recommended)
 *
 * HChaCha is the ChaCha equivalent of HSalsa and is an intermediate step
 * towards XChaCha (see https://cr.yp.to/snuffle/xsalsa-20081128.pdf).
 * HChaCha skips the final addition of the initial state, and outputs only
 * certain words of the state. It should not be used for streaming directly.
 */
void hchacha_block_generic(const u32 *state, u32 *stream, int nrounds)
{
	u32 x[16];

	memcpy(x, state, 64);

	chacha_permute(x, nrounds);

	memcpy(&stream[0], &x[0], 16);
	memcpy(&stream[4], &x[12], 16);
}
EXPORT_SYMBOL(hchacha_block_generic);
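As the kernel-doc above notes, chacha_block_generic() only runs the core permutation and the final addition; the caller is expected to have laid out the 16-word state itself (per RFC 7539: four "expand 32-byte k" constants, eight key words, a block counter, and three nonce words) and to have converted key and nonce to little-endian words. Here is a minimal caller-side sketch under those assumptions; demo_chacha20_one_block is a hypothetical name, and in-tree users would normally rely on the helpers in <crypto/chacha.h> rather than filling the matrix by hand.

#include <linux/unaligned.h>
#include <crypto/chacha.h>

/*
 * Hypothetical caller-side sketch: build the RFC 7539 state matrix by hand
 * and pull one 64-byte keystream block.  The word layout comes from the RFC,
 * not from this file.
 */
static void demo_chacha20_one_block(const u8 key[32], const u8 nonce[12],
				    u32 counter, u8 stream[64])
{
	u32 state[16];
	int i;

	/* "expand 32-byte k" */
	state[0] = 0x61707865;
	state[1] = 0x3320646e;
	state[2] = 0x79622d32;
	state[3] = 0x6b206574;

	/* 256-bit key as eight little-endian words */
	for (i = 0; i < 8; i++)
		state[4 + i] = get_unaligned_le32(key + 4 * i);

	/* 32-bit block counter, then 96-bit nonce */
	state[12] = counter;
	for (i = 0; i < 3; i++)
		state[13 + i] = get_unaligned_le32(nonce + 4 * i);

	/* Emits 64 keystream bytes and increments state[12]. */
	chacha_block_generic(state, stream, 20);
}

Each call emits one 64-byte block and advances state[12], so repeated calls walk the block counter forward without the caller touching the state again.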
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2003-2008 Takahiro Hirofuchi * Copyright (C) 2015-2016 Nobuo Iwata */ #include <linux/kthread.h> #include <linux/file.h> #include <linux/net.h> #include <linux/platform_device.h> #include <linux/slab.h> /* Hardening for Spectre-v1 */ #include <linux/nospec.h> #include "usbip_common.h" #include "vhci.h" /* TODO: refine locking ?*/ /* * output example: * hub port sta spd dev sockfd local_busid * hs 0000 004 000 00000000 000003 1-2.3 * ................................................ * ss 0008 004 000 00000000 000004 2-3.4 * ................................................ * * Output includes socket fd instead of socket pointer address to avoid * leaking kernel memory address in: * /sys/devices/platform/vhci_hcd.0/status and in debug output. * The socket pointer address is not used at the moment and it was made * visible as a convenient way to find IP address from socket pointer * address by looking up /proc/net/{tcp,tcp6}. As this opens a security * hole, the change is made to use sockfd instead.
* */ static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vdev) { if (hub == HUB_SPEED_HIGH) *out += sprintf(*out, "hs %04u %03u ", port, vdev->ud.status); else /* hub == HUB_SPEED_SUPER */ *out += sprintf(*out, "ss %04u %03u ", port, vdev->ud.status); if (vdev->ud.status == VDEV_ST_USED) { *out += sprintf(*out, "%03u %08x ", vdev->speed, vdev->devid); *out += sprintf(*out, "%06u %s", vdev->ud.sockfd, dev_name(&vdev->udev->dev)); } else { *out += sprintf(*out, "000 00000000 "); *out += sprintf(*out, "000000 0-0"); } *out += sprintf(*out, "\n"); } /* Sysfs entry to show port status */ static ssize_t status_show_vhci(int pdev_nr, char *out) { struct platform_device *pdev = vhcis[pdev_nr].pdev; struct vhci *vhci; struct usb_hcd *hcd; struct vhci_hcd *vhci_hcd; char *s = out; int i; unsigned long flags; if (!pdev || !out) { usbip_dbg_vhci_sysfs("show status error\n"); return 0; } hcd = platform_get_drvdata(pdev); vhci_hcd = hcd_to_vhci_hcd(hcd); vhci = vhci_hcd->vhci; spin_lock_irqsave(&vhci->lock, flags); for (i = 0; i < VHCI_HC_PORTS; i++) { struct vhci_device *vdev = &vhci->vhci_hcd_hs->vdev[i]; spin_lock(&vdev->ud.lock); port_show_vhci(&out, HUB_SPEED_HIGH, pdev_nr * VHCI_PORTS + i, vdev); spin_unlock(&vdev->ud.lock); } for (i = 0; i < VHCI_HC_PORTS; i++) { struct vhci_device *vdev = &vhci->vhci_hcd_ss->vdev[i]; spin_lock(&vdev->ud.lock); port_show_vhci(&out, HUB_SPEED_SUPER, pdev_nr * VHCI_PORTS + VHCI_HC_PORTS + i, vdev); spin_unlock(&vdev->ud.lock); } spin_unlock_irqrestore(&vhci->lock, flags); return out - s; } static ssize_t status_show_not_ready(int pdev_nr, char *out) { char *s = out; int i = 0; for (i = 0; i < VHCI_HC_PORTS; i++) { out += sprintf(out, "hs %04u %03u ", (pdev_nr * VHCI_PORTS) + i, VDEV_ST_NOTASSIGNED); out += sprintf(out, "000 00000000 0000000000000000 0-0"); out += sprintf(out, "\n"); } for (i = 0; i < VHCI_HC_PORTS; i++) { out += sprintf(out, "ss %04u %03u ", (pdev_nr * VHCI_PORTS) + VHCI_HC_PORTS + i, VDEV_ST_NOTASSIGNED); out += sprintf(out, "000 00000000 0000000000000000 0-0"); out += sprintf(out, "\n"); } return out - s; } static int status_name_to_id(const char *name) { char *c; long val; int ret; c = strchr(name, '.'); if (c == NULL) return 0; ret = kstrtol(c+1, 10, &val); if (ret < 0) return ret; return val; } static ssize_t status_show(struct device *dev, struct device_attribute *attr, char *out) { char *s = out; int pdev_nr; out += sprintf(out, "hub port sta spd dev sockfd local_busid\n"); pdev_nr = status_name_to_id(attr->attr.name); if (pdev_nr < 0) out += status_show_not_ready(pdev_nr, out); else out += status_show_vhci(pdev_nr, out); return out - s; } static ssize_t nports_show(struct device *dev, struct device_attribute *attr, char *out) { char *s = out; /* * Half the ports are for SPEED_HIGH and half for SPEED_SUPER, * thus the * 2. 
*/ out += sprintf(out, "%d\n", VHCI_PORTS * vhci_num_controllers); return out - s; } static DEVICE_ATTR_RO(nports); /* Sysfs entry to shutdown a virtual connection */ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport) { struct vhci_device *vdev = &vhci_hcd->vdev[rhport]; struct vhci *vhci = vhci_hcd->vhci; unsigned long flags; usbip_dbg_vhci_sysfs("enter\n"); mutex_lock(&vdev->ud.sysfs_lock); /* lock */ spin_lock_irqsave(&vhci->lock, flags); spin_lock(&vdev->ud.lock); if (vdev->ud.status == VDEV_ST_NULL) { pr_err("not connected %d\n", vdev->ud.status); /* unlock */ spin_unlock(&vdev->ud.lock); spin_unlock_irqrestore(&vhci->lock, flags); mutex_unlock(&vdev->ud.sysfs_lock); return -EINVAL; } /* unlock */ spin_unlock(&vdev->ud.lock); spin_unlock_irqrestore(&vhci->lock, flags); usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN); mutex_unlock(&vdev->ud.sysfs_lock); return 0; } static int valid_port(__u32 *pdev_nr, __u32 *rhport) { if (*pdev_nr >= vhci_num_controllers) { pr_err("pdev %u\n", *pdev_nr); return 0; } *pdev_nr = array_index_nospec(*pdev_nr, vhci_num_controllers); if (*rhport >= VHCI_HC_PORTS) { pr_err("rhport %u\n", *rhport); return 0; } *rhport = array_index_nospec(*rhport, VHCI_HC_PORTS); return 1; } static ssize_t detach_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { __u32 port = 0, pdev_nr = 0, rhport = 0; struct usb_hcd *hcd; struct vhci_hcd *vhci_hcd; int ret; if (kstrtoint(buf, 10, &port) < 0) return -EINVAL; pdev_nr = port_to_pdev_nr(port); rhport = port_to_rhport(port); if (!valid_port(&pdev_nr, &rhport)) return -EINVAL; hcd = platform_get_drvdata(vhcis[pdev_nr].pdev); if (hcd == NULL) { dev_err(dev, "port is not ready %u\n", port); return -EAGAIN; } usbip_dbg_vhci_sysfs("rhport %d\n", rhport); if ((port / VHCI_HC_PORTS) % 2) vhci_hcd = hcd_to_vhci_hcd(hcd)->vhci->vhci_hcd_ss; else vhci_hcd = hcd_to_vhci_hcd(hcd)->vhci->vhci_hcd_hs; ret = vhci_port_disconnect(vhci_hcd, rhport); if (ret < 0) return -EINVAL; usbip_dbg_vhci_sysfs("Leave\n"); return count; } static DEVICE_ATTR_WO(detach); static int valid_args(__u32 *pdev_nr, __u32 *rhport, enum usb_device_speed speed) { if (!valid_port(pdev_nr, rhport)) { return 0; } switch (speed) { case USB_SPEED_LOW: case USB_SPEED_FULL: case USB_SPEED_HIGH: case USB_SPEED_WIRELESS: case USB_SPEED_SUPER: case USB_SPEED_SUPER_PLUS: break; default: pr_err("Failed attach request for unsupported USB speed: %s\n", usb_speed_string(speed)); return 0; } return 1; } /* Sysfs entry to establish a virtual connection */ /* * To start a new USB/IP attachment, a userland program needs to setup a TCP * connection and then write its socket descriptor with remote device * information into this sysfs file. * * A remote device is virtually attached to the root-hub port of @rhport with * @speed. @devid is embedded into a request to specify the remote device in a * server host. * * write() returns 0 on success, else negative errno. 
*/ static ssize_t attach_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct socket *socket; int sockfd = 0; __u32 port = 0, pdev_nr = 0, rhport = 0, devid = 0, speed = 0; struct usb_hcd *hcd; struct vhci_hcd *vhci_hcd; struct vhci_device *vdev; struct vhci *vhci; int err; unsigned long flags; struct task_struct *tcp_rx = NULL; struct task_struct *tcp_tx = NULL; /* * @rhport: port number of vhci_hcd * @sockfd: socket descriptor of an established TCP connection * @devid: unique device identifier in a remote host * @speed: usb device speed in a remote host */ if (sscanf(buf, "%u %u %u %u", &port, &sockfd, &devid, &speed) != 4) return -EINVAL; pdev_nr = port_to_pdev_nr(port); rhport = port_to_rhport(port); usbip_dbg_vhci_sysfs("port(%u) pdev(%d) rhport(%u)\n", port, pdev_nr, rhport); usbip_dbg_vhci_sysfs("sockfd(%u) devid(%u) speed(%u)\n", sockfd, devid, speed); /* check received parameters */ if (!valid_args(&pdev_nr, &rhport, speed)) return -EINVAL; hcd = platform_get_drvdata(vhcis[pdev_nr].pdev); if (hcd == NULL) { dev_err(dev, "port %d is not ready\n", port); return -EAGAIN; } vhci_hcd = hcd_to_vhci_hcd(hcd); vhci = vhci_hcd->vhci; if (speed >= USB_SPEED_SUPER) vdev = &vhci->vhci_hcd_ss->vdev[rhport]; else vdev = &vhci->vhci_hcd_hs->vdev[rhport]; mutex_lock(&vdev->ud.sysfs_lock); /* Extract socket from fd. */ socket = sockfd_lookup(sockfd, &err); if (!socket) { dev_err(dev, "failed to lookup sock"); err = -EINVAL; goto unlock_mutex; } if (socket->type != SOCK_STREAM) { dev_err(dev, "Expecting SOCK_STREAM - found %d", socket->type); sockfd_put(socket); err = -EINVAL; goto unlock_mutex; } /* create threads before locking */ tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx"); if (IS_ERR(tcp_rx)) { sockfd_put(socket); err = -EINVAL; goto unlock_mutex; } tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx"); if (IS_ERR(tcp_tx)) { kthread_stop(tcp_rx); sockfd_put(socket); err = -EINVAL; goto unlock_mutex; } /* get task structs now */ get_task_struct(tcp_rx); get_task_struct(tcp_tx); /* now begin lock until setting vdev status set */ spin_lock_irqsave(&vhci->lock, flags); spin_lock(&vdev->ud.lock); if (vdev->ud.status != VDEV_ST_NULL) { /* end of the lock */ spin_unlock(&vdev->ud.lock); spin_unlock_irqrestore(&vhci->lock, flags); sockfd_put(socket); kthread_stop_put(tcp_rx); kthread_stop_put(tcp_tx); dev_err(dev, "port %d already used\n", rhport); /* * Will be retried from userspace * if there's another free port. 
*/ err = -EBUSY; goto unlock_mutex; } dev_info(dev, "pdev(%u) rhport(%u) sockfd(%d)\n", pdev_nr, rhport, sockfd); dev_info(dev, "devid(%u) speed(%u) speed_str(%s)\n", devid, speed, usb_speed_string(speed)); vdev->devid = devid; vdev->speed = speed; vdev->ud.sockfd = sockfd; vdev->ud.tcp_socket = socket; vdev->ud.tcp_rx = tcp_rx; vdev->ud.tcp_tx = tcp_tx; vdev->ud.status = VDEV_ST_NOTASSIGNED; usbip_kcov_handle_init(&vdev->ud); spin_unlock(&vdev->ud.lock); spin_unlock_irqrestore(&vhci->lock, flags); /* end the lock */ wake_up_process(vdev->ud.tcp_rx); wake_up_process(vdev->ud.tcp_tx); rh_port_connect(vdev, speed); dev_info(dev, "Device attached\n"); mutex_unlock(&vdev->ud.sysfs_lock); return count; unlock_mutex: mutex_unlock(&vdev->ud.sysfs_lock); return err; } static DEVICE_ATTR_WO(attach); #define MAX_STATUS_NAME 16 struct status_attr { struct device_attribute attr; char name[MAX_STATUS_NAME+1]; }; static struct status_attr *status_attrs; static void set_status_attr(int id) { struct status_attr *status; status = status_attrs + id; if (id == 0) strcpy(status->name, "status"); else snprintf(status->name, MAX_STATUS_NAME+1, "status.%d", id); status->attr.attr.name = status->name; status->attr.attr.mode = S_IRUGO; status->attr.show = status_show; sysfs_attr_init(&status->attr.attr); } static int init_status_attrs(void) { int id; status_attrs = kcalloc(vhci_num_controllers, sizeof(struct status_attr), GFP_KERNEL); if (status_attrs == NULL) return -ENOMEM; for (id = 0; id < vhci_num_controllers; id++) set_status_attr(id); return 0; } static void finish_status_attrs(void) { kfree(status_attrs); } struct attribute_group vhci_attr_group = { .attrs = NULL, }; int vhci_init_attr_group(void) { struct attribute **attrs; int ret, i; attrs = kcalloc((vhci_num_controllers + 5), sizeof(struct attribute *), GFP_KERNEL); if (attrs == NULL) return -ENOMEM; ret = init_status_attrs(); if (ret) { kfree(attrs); return ret; } *attrs = &dev_attr_nports.attr; *(attrs + 1) = &dev_attr_detach.attr; *(attrs + 2) = &dev_attr_attach.attr; *(attrs + 3) = &dev_attr_usbip_debug.attr; for (i = 0; i < vhci_num_controllers; i++) *(attrs + i + 4) = &((status_attrs + i)->attr.attr); vhci_attr_group.attrs = attrs; return 0; } void vhci_finish_attr_group(void) { finish_status_attrs(); kfree(vhci_attr_group.attrs); }
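The block comment above attach_store() spells out the userspace contract: establish a TCP connection to the server, then write "port sockfd devid speed" (the sscanf() format "%u %u %u %u") into the attach attribute; detach takes just the port number. Below is a hypothetical userspace sketch of the attach side; the sysfs path is an assumption based on the status path quoted in the driver's opening comment, and the function name and error handling are illustrative only.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

/*
 * Hypothetical userspace helper: hand an established TCP socket to vhci_hcd.
 * The "port sockfd devid speed" layout mirrors the sscanf() in attach_store();
 * the sysfs path is assumed, not taken from this file.
 */
static int demo_vhci_attach(int tcp_fd, unsigned int port,
			    unsigned int devid, unsigned int speed)
{
	char req[64];
	ssize_t written;
	int fd, len;

	len = snprintf(req, sizeof(req), "%u %d %u %u",
		       port, tcp_fd, devid, speed);

	fd = open("/sys/devices/platform/vhci_hcd.0/attach", O_WRONLY);
	if (fd < 0)
		return -1;

	written = write(fd, req, len);	/* attach_store() runs here */
	close(fd);

	return written == len ? 0 : -1;
}

The descriptor passed in must be a connected SOCK_STREAM socket, since attach_store() rejects anything else before spawning the vhci_rx/vhci_tx threads.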
8018 8019 8020 8021 8022 8023 8024 8025 8026 8027 8028 8029 8030 8031 8032 8033 8034 8035 8036 8037 8038 8039 8040 8041 8042 8043 8044 8045 8046 8047 8048 8049 8050 8051 8052 8053 8054 8055 8056 8057 8058 8059 8060 8061 8062 8063 8064 8065 8066 8067 8068 8069 8070 8071 8072 8073 8074 8075 8076 8077 8078 8079 8080 8081 8082 8083 8084 8085 8086 8087 8088 8089 8090 8091 8092 8093 8094 8095 8096 8097 8098 8099 8100 8101 8102 8103 8104 8105 8106 8107 8108 8109 8110 8111 8112 8113 8114 8115 8116 8117 8118 8119 8120 8121 8122 8123 8124 8125 8126 8127 8128 8129 8130 8131 8132 8133 8134 8135 8136 8137 8138 8139 8140 8141 8142 8143 8144 8145 8146 8147 8148 8149 8150 8151 8152 8153 8154 8155 8156 8157 8158 8159 8160 8161 8162 8163 8164 8165 8166 8167 8168 8169 8170 8171 8172 8173 8174 8175 8176 8177 8178 8179 8180 8181 8182 8183 8184 8185 8186 8187 8188 8189 8190 8191 8192 8193 8194 8195 8196 8197 8198 8199 8200 8201 8202 8203 8204 8205 8206 8207 8208 8209 8210 8211 8212 8213 8214 8215 8216 8217 8218 8219 8220 8221 8222 8223 8224 8225 8226 8227 8228 8229 8230 8231 8232 8233 8234 8235 8236 8237 8238 8239 8240 8241 8242 8243 8244 8245 8246 8247 8248 8249 8250 8251 8252 8253 8254 8255 8256 8257 8258 8259 8260 8261 8262 8263 8264 8265 8266 8267 8268 8269 8270 8271 8272 8273 8274 8275 8276 8277 8278 8279 8280 8281 8282 8283 8284 8285 8286 8287 8288 8289 8290 8291 8292 8293 8294 8295 8296 8297 8298 8299 8300 8301 8302 8303 8304 8305 8306 8307 8308 8309 8310 8311 8312 8313 8314 8315 8316 8317 8318 8319 8320 8321 8322 8323 8324 8325 8326 8327 8328 8329 8330 8331 8332 8333 8334 8335 8336 8337 8338 8339 8340 8341 8342 8343 8344 8345 8346 8347 8348 8349 8350 8351 8352 8353 8354 8355 8356 8357 8358 8359 8360 8361 8362 8363 8364 8365 8366 8367 8368 8369 8370 8371 8372 8373 8374 8375 8376 8377 8378 8379 8380 8381 8382 8383 8384 8385 8386 8387 8388 8389 8390 8391 8392 8393 8394 8395 8396 8397 8398 8399 8400 8401 8402 8403 8404 8405 8406 8407 8408 8409 8410 8411 8412 8413 8414 8415 8416 8417 8418 8419 8420 8421 8422 8423 8424 8425 8426 8427 8428 8429 8430 8431 8432 8433 8434 8435 8436 8437 8438 8439 8440 8441 8442 8443 8444 8445 8446 8447 8448 8449 8450 8451 8452 8453 8454 8455 8456 8457 8458 8459 8460 8461 8462 8463 8464 8465 8466 8467 8468 8469 8470 8471 8472 8473 8474 8475 8476 8477 8478 8479 8480 8481 8482 8483 8484 8485 8486 8487 8488 8489 8490 8491 8492 8493 8494 8495 8496 8497 8498 8499 8500 8501 8502 8503 8504 8505 8506 8507 8508 8509 8510 8511 8512 8513 8514 8515 8516 8517 8518 8519 8520 8521 8522 8523 8524 8525 8526 8527 8528 8529 8530 8531 8532 8533 8534 8535 8536 8537 8538 8539 8540 8541 8542 8543 8544 8545 8546 8547 8548 8549 8550 8551 8552 8553 8554 8555 8556 8557 8558 8559 8560 8561 8562 8563 8564 8565 8566 8567 8568 8569 8570 8571 8572 8573 8574 8575 8576 8577 8578 8579 8580 8581 8582 8583 8584 8585 8586 8587 8588 8589 8590 8591 8592 8593 8594 8595 8596 8597 8598 8599 8600 8601 8602 8603 8604 8605 8606 8607 8608 8609 8610 8611 8612 8613 8614 8615 8616 8617 8618 8619 8620 8621 8622 8623 8624 8625 8626 8627 8628 8629 8630 8631 8632 8633 8634 8635 8636 8637 8638 8639 8640 8641 8642 8643 8644 8645 8646 8647 8648 8649 8650 8651 8652 8653 8654 8655 8656 8657 8658 8659 8660 8661 8662 8663 8664 8665 8666 8667 8668 8669 8670 8671 8672 8673 8674 8675 8676 8677 8678 8679 8680 8681 8682 8683 8684 8685 8686 8687 8688 8689 8690 8691 8692 8693 8694 8695 8696 8697 8698 8699 8700 8701 8702 8703 8704 8705 8706 8707 8708 8709 8710 8711 8712 8713 8714 8715 8716 8717 8718 8719 8720 8721 8722 8723 8724 8725 8726 8727 8728 
8729 8730 8731 8732 8733 8734 8735 8736 8737 8738 8739 8740 8741 8742 8743 8744 8745 8746 8747 8748 8749 8750 8751 8752 8753 8754 8755 8756 8757 8758 8759 8760 8761 8762 8763 8764 8765 8766 8767 8768 8769 8770 8771 8772 8773 8774 8775 8776 8777 8778 8779 8780 8781 8782 8783 8784 8785 8786 8787 8788 8789 8790 8791 8792 8793 8794 8795 8796 8797 8798 8799 8800 8801 8802 8803 8804 8805 8806 8807 8808 8809 8810 8811 8812 8813 8814 8815 8816 8817 8818 8819 8820 8821 8822 8823 8824 8825 8826 8827 8828 8829 8830 8831 8832 8833 8834 8835 8836 8837 8838 8839 8840 8841 8842 8843 8844 8845 8846 8847 8848 8849 8850 8851 8852 8853 8854 8855 8856 8857 8858 8859 8860 8861 8862 8863 8864 8865 8866 8867 8868 8869 8870 8871 8872 8873 8874 8875 8876 8877 8878 8879 8880 8881 8882 8883 8884 8885 8886 8887 8888 8889 8890 8891 8892 8893 8894 8895 8896 8897 8898 8899 8900 8901 8902 8903 8904 8905 8906 8907 8908 8909 8910 8911 8912 8913 8914 8915 8916 8917 8918 8919 8920 8921 8922 8923 8924 8925 8926 8927 8928 8929 8930 8931 8932 8933 8934 8935 8936 8937 8938 8939 8940 8941 8942 8943 8944 8945 8946 8947 8948 8949 8950 8951 8952 8953 8954 8955 8956 8957 8958 8959 8960 8961 8962 8963 8964 8965 8966 8967 8968 8969 8970 8971 8972 8973 8974 8975 8976 8977 8978 8979 8980 8981 8982 8983 8984 8985 8986 8987 8988 8989 8990 8991 8992 8993 8994 8995 8996 8997 8998 8999 9000 9001 9002 9003 9004 9005 9006 9007 9008 9009 9010 9011 9012 9013 9014 9015 9016 9017 9018 9019 9020 9021 9022 9023 9024 9025 9026 9027 9028 9029 9030 9031 9032 9033 9034 9035 9036 9037 9038 9039 9040 9041 9042 9043 9044 9045 9046 9047 9048 9049 9050 9051 9052 9053 9054 9055 9056 9057 9058 9059 9060 9061 9062 9063 9064 9065 9066 9067 9068 9069 9070 9071 9072 9073 9074 9075 9076 9077 9078 9079 9080 9081 9082 9083 9084 9085 9086 9087 9088 9089 9090 9091 9092 9093 9094 9095 9096 9097 9098 9099 9100 9101 9102 9103 9104 9105 9106 9107 9108 9109 9110 9111 9112 9113 9114 9115 9116 9117 9118 9119 9120 9121 9122 9123 9124 9125 9126 9127 9128 9129 9130 9131 9132 9133 9134 9135 9136 9137 9138 9139 9140 9141 9142 9143 9144 9145 9146 9147 9148 9149 9150 9151 9152 9153 9154 9155 9156 9157 9158 9159 9160 9161 9162 9163 9164 9165 9166 9167 9168 9169 9170 9171 9172 9173 9174 9175 9176 9177 9178 9179 9180 9181 9182 9183 9184 9185 9186 9187 9188 9189 9190 9191 9192 9193 9194 9195 9196 9197 9198 9199 9200 9201 9202 9203 9204 9205 9206 9207 9208 9209 9210 9211 9212 9213 9214 9215 9216 9217 9218 9219 9220 9221 9222 9223 9224 9225 9226 9227 9228 9229 9230 9231 9232 9233 9234 9235 9236 9237 9238 9239 9240 9241 9242 9243 9244 9245 9246 9247 9248 9249 9250 9251 9252 9253 9254 9255 9256 9257 9258 9259 9260 9261 9262 9263 9264 9265 9266 9267 9268 9269 9270 9271 9272 9273 9274 9275 9276 9277 9278 9279 9280 9281 9282 9283 9284 9285 9286 9287 9288 9289 9290 9291 9292 9293 9294 9295 9296 9297 9298 9299 9300 9301 9302 9303 9304 9305 9306 9307 9308 9309 9310 9311 9312 9313 9314 9315 9316 9317 9318 9319 9320 9321 9322 9323 9324 9325 9326 9327 9328 9329 9330 9331 9332 9333 9334 9335 9336 9337 9338 9339 9340 9341 9342 9343 9344 9345 9346 9347 9348 9349 9350 9351 9352 9353 9354 9355 9356 9357 9358 9359 9360 9361 9362 9363 9364 9365 9366 9367 9368 9369 9370 9371 9372 9373 9374 9375 9376 9377 9378 9379 9380 9381 9382 9383 9384 9385 9386 9387 9388 9389 9390 9391 9392 9393 9394 9395 9396 9397 9398 9399 9400 9401 9402 9403 9404 9405 9406 9407 9408 9409 9410 9411 9412 9413 9414 9415 9416 9417 9418 9419 9420 9421 9422 9423 9424 9425 9426 9427 9428 9429 9430 9431 9432 9433 9434 9435 9436 9437 9438 9439 
9440 9441 9442 9443 9444 9445 9446 9447 9448 9449 9450 9451 9452 9453 9454 9455 9456 9457 9458 9459 9460 9461 9462 9463 9464 9465 9466 9467 9468 9469 9470 9471 9472 9473 9474 9475 9476 9477 9478 9479 9480 9481 9482 9483 9484 9485 9486 9487 9488 9489 9490 9491 9492 9493 9494 9495 9496 9497 9498 9499 9500 9501 9502 9503 9504 9505 9506 9507 9508 9509 9510 9511 9512 9513 9514 9515 9516 9517 9518 9519 9520 9521 9522 9523 9524 9525 9526 9527 9528 9529 9530 9531 9532 9533 9534 9535 9536 9537 9538 9539 9540 9541 9542 9543 9544 9545 9546 9547 9548 9549 9550 9551 9552 9553 9554 9555 9556 9557 9558 9559 9560 9561 9562 9563 9564 9565 9566 9567 9568 9569 9570 9571 9572 9573 9574 9575 9576 9577 9578 9579 9580 9581 9582 9583 9584 9585 9586 9587 9588 9589 9590 9591 9592 9593 9594 9595 9596 9597 9598 9599 9600 9601 9602 9603 9604 9605 9606 9607 9608 9609 9610 9611 9612 9613 9614 9615 9616 9617 9618 9619 9620 9621 9622 9623 9624 9625 9626 9627 9628 9629 9630 9631 9632 9633 9634 9635 9636 9637 9638 9639 9640 9641 9642 9643 9644 9645 9646 9647 9648 9649 9650 9651 9652 9653 9654 9655 9656 9657 9658 9659 9660 9661 9662 9663 9664 9665 9666 9667 9668 9669 9670 9671 9672 9673 9674 9675 9676 9677 9678 9679 9680 9681 9682 9683 9684 9685 9686 9687 9688 9689 9690 9691 9692 9693 9694 9695 9696 9697 9698 9699 9700 9701 9702 9703 9704 9705 9706 9707 9708 9709 9710 9711 9712 9713 9714 9715 9716 9717 9718 9719 9720 9721 9722 9723 9724 9725 9726 9727 9728 9729 9730 9731 9732 9733 9734 9735 9736 9737 9738 9739 9740 9741 9742 9743 9744 9745 9746 9747 9748 9749 9750 9751 9752 9753 9754 9755 9756 9757 9758 9759 9760 9761 9762 9763 9764 9765 9766 9767 9768 9769 9770 9771 9772 9773 9774 9775 9776 9777 9778 9779 9780 9781 9782 9783 9784 9785 9786 9787 9788 9789 9790 9791 9792 9793 9794 9795 9796 9797 9798 9799 9800 9801 9802 9803 9804 9805 9806 9807 9808 9809 9810 9811 9812 9813 9814 9815 9816 9817 9818 9819 9820 9821 9822 9823 9824 9825 9826 9827 9828 9829 9830 9831 9832 9833 9834 9835 9836 9837 9838 9839 9840 9841 9842 9843 9844 9845 9846 9847 9848 9849 9850 9851 9852 9853 9854 9855 9856 9857 9858 9859 9860 9861 9862 9863 9864 9865 9866 9867 9868 9869 9870 9871 9872 9873 9874 9875 9876 9877 9878 9879 9880 9881 9882 9883 9884 9885 9886 9887 9888 9889 9890 9891 9892 9893 9894 9895 9896 9897 9898 9899 9900 9901 9902 9903 9904 9905 9906 9907 9908 9909 9910 9911 9912 9913 9914 9915 9916 9917 9918 9919 9920 9921 9922 9923 9924 9925 9926 9927 9928 9929 9930 9931 9932 9933 9934 9935 9936 9937 9938 9939 9940 9941 9942 9943 9944 9945 9946 9947 9948 9949 9950 9951 9952 9953 9954 9955 9956 9957 9958 9959 9960 9961 9962 9963 9964 9965 9966 9967 9968 9969 9970 9971 9972 9973 9974 9975 9976 9977 9978 9979 9980 9981 9982 9983 9984 9985 9986 9987 9988 9989 9990 9991 9992 9993 9994 9995 9996 9997 9998 9999 10000 10001 10002 10003 10004 10005 10006 10007 10008 10009 10010 10011 10012 10013 10014 10015 10016 10017 10018 10019 10020 10021 10022 10023 10024 10025 10026 10027 10028 10029 10030 10031 10032 10033 10034 10035 10036 10037 10038 10039 10040 10041 10042 10043 10044 10045 10046 10047 10048 10049 10050 10051 10052 10053 10054 10055 10056 10057 10058 10059 10060 10061 10062 10063 10064 10065 10066 10067 10068 10069 10070 10071 10072 10073 10074 10075 10076 10077 10078 10079 10080 10081 10082 10083 10084 10085 10086 10087 10088 10089 10090 10091 10092 10093 10094 10095 10096 10097 10098 10099 10100 10101 10102 10103 10104 10105 10106 10107 10108 10109 10110 10111 10112 10113 10114 10115 10116 10117 10118 10119 10120 10121 10122 10123 10124 
//
SPDX-License-Identifier: GPL-2.0 /* * ring buffer based function tracer * * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com> * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> * * Originally taken from the RT patch by: * Arnaldo Carvalho de Melo <acme@redhat.com> * * Based on code from the latency_tracer, that is: * Copyright (C) 2004-2006 Ingo Molnar * Copyright (C) 2004 Nadia Yvette Chambers */ #include <linux/ring_buffer.h> #include <linux/utsname.h> #include <linux/stacktrace.h> #include <linux/writeback.h> #include <linux/kallsyms.h> #include <linux/security.h> #include <linux/seq_file.h> #include <linux/irqflags.h> #include <linux/debugfs.h> #include <linux/tracefs.h> #include <linux/pagemap.h> #include <linux/hardirq.h> #include <linux/linkage.h> #include <linux/uaccess.h> #include <linux/cleanup.h> #include <linux/vmalloc.h> #include <linux/ftrace.h> #include <linux/module.h> #include <linux/percpu.h> #include <linux/splice.h> #include <linux/kdebug.h> #include <linux/string.h> #include <linux/mount.h> #include <linux/rwsem.h> #include <linux/slab.h> #include <linux/ctype.h> #include <linux/init.h> #include <linux/panic_notifier.h> #include <linux/poll.h> #include <linux/nmi.h> #include <linux/fs.h> #include <linux/trace.h> #include <linux/sched/clock.h> #include <linux/sched/rt.h> #include <linux/fsnotify.h> #include <linux/irq_work.h> #include <linux/workqueue.h> #include <asm/setup.h> /* COMMAND_LINE_SIZE */ #include "trace.h" #include "trace_output.h" #ifdef CONFIG_FTRACE_STARTUP_TEST /* * We need to change this state when a selftest is running. * A selftest will lurk into the ring-buffer to count the * entries inserted during the selftest although some concurrent * insertions into the ring-buffer such as trace_printk could occurred * at the same time, giving false positive or negative results. */ static bool __read_mostly tracing_selftest_running; /* * If boot-time tracing including tracers/events via kernel cmdline * is running, we do not want to run SELFTEST. */ bool __read_mostly tracing_selftest_disabled; void __init disable_tracing_selftest(const char *reason) { if (!tracing_selftest_disabled) { tracing_selftest_disabled = true; pr_info("Ftrace startup test is disabled due to %s\n", reason); } } #else #define tracing_selftest_running 0 #define tracing_selftest_disabled 0 #endif /* Pipe tracepoints to printk */ static struct trace_iterator *tracepoint_print_iter; int tracepoint_printk; static bool tracepoint_printk_stop_on_boot __initdata; static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key); /* For tracers that don't implement custom flags */ static struct tracer_opt dummy_tracer_opt[] = { { } }; static int dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set) { return 0; } /* * To prevent the comm cache from being overwritten when no * tracing is active, only save the comm when a trace event * occurred. */ DEFINE_PER_CPU(bool, trace_taskinfo_save); /* * Kill all tracing for good (never come back). * It is initialized to 1 but will turn to zero if the initialization * of the tracer is successful. But that is the only place that sets * this back to zero. */ static int tracing_disabled = 1; cpumask_var_t __read_mostly tracing_buffer_mask; /* * ftrace_dump_on_oops - variable to dump ftrace buffer on oops * * If there is an oops (or kernel panic) and the ftrace_dump_on_oops * is set, then ftrace_dump is called. This will output the contents * of the ftrace buffers to the console. 
This is very useful for * capturing traces that lead to crashes and outputing it to a * serial console. * * It is default off, but you can enable it with either specifying * "ftrace_dump_on_oops" in the kernel command line, or setting * /proc/sys/kernel/ftrace_dump_on_oops * Set 1 if you want to dump buffers of all CPUs * Set 2 if you want to dump the buffer of the CPU that triggered oops * Set instance name if you want to dump the specific trace instance * Multiple instance dump is also supported, and instances are seperated * by commas. */ /* Set to string format zero to disable by default */ char ftrace_dump_on_oops[MAX_TRACER_SIZE] = "0"; /* When set, tracing will stop when a WARN*() is hit */ int __disable_trace_on_warning; #ifdef CONFIG_TRACE_EVAL_MAP_FILE /* Map of enums to their values, for "eval_map" file */ struct trace_eval_map_head { struct module *mod; unsigned long length; }; union trace_eval_map_item; struct trace_eval_map_tail { /* * "end" is first and points to NULL as it must be different * than "mod" or "eval_string" */ union trace_eval_map_item *next; const char *end; /* points to NULL */ }; static DEFINE_MUTEX(trace_eval_mutex); /* * The trace_eval_maps are saved in an array with two extra elements, * one at the beginning, and one at the end. The beginning item contains * the count of the saved maps (head.length), and the module they * belong to if not built in (head.mod). The ending item contains a * pointer to the next array of saved eval_map items. */ union trace_eval_map_item { struct trace_eval_map map; struct trace_eval_map_head head; struct trace_eval_map_tail tail; }; static union trace_eval_map_item *trace_eval_maps; #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ int tracing_set_tracer(struct trace_array *tr, const char *buf); static void ftrace_trace_userstack(struct trace_array *tr, struct trace_buffer *buffer, unsigned int trace_ctx); static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata; static char *default_bootup_tracer; static bool allocate_snapshot; static bool snapshot_at_boot; static char boot_instance_info[COMMAND_LINE_SIZE] __initdata; static int boot_instance_index; static char boot_snapshot_info[COMMAND_LINE_SIZE] __initdata; static int boot_snapshot_index; static int __init set_cmdline_ftrace(char *str) { strscpy(bootup_tracer_buf, str, MAX_TRACER_SIZE); default_bootup_tracer = bootup_tracer_buf; /* We are using ftrace early, expand it */ trace_set_ring_buffer_expanded(NULL); return 1; } __setup("ftrace=", set_cmdline_ftrace); int ftrace_dump_on_oops_enabled(void) { if (!strcmp("0", ftrace_dump_on_oops)) return 0; else return 1; } static int __init set_ftrace_dump_on_oops(char *str) { if (!*str) { strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE); return 1; } if (*str == ',') { strscpy(ftrace_dump_on_oops, "1", MAX_TRACER_SIZE); strscpy(ftrace_dump_on_oops + 1, str, MAX_TRACER_SIZE - 1); return 1; } if (*str++ == '=') { strscpy(ftrace_dump_on_oops, str, MAX_TRACER_SIZE); return 1; } return 0; } __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); static int __init stop_trace_on_warning(char *str) { if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)) __disable_trace_on_warning = 1; return 1; } __setup("traceoff_on_warning", stop_trace_on_warning); static int __init boot_alloc_snapshot(char *str) { char *slot = boot_snapshot_info + boot_snapshot_index; int left = sizeof(boot_snapshot_info) - boot_snapshot_index; int ret; if (str[0] == '=') { str++; if (strlen(str) >= left) return -1; ret = snprintf(slot, left, "%s\t", str); 
boot_snapshot_index += ret; } else { allocate_snapshot = true; /* We also need the main ring buffer expanded */ trace_set_ring_buffer_expanded(NULL); } return 1; } __setup("alloc_snapshot", boot_alloc_snapshot); static int __init boot_snapshot(char *str) { snapshot_at_boot = true; boot_alloc_snapshot(str); return 1; } __setup("ftrace_boot_snapshot", boot_snapshot); static int __init boot_instance(char *str) { char *slot = boot_instance_info + boot_instance_index; int left = sizeof(boot_instance_info) - boot_instance_index; int ret; if (strlen(str) >= left) return -1; ret = snprintf(slot, left, "%s\t", str); boot_instance_index += ret; return 1; } __setup("trace_instance=", boot_instance); static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata; static int __init set_trace_boot_options(char *str) { strscpy(trace_boot_options_buf, str, MAX_TRACER_SIZE); return 1; } __setup("trace_options=", set_trace_boot_options); static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata; static char *trace_boot_clock __initdata; static int __init set_trace_boot_clock(char *str) { strscpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE); trace_boot_clock = trace_boot_clock_buf; return 1; } __setup("trace_clock=", set_trace_boot_clock); static int __init set_tracepoint_printk(char *str) { /* Ignore the "tp_printk_stop_on_boot" param */ if (*str == '_') return 0; if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)) tracepoint_printk = 1; return 1; } __setup("tp_printk", set_tracepoint_printk); static int __init set_tracepoint_printk_stop(char *str) { tracepoint_printk_stop_on_boot = true; return 1; } __setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop); unsigned long long ns2usecs(u64 nsec) { nsec += 500; do_div(nsec, 1000); return nsec; } static void trace_process_export(struct trace_export *export, struct ring_buffer_event *event, int flag) { struct trace_entry *entry; unsigned int size = 0; if (export->flags & flag) { entry = ring_buffer_event_data(event); size = ring_buffer_event_length(event); export->write(export, entry, size); } } static DEFINE_MUTEX(ftrace_export_lock); static struct trace_export __rcu *ftrace_exports_list __read_mostly; static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled); static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled); static DEFINE_STATIC_KEY_FALSE(trace_marker_exports_enabled); static inline void ftrace_exports_enable(struct trace_export *export) { if (export->flags & TRACE_EXPORT_FUNCTION) static_branch_inc(&trace_function_exports_enabled); if (export->flags & TRACE_EXPORT_EVENT) static_branch_inc(&trace_event_exports_enabled); if (export->flags & TRACE_EXPORT_MARKER) static_branch_inc(&trace_marker_exports_enabled); } static inline void ftrace_exports_disable(struct trace_export *export) { if (export->flags & TRACE_EXPORT_FUNCTION) static_branch_dec(&trace_function_exports_enabled); if (export->flags & TRACE_EXPORT_EVENT) static_branch_dec(&trace_event_exports_enabled); if (export->flags & TRACE_EXPORT_MARKER) static_branch_dec(&trace_marker_exports_enabled); } static void ftrace_exports(struct ring_buffer_event *event, int flag) { struct trace_export *export; preempt_disable_notrace(); export = rcu_dereference_raw_check(ftrace_exports_list); while (export) { trace_process_export(export, event, flag); export = rcu_dereference_raw_check(export->next); } preempt_enable_notrace(); } static inline void add_trace_export(struct trace_export **list, struct trace_export *export) { rcu_assign_pointer(export->next, *list); /* * We are entering 
export into the list but another * CPU might be walking that list. We need to make sure * the export->next pointer is valid before another CPU sees * the export pointer included into the list. */ rcu_assign_pointer(*list, export); } static inline int rm_trace_export(struct trace_export **list, struct trace_export *export) { struct trace_export **p; for (p = list; *p != NULL; p = &(*p)->next) if (*p == export) break; if (*p != export) return -1; rcu_assign_pointer(*p, (*p)->next); return 0; } static inline void add_ftrace_export(struct trace_export **list, struct trace_export *export) { ftrace_exports_enable(export); add_trace_export(list, export); } static inline int rm_ftrace_export(struct trace_export **list, struct trace_export *export) { int ret; ret = rm_trace_export(list, export); ftrace_exports_disable(export); return ret; } int register_ftrace_export(struct trace_export *export) { if (WARN_ON_ONCE(!export->write)) return -1; mutex_lock(&ftrace_export_lock); add_ftrace_export(&ftrace_exports_list, export); mutex_unlock(&ftrace_export_lock); return 0; } EXPORT_SYMBOL_GPL(register_ftrace_export); int unregister_ftrace_export(struct trace_export *export) { int ret; mutex_lock(&ftrace_export_lock); ret = rm_ftrace_export(&ftrace_exports_list, export); mutex_unlock(&ftrace_export_lock); return ret; } EXPORT_SYMBOL_GPL(unregister_ftrace_export); /* trace_flags holds trace_options default values */ #define TRACE_DEFAULT_FLAGS \ (FUNCTION_DEFAULT_FLAGS | \ TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \ TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \ TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \ TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | \ TRACE_ITER_HASH_PTR | TRACE_ITER_TRACE_PRINTK) /* trace_options that are only supported by global_trace */ #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \ TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD) /* trace_flags that are default zero for instances */ #define ZEROED_TRACE_FLAGS \ (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK | TRACE_ITER_TRACE_PRINTK) /* * The global_trace is the descriptor that holds the top-level tracing * buffers for the live tracing. */ static struct trace_array global_trace = { .trace_flags = TRACE_DEFAULT_FLAGS, }; static struct trace_array *printk_trace = &global_trace; static __always_inline bool printk_binsafe(struct trace_array *tr) { /* * The binary format of traceprintk can cause a crash if used * by a buffer from another boot. Force the use of the * non binary version of trace_printk if the trace_printk * buffer is a boot mapped ring buffer. */ return !(tr->flags & TRACE_ARRAY_FL_BOOT); } static void update_printk_trace(struct trace_array *tr) { if (printk_trace == tr) return; printk_trace->trace_flags &= ~TRACE_ITER_TRACE_PRINTK; printk_trace = tr; tr->trace_flags |= TRACE_ITER_TRACE_PRINTK; } void trace_set_ring_buffer_expanded(struct trace_array *tr) { if (!tr) tr = &global_trace; tr->ring_buffer_expanded = true; } LIST_HEAD(ftrace_trace_arrays); int trace_array_get(struct trace_array *this_tr) { struct trace_array *tr; guard(mutex)(&trace_types_lock); list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (tr == this_tr) { tr->ref++; return 0; } } return -ENODEV; } static void __trace_array_put(struct trace_array *this_tr) { WARN_ON(!this_tr->ref); this_tr->ref--; } /** * trace_array_put - Decrement the reference counter for this trace array. * @this_tr : pointer to the trace array * * NOTE: Use this when we no longer need the trace array returned by * trace_array_get_by_name(). 
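/*
 * Illustrative sketch (not part of trace.c): a minimal client of the
 * register_ftrace_export()/unregister_ftrace_export() interface above.
 * The write() prototype is assumed to match struct trace_export in
 * <linux/trace.h>; "example_export" and its handler are hypothetical names.
 */
#include <linux/module.h>
#include <linux/trace.h>

static void example_export_write(struct trace_export *export,
				 const void *entry, unsigned int size)
{
	/* Called from ftrace_exports() with the raw event payload. */
	pr_debug("example export: %u bytes\n", size);
}

static struct trace_export example_export = {
	.write	= example_export_write,	/* required: register warns if NULL */
	.flags	= TRACE_EXPORT_EVENT,	/* or TRACE_EXPORT_FUNCTION/_MARKER */
};

static int __init example_export_init(void)
{
	return register_ftrace_export(&example_export);
}

static void __exit example_export_exit(void)
{
	unregister_ftrace_export(&example_export);
}

module_init(example_export_init);
module_exit(example_export_exit);
MODULE_LICENSE("GPL");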
This ensures the trace array can be later * destroyed. * */ void trace_array_put(struct trace_array *this_tr) { if (!this_tr) return; mutex_lock(&trace_types_lock); __trace_array_put(this_tr); mutex_unlock(&trace_types_lock); } EXPORT_SYMBOL_GPL(trace_array_put); int tracing_check_open_get_tr(struct trace_array *tr) { int ret; ret = security_locked_down(LOCKDOWN_TRACEFS); if (ret) return ret; if (tracing_disabled) return -ENODEV; if (tr && trace_array_get(tr) < 0) return -ENODEV; return 0; } /** * trace_find_filtered_pid - check if a pid exists in a filtered_pid list * @filtered_pids: The list of pids to check * @search_pid: The PID to find in @filtered_pids * * Returns true if @search_pid is found in @filtered_pids, and false otherwise. */ bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid) { return trace_pid_list_is_set(filtered_pids, search_pid); } /** * trace_ignore_this_task - should a task be ignored for tracing * @filtered_pids: The list of pids to check * @filtered_no_pids: The list of pids not to be traced * @task: The task that should be ignored if not filtered * * Checks if @task should be traced or not from @filtered_pids. * Returns true if @task should *NOT* be traced. * Returns false if @task should be traced. */ bool trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct trace_pid_list *filtered_no_pids, struct task_struct *task) { /* * If filtered_no_pids is not empty, and the task's pid is listed * in filtered_no_pids, then return true. * Otherwise, if filtered_pids is empty, that means we can * trace all tasks. If it has content, then only trace pids * within filtered_pids. */ return (filtered_pids && !trace_find_filtered_pid(filtered_pids, task->pid)) || (filtered_no_pids && trace_find_filtered_pid(filtered_no_pids, task->pid)); } /** * trace_filter_add_remove_task - Add or remove a task from a pid_list * @pid_list: The list to modify * @self: The current task for fork or NULL for exit * @task: The task to add or remove * * If adding a task, if @self is defined, the task is only added if @self * is also included in @pid_list. This happens on fork and tasks should * only be added when the parent is listed. If @self is NULL, then the * @task pid will be removed from the list, which would happen on exit * of a task. */ void trace_filter_add_remove_task(struct trace_pid_list *pid_list, struct task_struct *self, struct task_struct *task) { if (!pid_list) return; /* For forks, we only add if the forking task is listed */ if (self) { if (!trace_find_filtered_pid(pid_list, self->pid)) return; } /* "self" is set for forks, and NULL for exits */ if (self) trace_pid_list_set(pid_list, task->pid); else trace_pid_list_clear(pid_list, task->pid); } /** * trace_pid_next - Used for seq_file to get to the next pid of a pid_list * @pid_list: The pid list to show * @v: The last pid that was shown (+1 the actual pid to let zero be displayed) * @pos: The position of the file * * This is used by the seq_file "next" operation to iterate the pids * listed in a trace_pid_list structure. * * Returns the pid+1 as we want to display pid of zero, but NULL would * stop the iteration. 
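/*
 * Illustrative sketch (hypothetical callers, relying on the declarations
 * above): how the pid filtering helpers are meant to be used from fork/exit
 * paths and from a per-event check, following the "self set on fork, NULL on
 * exit" convention documented above.
 */
static void example_on_fork(struct trace_pid_list *pid_list,
			    struct task_struct *parent,
			    struct task_struct *child)
{
	/* The child is added only if the forking parent is already filtered. */
	trace_filter_add_remove_task(pid_list, parent, child);
}

static void example_on_exit(struct trace_pid_list *pid_list,
			    struct task_struct *task)
{
	/* A NULL "self" means: remove @task from the list. */
	trace_filter_add_remove_task(pid_list, NULL, task);
}

static bool example_should_trace(struct trace_pid_list *pids,
				 struct trace_pid_list *no_pids,
				 struct task_struct *task)
{
	/* trace_ignore_this_task() returning true means "skip this task". */
	return !trace_ignore_this_task(pids, no_pids, task);
}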
*/ void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos) { long pid = (unsigned long)v; unsigned int next; (*pos)++; /* pid already is +1 of the actual previous bit */ if (trace_pid_list_next(pid_list, pid, &next) < 0) return NULL; pid = next; /* Return pid + 1 to allow zero to be represented */ return (void *)(pid + 1); } /** * trace_pid_start - Used for seq_file to start reading pid lists * @pid_list: The pid list to show * @pos: The position of the file * * This is used by seq_file "start" operation to start the iteration * of listing pids. * * Returns the pid+1 as we want to display pid of zero, but NULL would * stop the iteration. */ void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos) { unsigned long pid; unsigned int first; loff_t l = 0; if (trace_pid_list_first(pid_list, &first) < 0) return NULL; pid = first; /* Return pid + 1 so that zero can be the exit value */ for (pid++; pid && l < *pos; pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l)) ; return (void *)pid; } /** * trace_pid_show - show the current pid in seq_file processing * @m: The seq_file structure to write into * @v: A void pointer of the pid (+1) value to display * * Can be directly used by seq_file operations to display the current * pid value. */ int trace_pid_show(struct seq_file *m, void *v) { unsigned long pid = (unsigned long)v - 1; seq_printf(m, "%lu\n", pid); return 0; } /* 128 should be much more than enough */ #define PID_BUF_SIZE 127 int trace_pid_write(struct trace_pid_list *filtered_pids, struct trace_pid_list **new_pid_list, const char __user *ubuf, size_t cnt) { struct trace_pid_list *pid_list; struct trace_parser parser; unsigned long val; int nr_pids = 0; ssize_t read = 0; ssize_t ret; loff_t pos; pid_t pid; if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1)) return -ENOMEM; /* * Always recreate a new array. The write is an all or nothing * operation. Always create a new array when adding new pids by * the user. If the operation fails, then the current list is * not modified. */ pid_list = trace_pid_list_alloc(); if (!pid_list) { trace_parser_put(&parser); return -ENOMEM; } if (filtered_pids) { /* copy the current bits to the new max */ ret = trace_pid_list_first(filtered_pids, &pid); while (!ret) { trace_pid_list_set(pid_list, pid); ret = trace_pid_list_next(filtered_pids, pid + 1, &pid); nr_pids++; } } ret = 0; while (cnt > 0) { pos = 0; ret = trace_get_user(&parser, ubuf, cnt, &pos); if (ret < 0) break; read += ret; ubuf += ret; cnt -= ret; if (!trace_parser_loaded(&parser)) break; ret = -EINVAL; if (kstrtoul(parser.buffer, 0, &val)) break; pid = (pid_t)val; if (trace_pid_list_set(pid_list, pid) < 0) { ret = -1; break; } nr_pids++; trace_parser_clear(&parser); ret = 0; } trace_parser_put(&parser); if (ret < 0) { trace_pid_list_free(pid_list); return ret; } if (!nr_pids) { /* Cleared the list of pids */ trace_pid_list_free(pid_list); pid_list = NULL; } *new_pid_list = pid_list; return read; } static u64 buffer_ftrace_now(struct array_buffer *buf, int cpu) { u64 ts; /* Early boot up does not have a buffer yet */ if (!buf->buffer) return trace_clock_local(); ts = ring_buffer_time_stamp(buf->buffer); ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts); return ts; } u64 ftrace_now(int cpu) { return buffer_ftrace_now(&global_trace.array_buffer, cpu); } /** * tracing_is_enabled - Show if global_trace has been enabled * * Shows if the global trace has been enabled or not. 
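/*
 * Illustrative sketch (hypothetical file, not from trace.c): wiring the
 * trace_pid_start()/trace_pid_next()/trace_pid_show() helpers above into a
 * seq_file so a pid list can be read back.  "example_pid_list" is assumed to
 * be a trace_pid_list managed elsewhere; a real ->start/->stop pair would
 * also take and drop whatever locking protects that list.
 */
static struct trace_pid_list *example_pid_list;

static void *example_pids_start(struct seq_file *m, loff_t *pos)
{
	return trace_pid_start(example_pid_list, pos);
}

static void *example_pids_next(struct seq_file *m, void *v, loff_t *pos)
{
	return trace_pid_next(example_pid_list, v, pos);
}

static void example_pids_stop(struct seq_file *m, void *v)
{
	/* Release here any protection taken in ->start. */
}

static const struct seq_operations example_pids_seq_ops = {
	.start	= example_pids_start,
	.next	= example_pids_next,
	.stop	= example_pids_stop,
	.show	= trace_pid_show,	/* prints "pid\n", handling the +1 offset */
};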
It uses the * mirror flag "buffer_disabled" to be used in fast paths such as for * the irqsoff tracer. But it may be inaccurate due to races. If you * need to know the accurate state, use tracing_is_on() which is a little * slower, but accurate. */ int tracing_is_enabled(void) { /* * For quick access (irqsoff uses this in fast path), just * return the mirror variable of the state of the ring buffer. * It's a little racy, but we don't really care. */ smp_rmb(); return !global_trace.buffer_disabled; } /* * trace_buf_size is the size in bytes that is allocated * for a buffer. Note, the number of bytes is always rounded * to page size. * * This number is purposely set to a low number of 16384. * If the dump on oops happens, it will be much appreciated * to not have to wait for all that output. Anyway this can be * boot time and run time configurable. */ #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */ static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT; /* trace_types holds a link list of available tracers. */ static struct tracer *trace_types __read_mostly; /* * trace_types_lock is used to protect the trace_types list. */ DEFINE_MUTEX(trace_types_lock); /* * serialize the access of the ring buffer * * ring buffer serializes readers, but it is low level protection. * The validity of the events (which returns by ring_buffer_peek() ..etc) * are not protected by ring buffer. * * The content of events may become garbage if we allow other process consumes * these events concurrently: * A) the page of the consumed events may become a normal page * (not reader page) in ring buffer, and this page will be rewritten * by events producer. * B) The page of the consumed events may become a page for splice_read, * and this page will be returned to system. * * These primitives allow multi process access to different cpu ring buffer * concurrently. * * These primitives don't distinguish read-only and read-consume access. * Multi read-only access are also serialized. */ #ifdef CONFIG_SMP static DECLARE_RWSEM(all_cpu_access_lock); static DEFINE_PER_CPU(struct mutex, cpu_access_lock); static inline void trace_access_lock(int cpu) { if (cpu == RING_BUFFER_ALL_CPUS) { /* gain it for accessing the whole ring buffer. */ down_write(&all_cpu_access_lock); } else { /* gain it for accessing a cpu ring buffer. */ /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */ down_read(&all_cpu_access_lock); /* Secondly block other access to this @cpu ring buffer. 
*/ mutex_lock(&per_cpu(cpu_access_lock, cpu)); } } static inline void trace_access_unlock(int cpu) { if (cpu == RING_BUFFER_ALL_CPUS) { up_write(&all_cpu_access_lock); } else { mutex_unlock(&per_cpu(cpu_access_lock, cpu)); up_read(&all_cpu_access_lock); } } static inline void trace_access_lock_init(void) { int cpu; for_each_possible_cpu(cpu) mutex_init(&per_cpu(cpu_access_lock, cpu)); } #else static DEFINE_MUTEX(access_lock); static inline void trace_access_lock(int cpu) { (void)cpu; mutex_lock(&access_lock); } static inline void trace_access_unlock(int cpu) { (void)cpu; mutex_unlock(&access_lock); } static inline void trace_access_lock_init(void) { } #endif #ifdef CONFIG_STACKTRACE static void __ftrace_trace_stack(struct trace_array *tr, struct trace_buffer *buffer, unsigned int trace_ctx, int skip, struct pt_regs *regs); static inline void ftrace_trace_stack(struct trace_array *tr, struct trace_buffer *buffer, unsigned int trace_ctx, int skip, struct pt_regs *regs); #else static inline void __ftrace_trace_stack(struct trace_array *tr, struct trace_buffer *buffer, unsigned int trace_ctx, int skip, struct pt_regs *regs) { } static inline void ftrace_trace_stack(struct trace_array *tr, struct trace_buffer *buffer, unsigned long trace_ctx, int skip, struct pt_regs *regs) { } #endif static __always_inline void trace_event_setup(struct ring_buffer_event *event, int type, unsigned int trace_ctx) { struct trace_entry *ent = ring_buffer_event_data(event); tracing_generic_entry_update(ent, type, trace_ctx); } static __always_inline struct ring_buffer_event * __trace_buffer_lock_reserve(struct trace_buffer *buffer, int type, unsigned long len, unsigned int trace_ctx) { struct ring_buffer_event *event; event = ring_buffer_lock_reserve(buffer, len); if (event != NULL) trace_event_setup(event, type, trace_ctx); return event; } void tracer_tracing_on(struct trace_array *tr) { if (tr->array_buffer.buffer) ring_buffer_record_on(tr->array_buffer.buffer); /* * This flag is looked at when buffers haven't been allocated * yet, or by some tracers (like irqsoff), that just want to * know if the ring buffer has been disabled, but it can handle * races of where it gets disabled but we still do a record. * As the check is in the fast path of the tracers, it is more * important to be fast than accurate. */ tr->buffer_disabled = 0; /* Make the flag seen by readers */ smp_wmb(); } /** * tracing_on - enable tracing buffers * * This function enables tracing buffers that may have been * disabled with tracing_off. 
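/*
 * Illustrative sketch (hypothetical helpers, not from trace.c): the pattern a
 * ring-buffer reader in this file follows with trace_access_lock(), defined
 * above.  A per-CPU reader only excludes readers of that CPU's buffer plus
 * any whole-buffer reader; a RING_BUFFER_ALL_CPUS reader excludes everybody.
 */
static void example_consume_cpu(struct trace_buffer *buffer, int cpu)
{
	trace_access_lock(cpu);		/* read all_cpu_access_lock + per-cpu mutex */
	/* ... ring_buffer_peek()/ring_buffer_consume() style accesses ... */
	trace_access_unlock(cpu);
}

static void example_consume_all(struct trace_buffer *buffer)
{
	trace_access_lock(RING_BUFFER_ALL_CPUS);	/* write lock: exclusive */
	/* ... walk every CPU's buffer safely ... */
	trace_access_unlock(RING_BUFFER_ALL_CPUS);
}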
*/ void tracing_on(void) { tracer_tracing_on(&global_trace); } EXPORT_SYMBOL_GPL(tracing_on); static __always_inline void __buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event) { __this_cpu_write(trace_taskinfo_save, true); /* If this is the temp buffer, we need to commit fully */ if (this_cpu_read(trace_buffered_event) == event) { /* Length is in event->array[0] */ ring_buffer_write(buffer, event->array[0], &event->array[1]); /* Release the temp buffer */ this_cpu_dec(trace_buffered_event_cnt); /* ring_buffer_unlock_commit() enables preemption */ preempt_enable_notrace(); } else ring_buffer_unlock_commit(buffer); } int __trace_array_puts(struct trace_array *tr, unsigned long ip, const char *str, int size) { struct ring_buffer_event *event; struct trace_buffer *buffer; struct print_entry *entry; unsigned int trace_ctx; int alloc; if (!(tr->trace_flags & TRACE_ITER_PRINTK)) return 0; if (unlikely(tracing_selftest_running && tr == &global_trace)) return 0; if (unlikely(tracing_disabled)) return 0; alloc = sizeof(*entry) + size + 2; /* possible \n added */ trace_ctx = tracing_gen_ctx(); buffer = tr->array_buffer.buffer; ring_buffer_nest_start(buffer); event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, trace_ctx); if (!event) { size = 0; goto out; } entry = ring_buffer_event_data(event); entry->ip = ip; memcpy(&entry->buf, str, size); /* Add a newline if necessary */ if (entry->buf[size - 1] != '\n') { entry->buf[size] = '\n'; entry->buf[size + 1] = '\0'; } else entry->buf[size] = '\0'; __buffer_unlock_commit(buffer, event); ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL); out: ring_buffer_nest_end(buffer); return size; } EXPORT_SYMBOL_GPL(__trace_array_puts); /** * __trace_puts - write a constant string into the trace buffer. * @ip: The address of the caller * @str: The constant string to write * @size: The size of the string. 
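/*
 * Usage sketch (hypothetical call site): the trace_puts() and trace_printk()
 * macros from <linux/kernel.h> are the usual front ends that end up in
 * __trace_puts()/__trace_bputs() above; both write into the trace buffer
 * rather than to the console.
 */
#include <linux/kernel.h>

static void example_debug_hook(int err)
{
	trace_puts("example: entering rarely-taken path\n");
	if (err)
		trace_printk("example: path failed with %d\n", err);
}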
*/ int __trace_puts(unsigned long ip, const char *str, int size) { return __trace_array_puts(printk_trace, ip, str, size); } EXPORT_SYMBOL_GPL(__trace_puts); /** * __trace_bputs - write the pointer to a constant string into trace buffer * @ip: The address of the caller * @str: The constant string to write to the buffer to */ int __trace_bputs(unsigned long ip, const char *str) { struct trace_array *tr = READ_ONCE(printk_trace); struct ring_buffer_event *event; struct trace_buffer *buffer; struct bputs_entry *entry; unsigned int trace_ctx; int size = sizeof(struct bputs_entry); int ret = 0; if (!printk_binsafe(tr)) return __trace_puts(ip, str, strlen(str)); if (!(tr->trace_flags & TRACE_ITER_PRINTK)) return 0; if (unlikely(tracing_selftest_running || tracing_disabled)) return 0; trace_ctx = tracing_gen_ctx(); buffer = tr->array_buffer.buffer; ring_buffer_nest_start(buffer); event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size, trace_ctx); if (!event) goto out; entry = ring_buffer_event_data(event); entry->ip = ip; entry->str = str; __buffer_unlock_commit(buffer, event); ftrace_trace_stack(tr, buffer, trace_ctx, 4, NULL); ret = 1; out: ring_buffer_nest_end(buffer); return ret; } EXPORT_SYMBOL_GPL(__trace_bputs); #ifdef CONFIG_TRACER_SNAPSHOT static void tracing_snapshot_instance_cond(struct trace_array *tr, void *cond_data) { struct tracer *tracer = tr->current_trace; unsigned long flags; if (in_nmi()) { trace_array_puts(tr, "*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n"); trace_array_puts(tr, "*** snapshot is being ignored ***\n"); return; } if (!tr->allocated_snapshot) { trace_array_puts(tr, "*** SNAPSHOT NOT ALLOCATED ***\n"); trace_array_puts(tr, "*** stopping trace here! ***\n"); tracer_tracing_off(tr); return; } /* Note, snapshot can not be used when the tracer uses it */ if (tracer->use_max_tr) { trace_array_puts(tr, "*** LATENCY TRACER ACTIVE ***\n"); trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n"); return; } if (tr->mapped) { trace_array_puts(tr, "*** BUFFER MEMORY MAPPED ***\n"); trace_array_puts(tr, "*** Can not use snapshot (sorry) ***\n"); return; } local_irq_save(flags); update_max_tr(tr, current, smp_processor_id(), cond_data); local_irq_restore(flags); } void tracing_snapshot_instance(struct trace_array *tr) { tracing_snapshot_instance_cond(tr, NULL); } /** * tracing_snapshot - take a snapshot of the current buffer. * * This causes a swap between the snapshot buffer and the current live * tracing buffer. You can use this to take snapshots of the live * trace when some condition is triggered, but continue to trace. * * Note, make sure to allocate the snapshot with either * a tracing_snapshot_alloc(), or by doing it manually * with: echo 1 > /sys/kernel/tracing/snapshot * * If the snapshot buffer is not allocated, it will stop tracing. * Basically making a permanent snapshot. */ void tracing_snapshot(void) { struct trace_array *tr = &global_trace; tracing_snapshot_instance(tr); } EXPORT_SYMBOL_GPL(tracing_snapshot); /** * tracing_snapshot_cond - conditionally take a snapshot of the current buffer. 
* @tr: The tracing instance to snapshot * @cond_data: The data to be tested conditionally, and possibly saved * * This is the same as tracing_snapshot() except that the snapshot is * conditional - the snapshot will only happen if the * cond_snapshot.update() implementation receiving the cond_data * returns true, which means that the trace array's cond_snapshot * update() operation used the cond_data to determine whether the * snapshot should be taken, and if it was, presumably saved it along * with the snapshot. */ void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) { tracing_snapshot_instance_cond(tr, cond_data); } EXPORT_SYMBOL_GPL(tracing_snapshot_cond); /** * tracing_cond_snapshot_data - get the user data associated with a snapshot * @tr: The tracing instance * * When the user enables a conditional snapshot using * tracing_snapshot_cond_enable(), the user-defined cond_data is saved * with the snapshot. This accessor is used to retrieve it. * * Should not be called from cond_snapshot.update(), since it takes * the tr->max_lock lock, which the code calling * cond_snapshot.update() has already done. * * Returns the cond_data associated with the trace array's snapshot. */ void *tracing_cond_snapshot_data(struct trace_array *tr) { void *cond_data = NULL; local_irq_disable(); arch_spin_lock(&tr->max_lock); if (tr->cond_snapshot) cond_data = tr->cond_snapshot->cond_data; arch_spin_unlock(&tr->max_lock); local_irq_enable(); return cond_data; } EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data); static int resize_buffer_duplicate_size(struct array_buffer *trace_buf, struct array_buffer *size_buf, int cpu_id); static void set_buffer_entries(struct array_buffer *buf, unsigned long val); int tracing_alloc_snapshot_instance(struct trace_array *tr) { int order; int ret; if (!tr->allocated_snapshot) { /* Make the snapshot buffer have the same order as main buffer */ order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer); ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order); if (ret < 0) return ret; /* allocate spare buffer */ ret = resize_buffer_duplicate_size(&tr->max_buffer, &tr->array_buffer, RING_BUFFER_ALL_CPUS); if (ret < 0) return ret; tr->allocated_snapshot = true; } return 0; } static void free_snapshot(struct trace_array *tr) { /* * We don't free the ring buffer. instead, resize it because * The max_tr ring buffer has some state (e.g. ring->clock) and * we want preserve it. 
*/ ring_buffer_subbuf_order_set(tr->max_buffer.buffer, 0); ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); set_buffer_entries(&tr->max_buffer, 1); tracing_reset_online_cpus(&tr->max_buffer); tr->allocated_snapshot = false; } static int tracing_arm_snapshot_locked(struct trace_array *tr) { int ret; lockdep_assert_held(&trace_types_lock); spin_lock(&tr->snapshot_trigger_lock); if (tr->snapshot == UINT_MAX || tr->mapped) { spin_unlock(&tr->snapshot_trigger_lock); return -EBUSY; } tr->snapshot++; spin_unlock(&tr->snapshot_trigger_lock); ret = tracing_alloc_snapshot_instance(tr); if (ret) { spin_lock(&tr->snapshot_trigger_lock); tr->snapshot--; spin_unlock(&tr->snapshot_trigger_lock); } return ret; } int tracing_arm_snapshot(struct trace_array *tr) { int ret; mutex_lock(&trace_types_lock); ret = tracing_arm_snapshot_locked(tr); mutex_unlock(&trace_types_lock); return ret; } void tracing_disarm_snapshot(struct trace_array *tr) { spin_lock(&tr->snapshot_trigger_lock); if (!WARN_ON(!tr->snapshot)) tr->snapshot--; spin_unlock(&tr->snapshot_trigger_lock); } /** * tracing_alloc_snapshot - allocate snapshot buffer. * * This only allocates the snapshot buffer if it isn't already * allocated - it doesn't also take a snapshot. * * This is meant to be used in cases where the snapshot buffer needs * to be set up for events that can't sleep but need to be able to * trigger a snapshot. */ int tracing_alloc_snapshot(void) { struct trace_array *tr = &global_trace; int ret; ret = tracing_alloc_snapshot_instance(tr); WARN_ON(ret < 0); return ret; } EXPORT_SYMBOL_GPL(tracing_alloc_snapshot); /** * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer. * * This is similar to tracing_snapshot(), but it will allocate the * snapshot buffer if it isn't already allocated. Use this only * where it is safe to sleep, as the allocation may sleep. * * This causes a swap between the snapshot buffer and the current live * tracing buffer. You can use this to take snapshots of the live * trace when some condition is triggered, but continue to trace. */ void tracing_snapshot_alloc(void) { int ret; ret = tracing_alloc_snapshot(); if (ret < 0) return; tracing_snapshot(); } EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); /** * tracing_snapshot_cond_enable - enable conditional snapshot for an instance * @tr: The tracing instance * @cond_data: User data to associate with the snapshot * @update: Implementation of the cond_snapshot update function * * Check whether the conditional snapshot for the given instance has * already been enabled, or if the current tracer is already using a * snapshot; if so, return -EBUSY, else create a cond_snapshot and * save the cond_data and update function inside. * * Returns 0 if successful, error otherwise. */ int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update) { struct cond_snapshot *cond_snapshot __free(kfree) = kzalloc(sizeof(*cond_snapshot), GFP_KERNEL); int ret; if (!cond_snapshot) return -ENOMEM; cond_snapshot->cond_data = cond_data; cond_snapshot->update = update; guard(mutex)(&trace_types_lock); if (tr->current_trace->use_max_tr) return -EBUSY; /* * The cond_snapshot can only change to NULL without the * trace_types_lock. We don't care if we race with it going * to NULL, but we want to make sure that it's not set to * something other than NULL when we get here, which we can * do safely with only holding the trace_types_lock and not * having to take the max_lock. 
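/*
 * Usage sketch (hypothetical module code): allocate the snapshot buffer once
 * in a context that may sleep, then tracing_snapshot() can be called from a
 * fast path to freeze the interesting trace into the spare buffer, as the
 * comments above describe.
 */
static int __init example_snapshot_init(void)
{
	/* May sleep: sizes the spare buffer like the live one. */
	return tracing_alloc_snapshot();
}

static void example_detect_anomaly(bool anomaly)
{
	if (anomaly)
		tracing_snapshot();	/* swap live buffer and snapshot buffer */
}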
*/ if (tr->cond_snapshot) return -EBUSY; ret = tracing_arm_snapshot_locked(tr); if (ret) return ret; local_irq_disable(); arch_spin_lock(&tr->max_lock); tr->cond_snapshot = no_free_ptr(cond_snapshot); arch_spin_unlock(&tr->max_lock); local_irq_enable(); return 0; } EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable); /** * tracing_snapshot_cond_disable - disable conditional snapshot for an instance * @tr: The tracing instance * * Check whether the conditional snapshot for the given instance is * enabled; if so, free the cond_snapshot associated with it, * otherwise return -EINVAL. * * Returns 0 if successful, error otherwise. */ int tracing_snapshot_cond_disable(struct trace_array *tr) { int ret = 0; local_irq_disable(); arch_spin_lock(&tr->max_lock); if (!tr->cond_snapshot) ret = -EINVAL; else { kfree(tr->cond_snapshot); tr->cond_snapshot = NULL; } arch_spin_unlock(&tr->max_lock); local_irq_enable(); tracing_disarm_snapshot(tr); return ret; } EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable); #else void tracing_snapshot(void) { WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used"); } EXPORT_SYMBOL_GPL(tracing_snapshot); void tracing_snapshot_cond(struct trace_array *tr, void *cond_data) { WARN_ONCE(1, "Snapshot feature not enabled, but internal conditional snapshot used"); } EXPORT_SYMBOL_GPL(tracing_snapshot_cond); int tracing_alloc_snapshot(void) { WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used"); return -ENODEV; } EXPORT_SYMBOL_GPL(tracing_alloc_snapshot); void tracing_snapshot_alloc(void) { /* Give warning */ tracing_snapshot(); } EXPORT_SYMBOL_GPL(tracing_snapshot_alloc); void *tracing_cond_snapshot_data(struct trace_array *tr) { return NULL; } EXPORT_SYMBOL_GPL(tracing_cond_snapshot_data); int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data, cond_update_fn_t update) { return -ENODEV; } EXPORT_SYMBOL_GPL(tracing_snapshot_cond_enable); int tracing_snapshot_cond_disable(struct trace_array *tr) { return false; } EXPORT_SYMBOL_GPL(tracing_snapshot_cond_disable); #define free_snapshot(tr) do { } while (0) #define tracing_arm_snapshot_locked(tr) ({ -EBUSY; }) #endif /* CONFIG_TRACER_SNAPSHOT */ void tracer_tracing_off(struct trace_array *tr) { if (tr->array_buffer.buffer) ring_buffer_record_off(tr->array_buffer.buffer); /* * This flag is looked at when buffers haven't been allocated * yet, or by some tracers (like irqsoff), that just want to * know if the ring buffer has been disabled, but it can handle * races of where it gets disabled but we still do a record. * As the check is in the fast path of the tracers, it is more * important to be fast than accurate. */ tr->buffer_disabled = 1; /* Make the flag seen by readers */ smp_wmb(); } /** * tracing_off - turn off tracing buffers * * This function stops the tracing buffers from recording data. * It does not disable any overhead the tracers themselves may * be causing. This function simply causes all recording to * the ring buffers to fail. */ void tracing_off(void) { tracer_tracing_off(&global_trace); } EXPORT_SYMBOL_GPL(tracing_off); void disable_trace_on_warning(void) { if (__disable_trace_on_warning) { trace_array_printk_buf(global_trace.array_buffer.buffer, _THIS_IP_, "Disabling tracing due to warning\n"); tracing_off(); } } /** * tracer_tracing_is_on - show real state of ring buffer enabled * @tr : the trace array to know if ring buffer is enabled * * Shows real state of the ring buffer if it is enabled or not. 
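/*
 * Usage sketch (hypothetical client of the conditional-snapshot API above).
 * The update() callback runs under tr->max_lock, so it must not call
 * tracing_cond_snapshot_data(); it only inspects the cond_data handed to
 * tracing_snapshot_cond().  The prototype is assumed to match
 * cond_update_fn_t from <linux/trace.h>.
 */
static bool example_cond_update(struct trace_array *tr, void *cond_data)
{
	/* Snapshot only when the caller-supplied latency exceeds 100 us. */
	return (unsigned long)cond_data > 100000UL;
}

static int example_cond_setup(struct trace_array *tr, void *saved)
{
	/* "saved" is retrievable later via tracing_cond_snapshot_data(tr). */
	return tracing_snapshot_cond_enable(tr, saved, example_cond_update);
}

static void example_hot_path(struct trace_array *tr, unsigned long latency_ns)
{
	/* Each call is filtered by example_cond_update() before swapping. */
	tracing_snapshot_cond(tr, (void *)latency_ns);
}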
*/ bool tracer_tracing_is_on(struct trace_array *tr) { if (tr->array_buffer.buffer) return ring_buffer_record_is_set_on(tr->array_buffer.buffer); return !tr->buffer_disabled; } /** * tracing_is_on - show state of ring buffers enabled */ int tracing_is_on(void) { return tracer_tracing_is_on(&global_trace); } EXPORT_SYMBOL_GPL(tracing_is_on); static int __init set_buf_size(char *str) { unsigned long buf_size; if (!str) return 0; buf_size = memparse(str, &str); /* * nr_entries can not be zero and the startup * tests require some buffer space. Therefore * ensure we have at least 4096 bytes of buffer. */ trace_buf_size = max(4096UL, buf_size); return 1; } __setup("trace_buf_size=", set_buf_size); static int __init set_tracing_thresh(char *str) { unsigned long threshold; int ret; if (!str) return 0; ret = kstrtoul(str, 0, &threshold); if (ret < 0) return 0; tracing_thresh = threshold * 1000; return 1; } __setup("tracing_thresh=", set_tracing_thresh); unsigned long nsecs_to_usecs(unsigned long nsecs) { return nsecs / 1000; } /* * TRACE_FLAGS is defined as a tuple matching bit masks with strings. * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list * of strings in the order that the evals (enum) were defined. */ #undef C #define C(a, b) b /* These must match the bit positions in trace_iterator_flags */ static const char *trace_options[] = { TRACE_FLAGS NULL }; static struct { u64 (*func)(void); const char *name; int in_ns; /* is this clock in nanoseconds? */ } trace_clocks[] = { { trace_clock_local, "local", 1 }, { trace_clock_global, "global", 1 }, { trace_clock_counter, "counter", 0 }, { trace_clock_jiffies, "uptime", 0 }, { trace_clock, "perf", 1 }, { ktime_get_mono_fast_ns, "mono", 1 }, { ktime_get_raw_fast_ns, "mono_raw", 1 }, { ktime_get_boot_fast_ns, "boot", 1 }, { ktime_get_tai_fast_ns, "tai", 1 }, ARCH_TRACE_CLOCKS }; bool trace_clock_in_ns(struct trace_array *tr) { if (trace_clocks[tr->clock_id].in_ns) return true; return false; } /* * trace_parser_get_init - gets the buffer for trace parser */ int trace_parser_get_init(struct trace_parser *parser, int size) { memset(parser, 0, sizeof(*parser)); parser->buffer = kmalloc(size, GFP_KERNEL); if (!parser->buffer) return 1; parser->size = size; return 0; } /* * trace_parser_put - frees the buffer for trace parser */ void trace_parser_put(struct trace_parser *parser) { kfree(parser->buffer); parser->buffer = NULL; } /* * trace_get_user - reads the user input string separated by space * (matched by isspace(ch)) * * For each string found the 'struct trace_parser' is updated, * and the function returns. * * Returns number of bytes read. * * See kernel/trace/trace.h for 'struct trace_parser' details. */ int trace_get_user(struct trace_parser *parser, const char __user *ubuf, size_t cnt, loff_t *ppos) { char ch; size_t read = 0; ssize_t ret; if (!*ppos) trace_parser_clear(parser); ret = get_user(ch, ubuf++); if (ret) goto out; read++; cnt--; /* * The parser is not finished with the last write, * continue reading the user input without skipping spaces. 
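 *
 * For reference, a typical ->write() handler drives the parser like
 * this (illustrative sketch; process_token() is hypothetical and
 * error handling is trimmed):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		process_token(parser.buffer);
 *
 *	trace_parser_put(&parser);
 *	return read;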
*/ if (!parser->cont) { /* skip white space */ while (cnt && isspace(ch)) { ret = get_user(ch, ubuf++); if (ret) goto out; read++; cnt--; } parser->idx = 0; /* only spaces were written */ if (isspace(ch) || !ch) { *ppos += read; ret = read; goto out; } } /* read the non-space input */ while (cnt && !isspace(ch) && ch) { if (parser->idx < parser->size - 1) parser->buffer[parser->idx++] = ch; else { ret = -EINVAL; goto out; } ret = get_user(ch, ubuf++); if (ret) goto out; read++; cnt--; } /* We either got finished input or we have to wait for another call. */ if (isspace(ch) || !ch) { parser->buffer[parser->idx] = 0; parser->cont = false; } else if (parser->idx < parser->size - 1) { parser->cont = true; parser->buffer[parser->idx++] = ch; /* Make sure the parsed string always terminates with '\0'. */ parser->buffer[parser->idx] = 0; } else { ret = -EINVAL; goto out; } *ppos += read; ret = read; out: return ret; } /* TODO add a seq_buf_to_buffer() */ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) { int len; if (trace_seq_used(s) <= s->readpos) return -EBUSY; len = trace_seq_used(s) - s->readpos; if (cnt > len) cnt = len; memcpy(buf, s->buffer + s->readpos, cnt); s->readpos += cnt; return cnt; } unsigned long __read_mostly tracing_thresh; #ifdef CONFIG_TRACER_MAX_TRACE static const struct file_operations tracing_max_lat_fops; #ifdef LATENCY_FS_NOTIFY static struct workqueue_struct *fsnotify_wq; static void latency_fsnotify_workfn(struct work_struct *work) { struct trace_array *tr = container_of(work, struct trace_array, fsnotify_work); fsnotify_inode(tr->d_max_latency->d_inode, FS_MODIFY); } static void latency_fsnotify_workfn_irq(struct irq_work *iwork) { struct trace_array *tr = container_of(iwork, struct trace_array, fsnotify_irqwork); queue_work(fsnotify_wq, &tr->fsnotify_work); } static void trace_create_maxlat_file(struct trace_array *tr, struct dentry *d_tracer) { INIT_WORK(&tr->fsnotify_work, latency_fsnotify_workfn); init_irq_work(&tr->fsnotify_irqwork, latency_fsnotify_workfn_irq); tr->d_max_latency = trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, d_tracer, tr, &tracing_max_lat_fops); } __init static int latency_fsnotify_init(void) { fsnotify_wq = alloc_workqueue("tr_max_lat_wq", WQ_UNBOUND | WQ_HIGHPRI, 0); if (!fsnotify_wq) { pr_err("Unable to allocate tr_max_lat_wq\n"); return -ENOMEM; } return 0; } late_initcall_sync(latency_fsnotify_init); void latency_fsnotify(struct trace_array *tr) { if (!fsnotify_wq) return; /* * We cannot call queue_work(&tr->fsnotify_work) from here because it's * possible that we are called from __schedule() or do_idle(), which * could cause a deadlock. */ irq_work_queue(&tr->fsnotify_irqwork); } #else /* !LATENCY_FS_NOTIFY */ #define trace_create_maxlat_file(tr, d_tracer) \ trace_create_file("tracing_max_latency", TRACE_MODE_WRITE, \ d_tracer, tr, &tracing_max_lat_fops) #endif /* * Copy the new maximum trace into the separate maximum-trace * structure. 
(this way the maximum trace is permanently saved, * for later retrieval via /sys/kernel/tracing/tracing_max_latency) */ static void __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) { struct array_buffer *trace_buf = &tr->array_buffer; struct array_buffer *max_buf = &tr->max_buffer; struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu); struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu); max_buf->cpu = cpu; max_buf->time_start = data->preempt_timestamp; max_data->saved_latency = tr->max_latency; max_data->critical_start = data->critical_start; max_data->critical_end = data->critical_end; strscpy(max_data->comm, tsk->comm); max_data->pid = tsk->pid; /* * If tsk == current, then use current_uid(), as that does not use * RCU. The irq tracer can be called out of RCU scope. */ if (tsk == current) max_data->uid = current_uid(); else max_data->uid = task_uid(tsk); max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; max_data->policy = tsk->policy; max_data->rt_priority = tsk->rt_priority; /* record this tasks comm */ tracing_record_cmdline(tsk); latency_fsnotify(tr); } /** * update_max_tr - snapshot all trace buffers from global_trace to max_tr * @tr: tracer * @tsk: the task with the latency * @cpu: The cpu that initiated the trace. * @cond_data: User data associated with a conditional snapshot * * Flip the buffers between the @tr and the max_tr and record information * about which task was the cause of this latency. */ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, void *cond_data) { if (tr->stop_count) return; WARN_ON_ONCE(!irqs_disabled()); if (!tr->allocated_snapshot) { /* Only the nop tracer should hit this when disabling */ WARN_ON_ONCE(tr->current_trace != &nop_trace); return; } arch_spin_lock(&tr->max_lock); /* Inherit the recordable setting from array_buffer */ if (ring_buffer_record_is_set_on(tr->array_buffer.buffer)) ring_buffer_record_on(tr->max_buffer.buffer); else ring_buffer_record_off(tr->max_buffer.buffer); #ifdef CONFIG_TRACER_SNAPSHOT if (tr->cond_snapshot && !tr->cond_snapshot->update(tr, cond_data)) { arch_spin_unlock(&tr->max_lock); return; } #endif swap(tr->array_buffer.buffer, tr->max_buffer.buffer); __update_max_tr(tr, tsk, cpu); arch_spin_unlock(&tr->max_lock); /* Any waiters on the old snapshot buffer need to wake up */ ring_buffer_wake_waiters(tr->array_buffer.buffer, RING_BUFFER_ALL_CPUS); } /** * update_max_tr_single - only copy one trace over, and reset the rest * @tr: tracer * @tsk: task with the latency * @cpu: the cpu of the buffer to copy. * * Flip the trace of a single CPU buffer between the @tr and the max_tr. */ void update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) { int ret; if (tr->stop_count) return; WARN_ON_ONCE(!irqs_disabled()); if (!tr->allocated_snapshot) { /* Only the nop tracer should hit this when disabling */ WARN_ON_ONCE(tr->current_trace != &nop_trace); return; } arch_spin_lock(&tr->max_lock); ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->array_buffer.buffer, cpu); if (ret == -EBUSY) { /* * We failed to swap the buffer due to a commit taking * place on this CPU. We fail to record, but we reset * the max trace buffer (no one writes directly to it) * and flag that it failed. * Another reason is resize is in progress. 
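 *
 * For reference, the latency tracers reach this point with a pattern
 * roughly like (sketch, modeled on the irqsoff tracer):
 *
 *	if (delta > tr->max_latency) {
 *		tr->max_latency = delta;
 *		update_max_tr_single(tr, current, cpu);
 *	}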
*/ trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_, "Failed to swap buffers due to commit or resize in progress\n"); } WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); __update_max_tr(tr, tsk, cpu); arch_spin_unlock(&tr->max_lock); } #endif /* CONFIG_TRACER_MAX_TRACE */ struct pipe_wait { struct trace_iterator *iter; int wait_index; }; static bool wait_pipe_cond(void *data) { struct pipe_wait *pwait = data; struct trace_iterator *iter = pwait->iter; if (atomic_read_acquire(&iter->wait_index) != pwait->wait_index) return true; return iter->closed; } static int wait_on_pipe(struct trace_iterator *iter, int full) { struct pipe_wait pwait; int ret; /* Iterators are static, they should be filled or empty */ if (trace_buffer_iter(iter, iter->cpu_file)) return 0; pwait.wait_index = atomic_read_acquire(&iter->wait_index); pwait.iter = iter; ret = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, full, wait_pipe_cond, &pwait); #ifdef CONFIG_TRACER_MAX_TRACE /* * Make sure this is still the snapshot buffer, as if a snapshot were * to happen, this would now be the main buffer. */ if (iter->snapshot) iter->array_buffer = &iter->tr->max_buffer; #endif return ret; } #ifdef CONFIG_FTRACE_STARTUP_TEST static bool selftests_can_run; struct trace_selftests { struct list_head list; struct tracer *type; }; static LIST_HEAD(postponed_selftests); static int save_selftest(struct tracer *type) { struct trace_selftests *selftest; selftest = kmalloc(sizeof(*selftest), GFP_KERNEL); if (!selftest) return -ENOMEM; selftest->type = type; list_add(&selftest->list, &postponed_selftests); return 0; } static int run_tracer_selftest(struct tracer *type) { struct trace_array *tr = &global_trace; struct tracer *saved_tracer = tr->current_trace; int ret; if (!type->selftest || tracing_selftest_disabled) return 0; /* * If a tracer registers early in boot up (before scheduling is * initialized and such), then do not run its selftests yet. * Instead, run it a little later in the boot process. */ if (!selftests_can_run) return save_selftest(type); if (!tracing_is_on()) { pr_warn("Selftest for tracer %s skipped due to tracing disabled\n", type->name); return 0; } /* * Run a selftest on this tracer. * Here we reset the trace buffer, and set the current * tracer to be this tracer. The tracer can then run some * internal tracing to verify that everything is in order. * If we fail, we do not register this tracer. 
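 *
 * For reference, the hook being run here has this signature in
 * struct tracer (a sketch of the contract, not of any particular
 * implementation):
 *
 *	int (*selftest)(struct tracer *trace, struct trace_array *tr);
 *
 * It is expected to start tracing on @tr, generate some activity,
 * verify the recorded data, stop itself, and return 0 on success.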
*/ tracing_reset_online_cpus(&tr->array_buffer); tr->current_trace = type; #ifdef CONFIG_TRACER_MAX_TRACE if (type->use_max_tr) { /* If we expanded the buffers, make sure the max is expanded too */ if (tr->ring_buffer_expanded) ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size, RING_BUFFER_ALL_CPUS); tr->allocated_snapshot = true; } #endif /* the test is responsible for initializing and enabling */ pr_info("Testing tracer %s: ", type->name); ret = type->selftest(type, tr); /* the test is responsible for resetting too */ tr->current_trace = saved_tracer; if (ret) { printk(KERN_CONT "FAILED!\n"); /* Add the warning after printing 'FAILED' */ WARN_ON(1); return -1; } /* Only reset on passing, to avoid touching corrupted buffers */ tracing_reset_online_cpus(&tr->array_buffer); #ifdef CONFIG_TRACER_MAX_TRACE if (type->use_max_tr) { tr->allocated_snapshot = false; /* Shrink the max buffer again */ if (tr->ring_buffer_expanded) ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); } #endif printk(KERN_CONT "PASSED\n"); return 0; } static int do_run_tracer_selftest(struct tracer *type) { int ret; /* * Tests can take a long time, especially if they are run one after the * other, as does happen during bootup when all the tracers are * registered. This could cause the soft lockup watchdog to trigger. */ cond_resched(); tracing_selftest_running = true; ret = run_tracer_selftest(type); tracing_selftest_running = false; return ret; } static __init int init_trace_selftests(void) { struct trace_selftests *p, *n; struct tracer *t, **last; int ret; selftests_can_run = true; guard(mutex)(&trace_types_lock); if (list_empty(&postponed_selftests)) return 0; pr_info("Running postponed tracer tests:\n"); tracing_selftest_running = true; list_for_each_entry_safe(p, n, &postponed_selftests, list) { /* This loop can take minutes when sanitizers are enabled, so * lets make sure we allow RCU processing. */ cond_resched(); ret = run_tracer_selftest(p->type); /* If the test fails, then warn and remove from available_tracers */ if (ret < 0) { WARN(1, "tracer: %s failed selftest, disabling\n", p->type->name); last = &trace_types; for (t = trace_types; t; t = t->next) { if (t == p->type) { *last = t->next; break; } last = &t->next; } } list_del(&p->list); kfree(p); } tracing_selftest_running = false; return 0; } core_initcall(init_trace_selftests); #else static inline int do_run_tracer_selftest(struct tracer *type) { return 0; } #endif /* CONFIG_FTRACE_STARTUP_TEST */ static void add_tracer_options(struct trace_array *tr, struct tracer *t); static void __init apply_trace_boot_options(void); /** * register_tracer - register a tracer with the ftrace system. * @type: the plugin for the tracer * * Register a new plugin tracer. 
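 *
 * A minimal registration looks roughly like this (illustrative
 * sketch; the my_tracer_* names are hypothetical):
 *
 *	static struct tracer my_tracer __read_mostly = {
 *		.name	= "my_tracer",
 *		.init	= my_tracer_init,
 *		.reset	= my_tracer_reset,
 *	};
 *
 *	static __init int init_my_tracer(void)
 *	{
 *		return register_tracer(&my_tracer);
 *	}
 *	core_initcall(init_my_tracer);
 *
 * Since register_tracer() is __init, tracers must be built in and
 * registered during boot.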
*/ int __init register_tracer(struct tracer *type) { struct tracer *t; int ret = 0; if (!type->name) { pr_info("Tracer must have a name\n"); return -1; } if (strlen(type->name) >= MAX_TRACER_SIZE) { pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); return -1; } if (security_locked_down(LOCKDOWN_TRACEFS)) { pr_warn("Can not register tracer %s due to lockdown\n", type->name); return -EPERM; } mutex_lock(&trace_types_lock); for (t = trace_types; t; t = t->next) { if (strcmp(type->name, t->name) == 0) { /* already found */ pr_info("Tracer %s already registered\n", type->name); ret = -1; goto out; } } if (!type->set_flag) type->set_flag = &dummy_set_flag; if (!type->flags) { /*allocate a dummy tracer_flags*/ type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL); if (!type->flags) { ret = -ENOMEM; goto out; } type->flags->val = 0; type->flags->opts = dummy_tracer_opt; } else if (!type->flags->opts) type->flags->opts = dummy_tracer_opt; /* store the tracer for __set_tracer_option */ type->flags->trace = type; ret = do_run_tracer_selftest(type); if (ret < 0) goto out; type->next = trace_types; trace_types = type; add_tracer_options(&global_trace, type); out: mutex_unlock(&trace_types_lock); if (ret || !default_bootup_tracer) goto out_unlock; if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE)) goto out_unlock; printk(KERN_INFO "Starting tracer '%s'\n", type->name); /* Do we want this tracer to start on bootup? */ tracing_set_tracer(&global_trace, type->name); default_bootup_tracer = NULL; apply_trace_boot_options(); /* disable other selftests, since this will break it. */ disable_tracing_selftest("running a tracer"); out_unlock: return ret; } static void tracing_reset_cpu(struct array_buffer *buf, int cpu) { struct trace_buffer *buffer = buf->buffer; if (!buffer) return; ring_buffer_record_disable(buffer); /* Make sure all commits have finished */ synchronize_rcu(); ring_buffer_reset_cpu(buffer, cpu); ring_buffer_record_enable(buffer); } void tracing_reset_online_cpus(struct array_buffer *buf) { struct trace_buffer *buffer = buf->buffer; if (!buffer) return; ring_buffer_record_disable(buffer); /* Make sure all commits have finished */ synchronize_rcu(); buf->time_start = buffer_ftrace_now(buf, buf->cpu); ring_buffer_reset_online_cpus(buffer); ring_buffer_record_enable(buffer); } static void tracing_reset_all_cpus(struct array_buffer *buf) { struct trace_buffer *buffer = buf->buffer; if (!buffer) return; ring_buffer_record_disable(buffer); /* Make sure all commits have finished */ synchronize_rcu(); buf->time_start = buffer_ftrace_now(buf, buf->cpu); ring_buffer_reset(buffer); ring_buffer_record_enable(buffer); } /* Must have trace_types_lock held */ void tracing_reset_all_online_cpus_unlocked(void) { struct trace_array *tr; lockdep_assert_held(&trace_types_lock); list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (!tr->clear_trace) continue; tr->clear_trace = false; tracing_reset_online_cpus(&tr->array_buffer); #ifdef CONFIG_TRACER_MAX_TRACE tracing_reset_online_cpus(&tr->max_buffer); #endif } } void tracing_reset_all_online_cpus(void) { mutex_lock(&trace_types_lock); tracing_reset_all_online_cpus_unlocked(); mutex_unlock(&trace_types_lock); } int is_tracing_stopped(void) { return global_trace.stop_count; } static void tracing_start_tr(struct trace_array *tr) { struct trace_buffer *buffer; unsigned long flags; if (tracing_disabled) return; raw_spin_lock_irqsave(&tr->start_lock, flags); if (--tr->stop_count) { if (WARN_ON_ONCE(tr->stop_count < 0)) { /* Someone 
screwed up their debugging */ tr->stop_count = 0; } goto out; } /* Prevent the buffers from switching */ arch_spin_lock(&tr->max_lock); buffer = tr->array_buffer.buffer; if (buffer) ring_buffer_record_enable(buffer); #ifdef CONFIG_TRACER_MAX_TRACE buffer = tr->max_buffer.buffer; if (buffer) ring_buffer_record_enable(buffer); #endif arch_spin_unlock(&tr->max_lock); out: raw_spin_unlock_irqrestore(&tr->start_lock, flags); } /** * tracing_start - quick start of the tracer * * If tracing is enabled but was stopped by tracing_stop, * this will start the tracer back up. */ void tracing_start(void) { return tracing_start_tr(&global_trace); } static void tracing_stop_tr(struct trace_array *tr) { struct trace_buffer *buffer; unsigned long flags; raw_spin_lock_irqsave(&tr->start_lock, flags); if (tr->stop_count++) goto out; /* Prevent the buffers from switching */ arch_spin_lock(&tr->max_lock); buffer = tr->array_buffer.buffer; if (buffer) ring_buffer_record_disable(buffer); #ifdef CONFIG_TRACER_MAX_TRACE buffer = tr->max_buffer.buffer; if (buffer) ring_buffer_record_disable(buffer); #endif arch_spin_unlock(&tr->max_lock); out: raw_spin_unlock_irqrestore(&tr->start_lock, flags); } /** * tracing_stop - quick stop of the tracer * * Light weight way to stop tracing. Use in conjunction with * tracing_start. */ void tracing_stop(void) { return tracing_stop_tr(&global_trace); } /* * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function * simplifies those functions and keeps them in sync. */ enum print_line_t trace_handle_return(struct trace_seq *s) { return trace_seq_has_overflowed(s) ? TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED; } EXPORT_SYMBOL_GPL(trace_handle_return); static unsigned short migration_disable_value(void) { #if defined(CONFIG_SMP) return current->migration_disabled; #else return 0; #endif } unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status) { unsigned int trace_flags = irqs_status; unsigned int pc; pc = preempt_count(); if (pc & NMI_MASK) trace_flags |= TRACE_FLAG_NMI; if (pc & HARDIRQ_MASK) trace_flags |= TRACE_FLAG_HARDIRQ; if (in_serving_softirq()) trace_flags |= TRACE_FLAG_SOFTIRQ; if (softirq_count() >> (SOFTIRQ_SHIFT + 1)) trace_flags |= TRACE_FLAG_BH_OFF; if (tif_need_resched()) trace_flags |= TRACE_FLAG_NEED_RESCHED; if (test_preempt_need_resched()) trace_flags |= TRACE_FLAG_PREEMPT_RESCHED; if (IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY) && tif_test_bit(TIF_NEED_RESCHED_LAZY)) trace_flags |= TRACE_FLAG_NEED_RESCHED_LAZY; return (trace_flags << 16) | (min_t(unsigned int, pc & 0xff, 0xf)) | (min_t(unsigned int, migration_disable_value(), 0xf)) << 4; } struct ring_buffer_event * trace_buffer_lock_reserve(struct trace_buffer *buffer, int type, unsigned long len, unsigned int trace_ctx) { return __trace_buffer_lock_reserve(buffer, type, len, trace_ctx); } DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event); DEFINE_PER_CPU(int, trace_buffered_event_cnt); static int trace_buffered_event_ref; /** * trace_buffered_event_enable - enable buffering events * * When events are being filtered, it is quicker to use a temporary * buffer to write the event data into if there's a likely chance * that it will not be committed. The discard of the ring buffer * is not as fast as committing, and is much slower than copying * a commit. 
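 *
 * Note that enabling and disabling is reference counted and both
 * calls expect event_mutex to be held, e.g. (sketch):
 *
 *	mutex_lock(&event_mutex);
 *	trace_buffered_event_enable();
 *	mutex_unlock(&event_mutex);
 *
 * with a matching trace_buffered_event_disable(), again under
 * event_mutex, once the filtering condition goes away.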
* * When an event is to be filtered, allocate per cpu buffers to * write the event data into, and if the event is filtered and discarded * it is simply dropped, otherwise, the entire data is to be committed * in one shot. */ void trace_buffered_event_enable(void) { struct ring_buffer_event *event; struct page *page; int cpu; WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); if (trace_buffered_event_ref++) return; for_each_tracing_cpu(cpu) { page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_NORETRY, 0); /* This is just an optimization and can handle failures */ if (!page) { pr_err("Failed to allocate event buffer\n"); break; } event = page_address(page); memset(event, 0, sizeof(*event)); per_cpu(trace_buffered_event, cpu) = event; preempt_disable(); if (cpu == smp_processor_id() && __this_cpu_read(trace_buffered_event) != per_cpu(trace_buffered_event, cpu)) WARN_ON_ONCE(1); preempt_enable(); } } static void enable_trace_buffered_event(void *data) { /* Probably not needed, but do it anyway */ smp_rmb(); this_cpu_dec(trace_buffered_event_cnt); } static void disable_trace_buffered_event(void *data) { this_cpu_inc(trace_buffered_event_cnt); } /** * trace_buffered_event_disable - disable buffering events * * When a filter is removed, it is faster to not use the buffered * events, and to commit directly into the ring buffer. Free up * the temp buffers when there are no more users. This requires * special synchronization with current events. */ void trace_buffered_event_disable(void) { int cpu; WARN_ON_ONCE(!mutex_is_locked(&event_mutex)); if (WARN_ON_ONCE(!trace_buffered_event_ref)) return; if (--trace_buffered_event_ref) return; /* For each CPU, set the buffer as used. */ on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event, NULL, true); /* Wait for all current users to finish */ synchronize_rcu(); for_each_tracing_cpu(cpu) { free_page((unsigned long)per_cpu(trace_buffered_event, cpu)); per_cpu(trace_buffered_event, cpu) = NULL; } /* * Wait for all CPUs that potentially started checking if they can use * their event buffer only after the previous synchronize_rcu() call and * they still read a valid pointer from trace_buffered_event. It must be * ensured they don't see cleared trace_buffered_event_cnt else they * could wrongly decide to use the pointed-to buffer which is now freed. */ synchronize_rcu(); /* For each CPU, relinquish the buffer */ on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL, true); } static struct trace_buffer *temp_buffer; struct ring_buffer_event * trace_event_buffer_lock_reserve(struct trace_buffer **current_rb, struct trace_event_file *trace_file, int type, unsigned long len, unsigned int trace_ctx) { struct ring_buffer_event *entry; struct trace_array *tr = trace_file->tr; int val; *current_rb = tr->array_buffer.buffer; if (!tr->no_filter_buffering_ref && (trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED))) { preempt_disable_notrace(); /* * Filtering is on, so try to use the per cpu buffer first. * This buffer will simulate a ring_buffer_event, * where the type_len is zero and the array[0] will * hold the full length. * (see include/linux/ring-buffer.h for details on * how the ring_buffer_event is structured). * * Using a temp buffer during filtering and copying it * on a matched filter is quicker than writing directly * into the ring buffer and then discarding it when * it doesn't match. That is because the discard * requires several atomic operations to get right. 
* Copying on match and doing nothing on a failed match * is still quicker than no copy on match, but having * to discard out of the ring buffer on a failed match. */ if ((entry = __this_cpu_read(trace_buffered_event))) { int max_len = PAGE_SIZE - struct_size(entry, array, 1); val = this_cpu_inc_return(trace_buffered_event_cnt); /* * Preemption is disabled, but interrupts and NMIs * can still come in now. If that happens after * the above increment, then it will have to go * back to the old method of allocating the event * on the ring buffer, and if the filter fails, it * will have to call ring_buffer_discard_commit() * to remove it. * * Need to also check the unlikely case that the * length is bigger than the temp buffer size. * If that happens, then the reserve is pretty much * guaranteed to fail, as the ring buffer currently * only allows events less than a page. But that may * change in the future, so let the ring buffer reserve * handle the failure in that case. */ if (val == 1 && likely(len <= max_len)) { trace_event_setup(entry, type, trace_ctx); entry->array[0] = len; /* Return with preemption disabled */ return entry; } this_cpu_dec(trace_buffered_event_cnt); } /* __trace_buffer_lock_reserve() disables preemption */ preempt_enable_notrace(); } entry = __trace_buffer_lock_reserve(*current_rb, type, len, trace_ctx); /* * If tracing is off, but we have triggers enabled * we still need to look at the event data. Use the temp_buffer * to store the trace event for the trigger to use. It's recursive * safe and will not be recorded anywhere. */ if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) { *current_rb = temp_buffer; entry = __trace_buffer_lock_reserve(*current_rb, type, len, trace_ctx); } return entry; } EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve); static DEFINE_RAW_SPINLOCK(tracepoint_iter_lock); static DEFINE_MUTEX(tracepoint_printk_mutex); static void output_printk(struct trace_event_buffer *fbuffer) { struct trace_event_call *event_call; struct trace_event_file *file; struct trace_event *event; unsigned long flags; struct trace_iterator *iter = tracepoint_print_iter; /* We should never get here if iter is NULL */ if (WARN_ON_ONCE(!iter)) return; event_call = fbuffer->trace_file->event_call; if (!event_call || !event_call->event.funcs || !event_call->event.funcs->trace) return; file = fbuffer->trace_file; if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) || (unlikely(file->flags & EVENT_FILE_FL_FILTERED) && !filter_match_preds(file->filter, fbuffer->entry))) return; event = &fbuffer->trace_file->event_call->event; raw_spin_lock_irqsave(&tracepoint_iter_lock, flags); trace_seq_init(&iter->seq); iter->ent = fbuffer->entry; event_call->event.funcs->trace(iter, 0, event); trace_seq_putc(&iter->seq, 0); printk("%s", iter->seq.buffer); raw_spin_unlock_irqrestore(&tracepoint_iter_lock, flags); } int tracepoint_printk_sysctl(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int save_tracepoint_printk; int ret; guard(mutex)(&tracepoint_printk_mutex); save_tracepoint_printk = tracepoint_printk; ret = proc_dointvec(table, write, buffer, lenp, ppos); /* * This will force exiting early, as tracepoint_printk * is always zero when tracepoint_printk_iter is not allocated */ if (!tracepoint_print_iter) tracepoint_printk = 0; if (save_tracepoint_printk == tracepoint_printk) return ret; if (tracepoint_printk) static_key_enable(&tracepoint_printk_key.key); else static_key_disable(&tracepoint_printk_key.key); return ret; } void 
trace_event_buffer_commit(struct trace_event_buffer *fbuffer) { enum event_trigger_type tt = ETT_NONE; struct trace_event_file *file = fbuffer->trace_file; if (__event_trigger_test_discard(file, fbuffer->buffer, fbuffer->event, fbuffer->entry, &tt)) goto discard; if (static_key_false(&tracepoint_printk_key.key)) output_printk(fbuffer); if (static_branch_unlikely(&trace_event_exports_enabled)) ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT); trace_buffer_unlock_commit_regs(file->tr, fbuffer->buffer, fbuffer->event, fbuffer->trace_ctx, fbuffer->regs); discard: if (tt) event_triggers_post_call(file, tt); } EXPORT_SYMBOL_GPL(trace_event_buffer_commit); /* * Skip 3: * * trace_buffer_unlock_commit_regs() * trace_event_buffer_commit() * trace_event_raw_event_xxx() */ # define STACK_SKIP 3 void trace_buffer_unlock_commit_regs(struct trace_array *tr, struct trace_buffer *buffer, struct ring_buffer_event *event, unsigned int trace_ctx, struct pt_regs *regs) { __buffer_unlock_commit(buffer, event); /* * If regs is not set, then skip the necessary functions. * Note, we can still get here via blktrace, wakeup tracer * and mmiotrace, but that's ok if they lose a function or * two. They are not that meaningful. */ ftrace_trace_stack(tr, buffer, trace_ctx, regs ? 0 : STACK_SKIP, regs); ftrace_trace_userstack(tr, buffer, trace_ctx); } /* * Similar to trace_buffer_unlock_commit_regs() but do not dump stack. */ void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer, struct ring_buffer_event *event) { __buffer_unlock_commit(buffer, event); } void trace_function(struct trace_array *tr, unsigned long ip, unsigned long parent_ip, unsigned int trace_ctx) { struct trace_buffer *buffer = tr->array_buffer.buffer; struct ring_buffer_event *event; struct ftrace_entry *entry; event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry), trace_ctx); if (!event) return; entry = ring_buffer_event_data(event); entry->ip = ip; entry->parent_ip = parent_ip; if (static_branch_unlikely(&trace_function_exports_enabled)) ftrace_exports(event, TRACE_EXPORT_FUNCTION); __buffer_unlock_commit(buffer, event); } #ifdef CONFIG_STACKTRACE /* Allow 4 levels of nesting: normal, softirq, irq, NMI */ #define FTRACE_KSTACK_NESTING 4 #define FTRACE_KSTACK_ENTRIES (SZ_4K / FTRACE_KSTACK_NESTING) struct ftrace_stack { unsigned long calls[FTRACE_KSTACK_ENTRIES]; }; struct ftrace_stacks { struct ftrace_stack stacks[FTRACE_KSTACK_NESTING]; }; static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks); static DEFINE_PER_CPU(int, ftrace_stack_reserve); static void __ftrace_trace_stack(struct trace_array *tr, struct trace_buffer *buffer, unsigned int trace_ctx, int skip, struct pt_regs *regs) { struct ring_buffer_event *event; unsigned int size, nr_entries; struct ftrace_stack *fstack; struct stack_entry *entry; int stackidx; /* * Add one, for this function and the call to save_stack_trace() * If regs is set, then these functions will not be in the way. */ #ifndef CONFIG_UNWINDER_ORC if (!regs) skip++; #endif preempt_disable_notrace(); stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1; /* This should never happen. If it does, yell once and skip */ if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING)) goto out; /* * The above __this_cpu_inc_return() is 'atomic' cpu local. An * interrupt will either see the value pre increment or post * increment. If the interrupt happens pre increment it will have * restored the counter when it returns. We just need a barrier to * keep gcc from moving things around. 
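 *
 * Concretely (illustrative), when each context nests on top of the
 * previous one and records a stack:
 *
 *	task    -> stackidx 0 -> stacks[0]
 *	softirq -> stackidx 1 -> stacks[1]
 *	hardirq -> stackidx 2 -> stacks[2]
 *	NMI     -> stackidx 3 -> stacks[3]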
*/ barrier(); fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx; size = ARRAY_SIZE(fstack->calls); if (regs) { nr_entries = stack_trace_save_regs(regs, fstack->calls, size, skip); } else { nr_entries = stack_trace_save(fstack->calls, size, skip); } #ifdef CONFIG_DYNAMIC_FTRACE /* Mark entry of stack trace as trampoline code */ if (tr->ops && tr->ops->trampoline) { unsigned long tramp_start = tr->ops->trampoline; unsigned long tramp_end = tramp_start + tr->ops->trampoline_size; unsigned long *calls = fstack->calls; for (int i = 0; i < nr_entries; i++) { if (calls[i] >= tramp_start && calls[i] < tramp_end) calls[i] = FTRACE_TRAMPOLINE_MARKER; } } #endif event = __trace_buffer_lock_reserve(buffer, TRACE_STACK, struct_size(entry, caller, nr_entries), trace_ctx); if (!event) goto out; entry = ring_buffer_event_data(event); entry->size = nr_entries; memcpy(&entry->caller, fstack->calls, flex_array_size(entry, caller, nr_entries)); __buffer_unlock_commit(buffer, event); out: /* Again, don't let gcc optimize things here */ barrier(); __this_cpu_dec(ftrace_stack_reserve); preempt_enable_notrace(); } static inline void ftrace_trace_stack(struct trace_array *tr, struct trace_buffer *buffer, unsigned int trace_ctx, int skip, struct pt_regs *regs) { if (!(tr->trace_flags & TRACE_ITER_STACKTRACE)) return; __ftrace_trace_stack(tr, buffer, trace_ctx, skip, regs); } void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip) { struct trace_buffer *buffer = tr->array_buffer.buffer; if (rcu_is_watching()) { __ftrace_trace_stack(tr, buffer, trace_ctx, skip, NULL); return; } if (WARN_ON_ONCE(IS_ENABLED(CONFIG_GENERIC_ENTRY))) return; /* * When an NMI triggers, RCU is enabled via ct_nmi_enter(), * but if the above rcu_is_watching() failed, then the NMI * triggered someplace critical, and ct_irq_enter() should * not be called from NMI. */ if (unlikely(in_nmi())) return; ct_irq_enter_irqson(); __ftrace_trace_stack(tr, buffer, trace_ctx, skip, NULL); ct_irq_exit_irqson(); } /** * trace_dump_stack - record a stack back trace in the trace buffer * @skip: Number of functions to skip (helper handlers) */ void trace_dump_stack(int skip) { if (tracing_disabled || tracing_selftest_running) return; #ifndef CONFIG_UNWINDER_ORC /* Skip 1 to skip this function. */ skip++; #endif __ftrace_trace_stack(printk_trace, printk_trace->array_buffer.buffer, tracing_gen_ctx(), skip, NULL); } EXPORT_SYMBOL_GPL(trace_dump_stack); #ifdef CONFIG_USER_STACKTRACE_SUPPORT static DEFINE_PER_CPU(int, user_stack_count); static void ftrace_trace_userstack(struct trace_array *tr, struct trace_buffer *buffer, unsigned int trace_ctx) { struct ring_buffer_event *event; struct userstack_entry *entry; if (!(tr->trace_flags & TRACE_ITER_USERSTACKTRACE)) return; /* * NMIs can not handle page faults, even with fix ups. * The save user stack can (and often does) fault. */ if (unlikely(in_nmi())) return; /* * prevent recursion, since the user stack tracing may * trigger other kernel events. 
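 *
 * For context, this is reached from the commit path roughly as
 * (sketch):
 *
 *	trace_buffer_unlock_commit_regs()
 *	    ftrace_trace_stack()
 *	    ftrace_trace_userstack()
 *
 * so anything the user-stack save itself triggers could recurse back
 * in here, which is what the per-CPU count below guards against.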
*/ preempt_disable(); if (__this_cpu_read(user_stack_count)) goto out; __this_cpu_inc(user_stack_count); event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, sizeof(*entry), trace_ctx); if (!event) goto out_drop_count; entry = ring_buffer_event_data(event); entry->tgid = current->tgid; memset(&entry->caller, 0, sizeof(entry->caller)); stack_trace_save_user(entry->caller, FTRACE_STACK_ENTRIES); __buffer_unlock_commit(buffer, event); out_drop_count: __this_cpu_dec(user_stack_count); out: preempt_enable(); } #else /* CONFIG_USER_STACKTRACE_SUPPORT */ static void ftrace_trace_userstack(struct trace_array *tr, struct trace_buffer *buffer, unsigned int trace_ctx) { } #endif /* !CONFIG_USER_STACKTRACE_SUPPORT */ #endif /* CONFIG_STACKTRACE */ static inline void func_repeats_set_delta_ts(struct func_repeats_entry *entry, unsigned long long delta) { entry->bottom_delta_ts = delta & U32_MAX; entry->top_delta_ts = (delta >> 32); } void trace_last_func_repeats(struct trace_array *tr, struct trace_func_repeats *last_info, unsigned int trace_ctx) { struct trace_buffer *buffer = tr->array_buffer.buffer; struct func_repeats_entry *entry; struct ring_buffer_event *event; u64 delta; event = __trace_buffer_lock_reserve(buffer, TRACE_FUNC_REPEATS, sizeof(*entry), trace_ctx); if (!event) return; delta = ring_buffer_event_time_stamp(buffer, event) - last_info->ts_last_call; entry = ring_buffer_event_data(event); entry->ip = last_info->ip; entry->parent_ip = last_info->parent_ip; entry->count = last_info->count; func_repeats_set_delta_ts(entry, delta); __buffer_unlock_commit(buffer, event); } /* created for use with alloc_percpu */ struct trace_buffer_struct { int nesting; char buffer[4][TRACE_BUF_SIZE]; }; static struct trace_buffer_struct __percpu *trace_percpu_buffer; /* * This allows for lockless recording. If we're nested too deeply, then * this returns NULL. */ static char *get_trace_buf(void) { struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer); if (!trace_percpu_buffer || buffer->nesting >= 4) return NULL; buffer->nesting++; /* Interrupts must see nesting incremented before we use the buffer */ barrier(); return &buffer->buffer[buffer->nesting - 1][0]; } static void put_trace_buf(void) { /* Don't let the decrement of nesting leak before this */ barrier(); this_cpu_dec(trace_percpu_buffer->nesting); } static int alloc_percpu_trace_buffer(void) { struct trace_buffer_struct __percpu *buffers; if (trace_percpu_buffer) return 0; buffers = alloc_percpu(struct trace_buffer_struct); if (MEM_FAIL(!buffers, "Could not allocate percpu trace_printk buffer")) return -ENOMEM; trace_percpu_buffer = buffers; return 0; } static int buffers_allocated; void trace_printk_init_buffers(void) { if (buffers_allocated) return; if (alloc_percpu_trace_buffer()) return; /* trace_printk() is for debug use only. Don't use it in production. */ pr_warn("\n"); pr_warn("**********************************************************\n"); pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); pr_warn("** **\n"); pr_warn("** trace_printk() being used. Allocating extra memory. **\n"); pr_warn("** **\n"); pr_warn("** This means that this is a DEBUG kernel and it is **\n"); pr_warn("** unsafe for production use. **\n"); pr_warn("** **\n"); pr_warn("** If you see this message and you are not debugging **\n"); pr_warn("** the kernel, report this immediately to your vendor! 
**\n"); pr_warn("** **\n"); pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n"); pr_warn("**********************************************************\n"); /* Expand the buffers to set size */ tracing_update_buffers(&global_trace); buffers_allocated = 1; /* * trace_printk_init_buffers() can be called by modules. * If that happens, then we need to start cmdline recording * directly here. If the global_trace.buffer is already * allocated here, then this was called by module code. */ if (global_trace.array_buffer.buffer) tracing_start_cmdline_record(); } EXPORT_SYMBOL_GPL(trace_printk_init_buffers); void trace_printk_start_comm(void) { /* Start tracing comms if trace printk is set */ if (!buffers_allocated) return; tracing_start_cmdline_record(); } static void trace_printk_start_stop_comm(int enabled) { if (!buffers_allocated) return; if (enabled) tracing_start_cmdline_record(); else tracing_stop_cmdline_record(); } /** * trace_vbprintk - write binary msg to tracing buffer * @ip: The address of the caller * @fmt: The string format to write to the buffer * @args: Arguments for @fmt */ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) { struct ring_buffer_event *event; struct trace_buffer *buffer; struct trace_array *tr = READ_ONCE(printk_trace); struct bprint_entry *entry; unsigned int trace_ctx; char *tbuffer; int len = 0, size; if (!printk_binsafe(tr)) return trace_vprintk(ip, fmt, args); if (unlikely(tracing_selftest_running || tracing_disabled)) return 0; /* Don't pollute graph traces with trace_vprintk internals */ pause_graph_tracing(); trace_ctx = tracing_gen_ctx(); preempt_disable_notrace(); tbuffer = get_trace_buf(); if (!tbuffer) { len = 0; goto out_nobuffer; } len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args); if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0) goto out_put; size = sizeof(*entry) + sizeof(u32) * len; buffer = tr->array_buffer.buffer; ring_buffer_nest_start(buffer); event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size, trace_ctx); if (!event) goto out; entry = ring_buffer_event_data(event); entry->ip = ip; entry->fmt = fmt; memcpy(entry->buf, tbuffer, sizeof(u32) * len); __buffer_unlock_commit(buffer, event); ftrace_trace_stack(tr, buffer, trace_ctx, 6, NULL); out: ring_buffer_nest_end(buffer); out_put: put_trace_buf(); out_nobuffer: preempt_enable_notrace(); unpause_graph_tracing(); return len; } EXPORT_SYMBOL_GPL(trace_vbprintk); __printf(3, 0) static int __trace_array_vprintk(struct trace_buffer *buffer, unsigned long ip, const char *fmt, va_list args) { struct ring_buffer_event *event; int len = 0, size; struct print_entry *entry; unsigned int trace_ctx; char *tbuffer; if (tracing_disabled) return 0; /* Don't pollute graph traces with trace_vprintk internals */ pause_graph_tracing(); trace_ctx = tracing_gen_ctx(); preempt_disable_notrace(); tbuffer = get_trace_buf(); if (!tbuffer) { len = 0; goto out_nobuffer; } len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); size = sizeof(*entry) + len + 1; ring_buffer_nest_start(buffer); event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, trace_ctx); if (!event) goto out; entry = ring_buffer_event_data(event); entry->ip = ip; memcpy(&entry->buf, tbuffer, len + 1); __buffer_unlock_commit(buffer, event); ftrace_trace_stack(printk_trace, buffer, trace_ctx, 6, NULL); out: ring_buffer_nest_end(buffer); put_trace_buf(); out_nobuffer: preempt_enable_notrace(); unpause_graph_tracing(); return len; } __printf(3, 0) int trace_array_vprintk(struct trace_array 
*tr, unsigned long ip, const char *fmt, va_list args) { if (tracing_selftest_running && tr == &global_trace) return 0; return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args); } /** * trace_array_printk - Print a message to a specific instance * @tr: The instance trace_array descriptor * @ip: The instruction pointer that this is called from. * @fmt: The format to print (printf format) * * If a subsystem sets up its own instance, they have the right to * printk strings into their tracing instance buffer using this * function. Note, this function will not write into the top level * buffer (use trace_printk() for that), as writing into the top level * buffer should only have events that can be individually disabled. * trace_printk() is only used for debugging a kernel, and should not * be ever incorporated in normal use. * * trace_array_printk() can be used, as it will not add noise to the * top level tracing buffer. * * Note, trace_array_init_printk() must be called on @tr before this * can be used. */ __printf(3, 0) int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...) { int ret; va_list ap; if (!tr) return -ENOENT; /* This is only allowed for created instances */ if (tr == &global_trace) return 0; if (!(tr->trace_flags & TRACE_ITER_PRINTK)) return 0; va_start(ap, fmt); ret = trace_array_vprintk(tr, ip, fmt, ap); va_end(ap); return ret; } EXPORT_SYMBOL_GPL(trace_array_printk); /** * trace_array_init_printk - Initialize buffers for trace_array_printk() * @tr: The trace array to initialize the buffers for * * As trace_array_printk() only writes into instances, they are OK to * have in the kernel (unlike trace_printk()). This needs to be called * before trace_array_printk() can be used on a trace_array. */ int trace_array_init_printk(struct trace_array *tr) { if (!tr) return -ENOENT; /* This is only allowed for created instances */ if (tr == &global_trace) return -EINVAL; return alloc_percpu_trace_buffer(); } EXPORT_SYMBOL_GPL(trace_array_init_printk); __printf(3, 4) int trace_array_printk_buf(struct trace_buffer *buffer, unsigned long ip, const char *fmt, ...) { int ret; va_list ap; if (!(printk_trace->trace_flags & TRACE_ITER_PRINTK)) return 0; va_start(ap, fmt); ret = __trace_array_vprintk(buffer, ip, fmt, ap); va_end(ap); return ret; } __printf(2, 0) int trace_vprintk(unsigned long ip, const char *fmt, va_list args) { return trace_array_vprintk(printk_trace, ip, fmt, args); } EXPORT_SYMBOL_GPL(trace_vprintk); static void trace_iterator_increment(struct trace_iterator *iter) { struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu); iter->idx++; if (buf_iter) ring_buffer_iter_advance(buf_iter); } static struct trace_entry * peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts, unsigned long *lost_events) { struct ring_buffer_event *event; struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu); if (buf_iter) { event = ring_buffer_iter_peek(buf_iter, ts); if (lost_events) *lost_events = ring_buffer_iter_dropped(buf_iter) ? 
(unsigned long)-1 : 0; } else { event = ring_buffer_peek(iter->array_buffer->buffer, cpu, ts, lost_events); } if (event) { iter->ent_size = ring_buffer_event_length(event); return ring_buffer_event_data(event); } iter->ent_size = 0; return NULL; } static struct trace_entry * __find_next_entry(struct trace_iterator *iter, int *ent_cpu, unsigned long *missing_events, u64 *ent_ts) { struct trace_buffer *buffer = iter->array_buffer->buffer; struct trace_entry *ent, *next = NULL; unsigned long lost_events = 0, next_lost = 0; int cpu_file = iter->cpu_file; u64 next_ts = 0, ts; int next_cpu = -1; int next_size = 0; int cpu; /* * If we are in a per_cpu trace file, don't bother by iterating over * all cpu and peek directly. */ if (cpu_file > RING_BUFFER_ALL_CPUS) { if (ring_buffer_empty_cpu(buffer, cpu_file)) return NULL; ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events); if (ent_cpu) *ent_cpu = cpu_file; return ent; } for_each_tracing_cpu(cpu) { if (ring_buffer_empty_cpu(buffer, cpu)) continue; ent = peek_next_entry(iter, cpu, &ts, &lost_events); /* * Pick the entry with the smallest timestamp: */ if (ent && (!next || ts < next_ts)) { next = ent; next_cpu = cpu; next_ts = ts; next_lost = lost_events; next_size = iter->ent_size; } } iter->ent_size = next_size; if (ent_cpu) *ent_cpu = next_cpu; if (ent_ts) *ent_ts = next_ts; if (missing_events) *missing_events = next_lost; return next; } #define STATIC_FMT_BUF_SIZE 128 static char static_fmt_buf[STATIC_FMT_BUF_SIZE]; char *trace_iter_expand_format(struct trace_iterator *iter) { char *tmp; /* * iter->tr is NULL when used with tp_printk, which makes * this get called where it is not safe to call krealloc(). */ if (!iter->tr || iter->fmt == static_fmt_buf) return NULL; tmp = krealloc(iter->fmt, iter->fmt_size + STATIC_FMT_BUF_SIZE, GFP_KERNEL); if (tmp) { iter->fmt_size += STATIC_FMT_BUF_SIZE; iter->fmt = tmp; } return tmp; } /* Returns true if the string is safe to dereference from an event */ static bool trace_safe_str(struct trace_iterator *iter, const char *str) { unsigned long addr = (unsigned long)str; struct trace_event *trace_event; struct trace_event_call *event; /* OK if part of the event data */ if ((addr >= (unsigned long)iter->ent) && (addr < (unsigned long)iter->ent + iter->ent_size)) return true; /* OK if part of the temp seq buffer */ if ((addr >= (unsigned long)iter->tmp_seq.buffer) && (addr < (unsigned long)iter->tmp_seq.buffer + TRACE_SEQ_BUFFER_SIZE)) return true; /* Core rodata can not be freed */ if (is_kernel_rodata(addr)) return true; if (trace_is_tracepoint_string(str)) return true; /* * Now this could be a module event, referencing core module * data, which is OK. */ if (!iter->ent) return false; trace_event = ftrace_find_event(iter->ent->type); if (!trace_event) return false; event = container_of(trace_event, struct trace_event_call, event); if ((event->flags & TRACE_EVENT_FL_DYNAMIC) || !event->module) return false; /* Would rather have rodata, but this will suffice */ if (within_module_core(addr, event->module)) return true; return false; } /** * ignore_event - Check dereferenced fields while writing to the seq buffer * @iter: The iterator that holds the seq buffer and the event being printed * * At boot up, test_event_printk() will flag any event that dereferences * a string with "%s" that does exist in the ring buffer. It may still * be valid, as the string may point to a static string in the kernel * rodata that never gets freed. 
But if the string pointer is pointing * to something that was allocated, there's a chance that it can be freed * by the time the user reads the trace. This would cause a bad memory * access by the kernel and possibly crash the system. * * This function will check if the event has any fields flagged as needing * to be checked at runtime and perform those checks. * * If it is found that a field is unsafe, it will write into the @iter->seq * a message stating what was found to be unsafe. * * @return: true if the event is unsafe and should be ignored, * false otherwise. */ bool ignore_event(struct trace_iterator *iter) { struct ftrace_event_field *field; struct trace_event *trace_event; struct trace_event_call *event; struct list_head *head; struct trace_seq *seq; const void *ptr; trace_event = ftrace_find_event(iter->ent->type); seq = &iter->seq; if (!trace_event) { trace_seq_printf(seq, "EVENT ID %d NOT FOUND?\n", iter->ent->type); return true; } event = container_of(trace_event, struct trace_event_call, event); if (!(event->flags & TRACE_EVENT_FL_TEST_STR)) return false; head = trace_get_fields(event); if (!head) { trace_seq_printf(seq, "FIELDS FOR EVENT '%s' NOT FOUND?\n", trace_event_name(event)); return true; } /* Offsets are from the iter->ent that points to the raw event */ ptr = iter->ent; list_for_each_entry(field, head, link) { const char *str; bool good; if (!field->needs_test) continue; str = *(const char **)(ptr + field->offset); good = trace_safe_str(iter, str); /* * If you hit this warning, it is likely that the * trace event in question used %s on a string that * was saved at the time of the event, but may not be * around when the trace is read. Use __string(), * __assign_str() and __get_str() helpers in the TRACE_EVENT() * instead. See samples/trace_events/trace-events-sample.h * for reference. */ if (WARN_ONCE(!good, "event '%s' has unsafe pointer field '%s'", trace_event_name(event), field->name)) { trace_seq_printf(seq, "EVENT %s: HAS UNSAFE POINTER FIELD '%s'\n", trace_event_name(event), field->name); return true; } } return false; } const char *trace_event_format(struct trace_iterator *iter, const char *fmt) { const char *p, *new_fmt; char *q; if (WARN_ON_ONCE(!fmt)) return fmt; if (!iter->tr || iter->tr->trace_flags & TRACE_ITER_HASH_PTR) return fmt; p = fmt; new_fmt = q = iter->fmt; while (*p) { if (unlikely(q - new_fmt + 3 > iter->fmt_size)) { if (!trace_iter_expand_format(iter)) return fmt; q += iter->fmt - new_fmt; new_fmt = iter->fmt; } *q++ = *p++; /* Replace %p with %px */ if (p[-1] == '%') { if (p[0] == '%') { *q++ = *p++; } else if (p[0] == 'p' && !isalnum(p[1])) { *q++ = *p++; *q++ = 'x'; } } } *q = '\0'; return new_fmt; } #define STATIC_TEMP_BUF_SIZE 128 static char static_temp_buf[STATIC_TEMP_BUF_SIZE] __aligned(4); /* Find the next real entry, without updating the iterator itself */ struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) { /* __find_next_entry will reset ent_size */ int ent_size = iter->ent_size; struct trace_entry *entry; /* * If called from ftrace_dump(), then the iter->temp buffer * will be the static_temp_buf and not created from kmalloc. * If the entry size is greater than the buffer, we can * not save it. Just return NULL in that case. This is only * used to add markers when two consecutive events' time * stamps have a large delta. 
See trace_print_lat_context() */ if (iter->temp == static_temp_buf && STATIC_TEMP_BUF_SIZE < ent_size) return NULL; /* * The __find_next_entry() may call peek_next_entry(), which may * call ring_buffer_peek() that may make the contents of iter->ent * undefined. Need to copy iter->ent now. */ if (iter->ent && iter->ent != iter->temp) { if ((!iter->temp || iter->temp_size < iter->ent_size) && !WARN_ON_ONCE(iter->temp == static_temp_buf)) { void *temp; temp = kmalloc(iter->ent_size, GFP_KERNEL); if (!temp) return NULL; kfree(iter->temp); iter->temp = temp; iter->temp_size = iter->ent_size; } memcpy(iter->temp, iter->ent, iter->ent_size); iter->ent = iter->temp; } entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts); /* Put back the original ent_size */ iter->ent_size = ent_size; return entry; } /* Find the next real entry, and increment the iterator to the next entry */ void *trace_find_next_entry_inc(struct trace_iterator *iter) { iter->ent = __find_next_entry(iter, &iter->cpu, &iter->lost_events, &iter->ts); if (iter->ent) trace_iterator_increment(iter); return iter->ent ? iter : NULL; } static void trace_consume(struct trace_iterator *iter) { ring_buffer_consume(iter->array_buffer->buffer, iter->cpu, &iter->ts, &iter->lost_events); } static void *s_next(struct seq_file *m, void *v, loff_t *pos) { struct trace_iterator *iter = m->private; int i = (int)*pos; void *ent; WARN_ON_ONCE(iter->leftover); (*pos)++; /* can't go backwards */ if (iter->idx > i) return NULL; if (iter->idx < 0) ent = trace_find_next_entry_inc(iter); else ent = iter; while (ent && iter->idx < i) ent = trace_find_next_entry_inc(iter); iter->pos = *pos; return ent; } void tracing_iter_reset(struct trace_iterator *iter, int cpu) { struct ring_buffer_iter *buf_iter; unsigned long entries = 0; u64 ts; per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = 0; buf_iter = trace_buffer_iter(iter, cpu); if (!buf_iter) return; ring_buffer_iter_reset(buf_iter); /* * We could have the case with the max latency tracers * that a reset never took place on a cpu. This is evident * by the timestamp being before the start of the buffer. */ while (ring_buffer_iter_peek(buf_iter, &ts)) { if (ts >= iter->array_buffer->time_start) break; entries++; ring_buffer_iter_advance(buf_iter); /* This could be a big loop */ cond_resched(); } per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries; } /* * The current tracer is copied to avoid a global locking * all around. */ static void *s_start(struct seq_file *m, loff_t *pos) { struct trace_iterator *iter = m->private; struct trace_array *tr = iter->tr; int cpu_file = iter->cpu_file; void *p = NULL; loff_t l = 0; int cpu; mutex_lock(&trace_types_lock); if (unlikely(tr->current_trace != iter->trace)) { /* Close iter->trace before switching to the new current tracer */ if (iter->trace->close) iter->trace->close(iter); iter->trace = tr->current_trace; /* Reopen the new current tracer */ if (iter->trace->open) iter->trace->open(iter); } mutex_unlock(&trace_types_lock); #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->trace->use_max_tr) return ERR_PTR(-EBUSY); #endif if (*pos != iter->pos) { iter->ent = NULL; iter->cpu = 0; iter->idx = -1; if (cpu_file == RING_BUFFER_ALL_CPUS) { for_each_tracing_cpu(cpu) tracing_iter_reset(iter, cpu); } else tracing_iter_reset(iter, cpu_file); iter->leftover = 0; for (p = iter; p && l < *pos; p = s_next(m, p, &l)) ; } else { /* * If we overflowed the seq_file before, then we want * to just reuse the trace_seq buffer again. 
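 *
 * For reference, the seq_file core drives these callbacks roughly as
 * (sketch; error handling and buffer management omitted):
 *
 *	p = s_start(m, &pos);
 *	while (p && !IS_ERR(p)) {
 *		s_show(m, p);
 *		p = s_next(m, p, &pos);
 *	}
 *	s_stop(m, p);
 *
 * which is why the leftover entry below is re-emitted on the next
 * s_start() cycle instead of being skipped.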
*/ if (iter->leftover) p = iter; else { l = *pos - 1; p = s_next(m, p, &l); } } trace_event_read_lock(); trace_access_lock(cpu_file); return p; } static void s_stop(struct seq_file *m, void *p) { struct trace_iterator *iter = m->private; #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->trace->use_max_tr) return; #endif trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); } static void get_total_entries_cpu(struct array_buffer *buf, unsigned long *total, unsigned long *entries, int cpu) { unsigned long count; count = ring_buffer_entries_cpu(buf->buffer, cpu); /* * If this buffer has skipped entries, then we hold all * entries for the trace and we need to ignore the * ones before the time stamp. */ if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; /* total is the same as the entries */ *total = count; } else *total = count + ring_buffer_overrun_cpu(buf->buffer, cpu); *entries = count; } static void get_total_entries(struct array_buffer *buf, unsigned long *total, unsigned long *entries) { unsigned long t, e; int cpu; *total = 0; *entries = 0; for_each_tracing_cpu(cpu) { get_total_entries_cpu(buf, &t, &e, cpu); *total += t; *entries += e; } } unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu) { unsigned long total, entries; if (!tr) tr = &global_trace; get_total_entries_cpu(&tr->array_buffer, &total, &entries, cpu); return entries; } unsigned long trace_total_entries(struct trace_array *tr) { unsigned long total, entries; if (!tr) tr = &global_trace; get_total_entries(&tr->array_buffer, &total, &entries); return entries; } static void print_lat_help_header(struct seq_file *m) { seq_puts(m, "# _------=> CPU# \n" "# / _-----=> irqs-off/BH-disabled\n" "# | / _----=> need-resched \n" "# || / _---=> hardirq/softirq \n" "# ||| / _--=> preempt-depth \n" "# |||| / _-=> migrate-disable \n" "# ||||| / delay \n" "# cmd pid |||||| time | caller \n" "# \\ / |||||| \\ | / \n"); } static void print_event_info(struct array_buffer *buf, struct seq_file *m) { unsigned long total; unsigned long entries; get_total_entries(buf, &total, &entries); seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n", entries, total, num_online_cpus()); seq_puts(m, "#\n"); } static void print_func_help_header(struct array_buffer *buf, struct seq_file *m, unsigned int flags) { bool tgid = flags & TRACE_ITER_RECORD_TGID; print_event_info(buf, m); seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : ""); seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); } static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m, unsigned int flags) { bool tgid = flags & TRACE_ITER_RECORD_TGID; static const char space[] = " "; int prec = tgid ? 
12 : 2; print_event_info(buf, m); seq_printf(m, "# %.*s _-----=> irqs-off/BH-disabled\n", prec, space); seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space); seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space); seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space); seq_printf(m, "# %.*s||| / _-=> migrate-disable\n", prec, space); seq_printf(m, "# %.*s|||| / delay\n", prec, space); seq_printf(m, "# TASK-PID %.*s CPU# ||||| TIMESTAMP FUNCTION\n", prec, " TGID "); seq_printf(m, "# | | %.*s | ||||| | |\n", prec, " | "); } void print_trace_header(struct seq_file *m, struct trace_iterator *iter) { unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK); struct array_buffer *buf = iter->array_buffer; struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); struct tracer *type = iter->trace; unsigned long entries; unsigned long total; const char *name = type->name; get_total_entries(buf, &total, &entries); seq_printf(m, "# %s latency trace v1.1.5 on %s\n", name, init_utsname()->release); seq_puts(m, "# -----------------------------------" "---------------------------------\n"); seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |" " (M:%s VP:%d, KP:%d, SP:%d HP:%d", nsecs_to_usecs(data->saved_latency), entries, total, buf->cpu, preempt_model_none() ? "server" : preempt_model_voluntary() ? "desktop" : preempt_model_full() ? "preempt" : preempt_model_lazy() ? "lazy" : preempt_model_rt() ? "preempt_rt" : "unknown", /* These are reserved for later use */ 0, 0, 0, 0); #ifdef CONFIG_SMP seq_printf(m, " #P:%d)\n", num_online_cpus()); #else seq_puts(m, ")\n"); #endif seq_puts(m, "# -----------------\n"); seq_printf(m, "# | task: %.16s-%d " "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n", data->comm, data->pid, from_kuid_munged(seq_user_ns(m), data->uid), data->nice, data->policy, data->rt_priority); seq_puts(m, "# -----------------\n"); if (data->critical_start) { seq_puts(m, "# => started at: "); seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags); trace_print_seq(m, &iter->seq); seq_puts(m, "\n# => ended at: "); seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags); trace_print_seq(m, &iter->seq); seq_puts(m, "\n#\n"); } seq_puts(m, "#\n"); } static void test_cpu_buff_start(struct trace_iterator *iter) { struct trace_seq *s = &iter->seq; struct trace_array *tr = iter->tr; if (!(tr->trace_flags & TRACE_ITER_ANNOTATE)) return; if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) return; if (cpumask_available(iter->started) && cpumask_test_cpu(iter->cpu, iter->started)) return; if (per_cpu_ptr(iter->array_buffer->data, iter->cpu)->skipped_entries) return; if (cpumask_available(iter->started)) cpumask_set_cpu(iter->cpu, iter->started); /* Don't print started cpu buffer for the first entry of the trace */ if (iter->idx > 1) trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); } static enum print_line_t print_trace_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK); struct trace_entry *entry; struct trace_event *event; entry = iter->ent; test_cpu_buff_start(iter); event = ftrace_find_event(entry->type); if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { if (iter->iter_flags & TRACE_FILE_LAT_FMT) trace_print_lat_context(iter); else trace_print_context(iter); } if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; if (event) { if (tr->trace_flags & TRACE_ITER_FIELDS) return print_event_fields(iter, 
event); /* * For TRACE_EVENT() events, the print_fmt is not * safe to use if the array has delta offsets * Force printing via the fields. */ if ((tr->text_delta || tr->data_delta) && event->type > __TRACE_LAST_TYPE) return print_event_fields(iter, event); return event->funcs->trace(iter, sym_flags, event); } trace_seq_printf(s, "Unknown type %d\n", entry->type); return trace_handle_return(s); } static enum print_line_t print_raw_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; struct trace_entry *entry; struct trace_event *event; entry = iter->ent; if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) trace_seq_printf(s, "%d %d %llu ", entry->pid, iter->cpu, iter->ts); if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; event = ftrace_find_event(entry->type); if (event) return event->funcs->raw(iter, 0, event); trace_seq_printf(s, "%d ?\n", entry->type); return trace_handle_return(s); } static enum print_line_t print_hex_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; unsigned char newline = '\n'; struct trace_entry *entry; struct trace_event *event; entry = iter->ent; if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { SEQ_PUT_HEX_FIELD(s, entry->pid); SEQ_PUT_HEX_FIELD(s, iter->cpu); SEQ_PUT_HEX_FIELD(s, iter->ts); if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; } event = ftrace_find_event(entry->type); if (event) { enum print_line_t ret = event->funcs->hex(iter, 0, event); if (ret != TRACE_TYPE_HANDLED) return ret; } SEQ_PUT_FIELD(s, newline); return trace_handle_return(s); } static enum print_line_t print_bin_fmt(struct trace_iterator *iter) { struct trace_array *tr = iter->tr; struct trace_seq *s = &iter->seq; struct trace_entry *entry; struct trace_event *event; entry = iter->ent; if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) { SEQ_PUT_FIELD(s, entry->pid); SEQ_PUT_FIELD(s, iter->cpu); SEQ_PUT_FIELD(s, iter->ts); if (trace_seq_has_overflowed(s)) return TRACE_TYPE_PARTIAL_LINE; } event = ftrace_find_event(entry->type); return event ? event->funcs->binary(iter, 0, event) : TRACE_TYPE_HANDLED; } int trace_empty(struct trace_iterator *iter) { struct ring_buffer_iter *buf_iter; int cpu; /* If we are looking at one CPU buffer, only check that one */ if (iter->cpu_file != RING_BUFFER_ALL_CPUS) { cpu = iter->cpu_file; buf_iter = trace_buffer_iter(iter, cpu); if (buf_iter) { if (!ring_buffer_iter_empty(buf_iter)) return 0; } else { if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) return 0; } return 1; } for_each_tracing_cpu(cpu) { buf_iter = trace_buffer_iter(iter, cpu); if (buf_iter) { if (!ring_buffer_iter_empty(buf_iter)) return 0; } else { if (!ring_buffer_empty_cpu(iter->array_buffer->buffer, cpu)) return 0; } } return 1; } /* Called with trace_event_read_lock() held. 
 */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;
	enum print_line_t ret;

	if (iter->lost_events) {
		if (iter->lost_events == (unsigned long)-1)
			trace_seq_printf(&iter->seq, "CPU:%d [LOST EVENTS]\n",
					 iter->cpu);
		else
			trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
					 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}

void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->array_buffer,
							   m, trace_flags);
			else
				print_func_help_header(iter->array_buffer, m,
						       trace_flags);
		}
	}
}

static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS DISABLED\n"
		    "# MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "# Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "# (Doesn't have to be '2' works with any number that\n"
		    "# is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "# Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "# Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "# (Doesn't have to be '2' works with any number that\n"
		    "# is
not a '0' or '1')\n"); } static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { if (iter->tr->allocated_snapshot) seq_puts(m, "#\n# * Snapshot is allocated *\n#\n"); else seq_puts(m, "#\n# * Snapshot is freed *\n#\n"); seq_puts(m, "# Snapshot commands:\n"); if (iter->cpu_file == RING_BUFFER_ALL_CPUS) show_snapshot_main_help(m); else show_snapshot_percpu_help(m); } #else /* Should never be called */ static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { } #endif static int s_show(struct seq_file *m, void *v) { struct trace_iterator *iter = v; int ret; if (iter->ent == NULL) { if (iter->tr) { seq_printf(m, "# tracer: %s\n", iter->trace->name); seq_puts(m, "#\n"); test_ftrace_alive(m); } if (iter->snapshot && trace_empty(iter)) print_snapshot_help(m, iter); else if (iter->trace && iter->trace->print_header) iter->trace->print_header(m); else trace_default_header(m); } else if (iter->leftover) { /* * If we filled the seq_file buffer earlier, we * want to just show it now. */ ret = trace_print_seq(m, &iter->seq); /* ret should this time be zero, but you never know */ iter->leftover = ret; } else { ret = print_trace_line(iter); if (ret == TRACE_TYPE_PARTIAL_LINE) { iter->seq.full = 0; trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n"); } ret = trace_print_seq(m, &iter->seq); /* * If we overflow the seq_file buffer, then it will * ask us for this data again at start up. * Use that instead. * ret is 0 if seq_file write succeeded. * -1 otherwise. */ iter->leftover = ret; } return 0; } /* * Should be used after trace_array_get(), trace_types_lock * ensures that i_cdev was already initialized. */ static inline int tracing_get_cpu(struct inode *inode) { if (inode->i_cdev) /* See trace_create_cpu_file() */ return (long)inode->i_cdev - 1; return RING_BUFFER_ALL_CPUS; } static const struct seq_operations tracer_seq_ops = { .start = s_start, .next = s_next, .stop = s_stop, .show = s_show, }; /* * Note, as iter itself can be allocated and freed in different * ways, this function is only used to free its content, and not * the iterator itself. The only requirement to all the allocations * is that it must zero all fields (kzalloc), as freeing works with * ethier allocated content or NULL. */ static void free_trace_iter_content(struct trace_iterator *iter) { /* The fmt is either NULL, allocated or points to static_fmt_buf */ if (iter->fmt != static_fmt_buf) kfree(iter->fmt); kfree(iter->temp); kfree(iter->buffer_iter); mutex_destroy(&iter->mutex); free_cpumask_var(iter->started); } static struct trace_iterator * __tracing_open(struct inode *inode, struct file *file, bool snapshot) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; int cpu; if (tracing_disabled) return ERR_PTR(-ENODEV); iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter)); if (!iter) return ERR_PTR(-ENOMEM); iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter), GFP_KERNEL); if (!iter->buffer_iter) goto release; /* * trace_find_next_entry() may need to save off iter->ent. * It will place it into the iter->temp buffer. As most * events are less than 128, allocate a buffer of that size. * If one is greater, then trace_find_next_entry() will * allocate a new buffer to adjust for the bigger iter->ent. * It's not critical if it fails to get allocated here. 
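The snapshot help text printed by print_snapshot_help() above documents the echo interface of the "snapshot" file. A hedged userspace sketch of that sequence (it assumes CONFIG_TRACER_SNAPSHOT is enabled and the usual /sys/kernel/tracing mount point):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* Writing "1" allocates the snapshot buffer if needed and takes a snapshot. */
	fd = open("/sys/kernel/tracing/snapshot", O_WRONLY);
	if (fd < 0 || write(fd, "1", 1) != 1) {
		perror("snapshot write");
		return 1;
	}
	close(fd);

	/* Reading the same file dumps the snapshotted (max) buffer. */
	fd = open("/sys/kernel/tracing/snapshot", O_RDONLY);
	if (fd < 0) {
		perror("snapshot read");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}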
*/ iter->temp = kmalloc(128, GFP_KERNEL); if (iter->temp) iter->temp_size = 128; /* * trace_event_printf() may need to modify given format * string to replace %p with %px so that it shows real address * instead of hash value. However, that is only for the event * tracing, other tracer may not need. Defer the allocation * until it is needed. */ iter->fmt = NULL; iter->fmt_size = 0; mutex_lock(&trace_types_lock); iter->trace = tr->current_trace; if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL)) goto fail; iter->tr = tr; #ifdef CONFIG_TRACER_MAX_TRACE /* Currently only the top directory has a snapshot */ if (tr->current_trace->print_max || snapshot) iter->array_buffer = &tr->max_buffer; else #endif iter->array_buffer = &tr->array_buffer; iter->snapshot = snapshot; iter->pos = -1; iter->cpu_file = tracing_get_cpu(inode); mutex_init(&iter->mutex); /* Notify the tracer early; before we stop tracing. */ if (iter->trace->open) iter->trace->open(iter); /* Annotate start of buffers if we had overruns */ if (ring_buffer_overruns(iter->array_buffer->buffer)) iter->iter_flags |= TRACE_FILE_ANNOTATE; /* Output in nanoseconds only if we are using a clock in nanoseconds. */ if (trace_clocks[tr->clock_id].in_ns) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; /* * If pause-on-trace is enabled, then stop the trace while * dumping, unless this is the "snapshot" file */ if (!iter->snapshot && (tr->trace_flags & TRACE_ITER_PAUSE_ON_TRACE)) tracing_stop_tr(tr); if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { for_each_tracing_cpu(cpu) { iter->buffer_iter[cpu] = ring_buffer_read_prepare(iter->array_buffer->buffer, cpu, GFP_KERNEL); } ring_buffer_read_prepare_sync(); for_each_tracing_cpu(cpu) { ring_buffer_read_start(iter->buffer_iter[cpu]); tracing_iter_reset(iter, cpu); } } else { cpu = iter->cpu_file; iter->buffer_iter[cpu] = ring_buffer_read_prepare(iter->array_buffer->buffer, cpu, GFP_KERNEL); ring_buffer_read_prepare_sync(); ring_buffer_read_start(iter->buffer_iter[cpu]); tracing_iter_reset(iter, cpu); } mutex_unlock(&trace_types_lock); return iter; fail: mutex_unlock(&trace_types_lock); free_trace_iter_content(iter); release: seq_release_private(inode, file); return ERR_PTR(-ENOMEM); } int tracing_open_generic(struct inode *inode, struct file *filp) { int ret; ret = tracing_check_open_get_tr(NULL); if (ret) return ret; filp->private_data = inode->i_private; return 0; } bool tracing_is_disabled(void) { return (tracing_disabled) ? true: false; } /* * Open and update trace_array ref count. * Must have the current trace_array passed to it. */ int tracing_open_generic_tr(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; int ret; ret = tracing_check_open_get_tr(tr); if (ret) return ret; filp->private_data = inode->i_private; return 0; } /* * The private pointer of the inode is the trace_event_file. * Update the tr ref count associated to it. 
*/ int tracing_open_file_tr(struct inode *inode, struct file *filp) { struct trace_event_file *file = inode->i_private; int ret; ret = tracing_check_open_get_tr(file->tr); if (ret) return ret; mutex_lock(&event_mutex); /* Fail if the file is marked for removal */ if (file->flags & EVENT_FILE_FL_FREED) { trace_array_put(file->tr); ret = -ENODEV; } else { event_file_get(file); } mutex_unlock(&event_mutex); if (ret) return ret; filp->private_data = inode->i_private; return 0; } int tracing_release_file_tr(struct inode *inode, struct file *filp) { struct trace_event_file *file = inode->i_private; trace_array_put(file->tr); event_file_put(file); return 0; } int tracing_single_release_file_tr(struct inode *inode, struct file *filp) { tracing_release_file_tr(inode, filp); return single_release(inode, filp); } static int tracing_mark_open(struct inode *inode, struct file *filp) { stream_open(inode, filp); return tracing_open_generic_tr(inode, filp); } static int tracing_release(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; struct seq_file *m = file->private_data; struct trace_iterator *iter; int cpu; if (!(file->f_mode & FMODE_READ)) { trace_array_put(tr); return 0; } /* Writes do not use seq_file */ iter = m->private; mutex_lock(&trace_types_lock); for_each_tracing_cpu(cpu) { if (iter->buffer_iter[cpu]) ring_buffer_read_finish(iter->buffer_iter[cpu]); } if (iter->trace && iter->trace->close) iter->trace->close(iter); if (!iter->snapshot && tr->stop_count) /* reenable tracing if it was previously enabled */ tracing_start_tr(tr); __trace_array_put(tr); mutex_unlock(&trace_types_lock); free_trace_iter_content(iter); seq_release_private(inode, file); return 0; } int tracing_release_generic_tr(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; trace_array_put(tr); return 0; } static int tracing_single_release_tr(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; trace_array_put(tr); return single_release(inode, file); } static int tracing_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; int ret; ret = tracing_check_open_get_tr(tr); if (ret) return ret; /* If this file was open for write, then erase contents */ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { int cpu = tracing_get_cpu(inode); struct array_buffer *trace_buf = &tr->array_buffer; #ifdef CONFIG_TRACER_MAX_TRACE if (tr->current_trace->print_max) trace_buf = &tr->max_buffer; #endif if (cpu == RING_BUFFER_ALL_CPUS) tracing_reset_online_cpus(trace_buf); else tracing_reset_cpu(trace_buf, cpu); } if (file->f_mode & FMODE_READ) { iter = __tracing_open(inode, file, false); if (IS_ERR(iter)) ret = PTR_ERR(iter); else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) iter->iter_flags |= TRACE_FILE_LAT_FMT; } if (ret < 0) trace_array_put(tr); return ret; } /* * Some tracers are not suitable for instance buffers. * A tracer is always available for the global array (toplevel) * or if it explicitly states that it is. 
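tracing_open() above erases the selected buffer when the file is opened writable with O_TRUNC, which is what `echo > trace` relies on. The same reset from C (a sketch; the tracefs path is an assumption about where it is mounted):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/*
	 * FMODE_WRITE plus O_TRUNC takes the reset branch in tracing_open(),
	 * clearing all CPUs (or a single CPU via a per_cpu/cpuN/trace path).
	 */
	int fd = open("/sys/kernel/tracing/trace", O_WRONLY | O_TRUNC);

	if (fd < 0) {
		perror("open trace");
		return 1;
	}
	close(fd);
	return 0;
}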
*/ static bool trace_ok_for_array(struct tracer *t, struct trace_array *tr) { #ifdef CONFIG_TRACER_SNAPSHOT /* arrays with mapped buffer range do not have snapshots */ if (tr->range_addr_start && t->use_max_tr) return false; #endif return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances; } /* Find the next tracer that this trace array may use */ static struct tracer * get_tracer_for_array(struct trace_array *tr, struct tracer *t) { while (t && !trace_ok_for_array(t, tr)) t = t->next; return t; } static void * t_next(struct seq_file *m, void *v, loff_t *pos) { struct trace_array *tr = m->private; struct tracer *t = v; (*pos)++; if (t) t = get_tracer_for_array(tr, t->next); return t; } static void *t_start(struct seq_file *m, loff_t *pos) { struct trace_array *tr = m->private; struct tracer *t; loff_t l = 0; mutex_lock(&trace_types_lock); t = get_tracer_for_array(tr, trace_types); for (; t && l < *pos; t = t_next(m, t, &l)) ; return t; } static void t_stop(struct seq_file *m, void *p) { mutex_unlock(&trace_types_lock); } static int t_show(struct seq_file *m, void *v) { struct tracer *t = v; if (!t) return 0; seq_puts(m, t->name); if (t->next) seq_putc(m, ' '); else seq_putc(m, '\n'); return 0; } static const struct seq_operations show_traces_seq_ops = { .start = t_start, .next = t_next, .stop = t_stop, .show = t_show, }; static int show_traces_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; struct seq_file *m; int ret; ret = tracing_check_open_get_tr(tr); if (ret) return ret; ret = seq_open(file, &show_traces_seq_ops); if (ret) { trace_array_put(tr); return ret; } m = file->private_data; m->private = tr; return 0; } static int tracing_seq_release(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; trace_array_put(tr); return seq_release(inode, file); } static ssize_t tracing_write_stub(struct file *filp, const char __user *ubuf, size_t count, loff_t *ppos) { return count; } loff_t tracing_lseek(struct file *file, loff_t offset, int whence) { int ret; if (file->f_mode & FMODE_READ) ret = seq_lseek(file, offset, whence); else file->f_pos = ret = 0; return ret; } static const struct file_operations tracing_fops = { .open = tracing_open, .read = seq_read, .read_iter = seq_read_iter, .splice_read = copy_splice_read, .write = tracing_write_stub, .llseek = tracing_lseek, .release = tracing_release, }; static const struct file_operations show_traces_fops = { .open = show_traces_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_seq_release, }; static ssize_t tracing_cpumask_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { struct trace_array *tr = file_inode(filp)->i_private; char *mask_str; int len; len = snprintf(NULL, 0, "%*pb\n", cpumask_pr_args(tr->tracing_cpumask)) + 1; mask_str = kmalloc(len, GFP_KERNEL); if (!mask_str) return -ENOMEM; len = snprintf(mask_str, len, "%*pb\n", cpumask_pr_args(tr->tracing_cpumask)); if (len >= count) { count = -EINVAL; goto out_err; } count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len); out_err: kfree(mask_str); return count; } int tracing_set_cpumask(struct trace_array *tr, cpumask_var_t tracing_cpumask_new) { int cpu; if (!tr) return -EINVAL; local_irq_disable(); arch_spin_lock(&tr->max_lock); for_each_tracing_cpu(cpu) { /* * Increase/decrease the disabled counter if we are * about to flip a bit in the cpumask: */ if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && !cpumask_test_cpu(cpu, tracing_cpumask_new)) { 
atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu); #ifdef CONFIG_TRACER_MAX_TRACE ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu); #endif } if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && cpumask_test_cpu(cpu, tracing_cpumask_new)) { atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu); #ifdef CONFIG_TRACER_MAX_TRACE ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu); #endif } } arch_spin_unlock(&tr->max_lock); local_irq_enable(); cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); return 0; } static ssize_t tracing_cpumask_write(struct file *filp, const char __user *ubuf, size_t count, loff_t *ppos) { struct trace_array *tr = file_inode(filp)->i_private; cpumask_var_t tracing_cpumask_new; int err; if (count == 0 || count > KMALLOC_MAX_SIZE) return -EINVAL; if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) return -ENOMEM; err = cpumask_parse_user(ubuf, count, tracing_cpumask_new); if (err) goto err_free; err = tracing_set_cpumask(tr, tracing_cpumask_new); if (err) goto err_free; free_cpumask_var(tracing_cpumask_new); return count; err_free: free_cpumask_var(tracing_cpumask_new); return err; } static const struct file_operations tracing_cpumask_fops = { .open = tracing_open_generic_tr, .read = tracing_cpumask_read, .write = tracing_cpumask_write, .release = tracing_release_generic_tr, .llseek = generic_file_llseek, }; static int tracing_trace_options_show(struct seq_file *m, void *v) { struct tracer_opt *trace_opts; struct trace_array *tr = m->private; u32 tracer_flags; int i; guard(mutex)(&trace_types_lock); tracer_flags = tr->current_trace->flags->val; trace_opts = tr->current_trace->flags->opts; for (i = 0; trace_options[i]; i++) { if (tr->trace_flags & (1 << i)) seq_printf(m, "%s\n", trace_options[i]); else seq_printf(m, "no%s\n", trace_options[i]); } for (i = 0; trace_opts[i].name; i++) { if (tracer_flags & trace_opts[i].bit) seq_printf(m, "%s\n", trace_opts[i].name); else seq_printf(m, "no%s\n", trace_opts[i].name); } return 0; } static int __set_tracer_option(struct trace_array *tr, struct tracer_flags *tracer_flags, struct tracer_opt *opts, int neg) { struct tracer *trace = tracer_flags->trace; int ret; ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg); if (ret) return ret; if (neg) tracer_flags->val &= ~opts->bit; else tracer_flags->val |= opts->bit; return 0; } /* Try to assign a tracer specific option */ static int set_tracer_option(struct trace_array *tr, char *cmp, int neg) { struct tracer *trace = tr->current_trace; struct tracer_flags *tracer_flags = trace->flags; struct tracer_opt *opts = NULL; int i; for (i = 0; tracer_flags->opts[i].name; i++) { opts = &tracer_flags->opts[i]; if (strcmp(cmp, opts->name) == 0) return __set_tracer_option(tr, trace->flags, opts, neg); } return -EINVAL; } /* Some tracers require overwrite to stay enabled */ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) { if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set) return -1; return 0; } int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled) { if ((mask == TRACE_ITER_RECORD_TGID) || (mask == TRACE_ITER_RECORD_CMD) || (mask == TRACE_ITER_TRACE_PRINTK)) lockdep_assert_held(&event_mutex); /* do nothing if flag is already set */ if (!!(tr->trace_flags & mask) == !!enabled) return 0; /* Give the tracer a chance to approve the change */ if 
(tr->current_trace->flag_changed) if (tr->current_trace->flag_changed(tr, mask, !!enabled)) return -EINVAL; if (mask == TRACE_ITER_TRACE_PRINTK) { if (enabled) { update_printk_trace(tr); } else { /* * The global_trace cannot clear this. * It's flag only gets cleared if another instance sets it. */ if (printk_trace == &global_trace) return -EINVAL; /* * An instance must always have it set. * by default, that's the global_trace instane. */ if (printk_trace == tr) update_printk_trace(&global_trace); } } if (enabled) tr->trace_flags |= mask; else tr->trace_flags &= ~mask; if (mask == TRACE_ITER_RECORD_CMD) trace_event_enable_cmd_record(enabled); if (mask == TRACE_ITER_RECORD_TGID) { if (trace_alloc_tgid_map() < 0) { tr->trace_flags &= ~TRACE_ITER_RECORD_TGID; return -ENOMEM; } trace_event_enable_tgid_record(enabled); } if (mask == TRACE_ITER_EVENT_FORK) trace_event_follow_fork(tr, enabled); if (mask == TRACE_ITER_FUNC_FORK) ftrace_pid_follow_fork(tr, enabled); if (mask == TRACE_ITER_OVERWRITE) { ring_buffer_change_overwrite(tr->array_buffer.buffer, enabled); #ifdef CONFIG_TRACER_MAX_TRACE ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled); #endif } if (mask == TRACE_ITER_PRINTK) { trace_printk_start_stop_comm(enabled); trace_printk_control(enabled); } return 0; } int trace_set_options(struct trace_array *tr, char *option) { char *cmp; int neg = 0; int ret; size_t orig_len = strlen(option); int len; cmp = strstrip(option); len = str_has_prefix(cmp, "no"); if (len) neg = 1; cmp += len; mutex_lock(&event_mutex); mutex_lock(&trace_types_lock); ret = match_string(trace_options, -1, cmp); /* If no option could be set, test the specific tracer options */ if (ret < 0) ret = set_tracer_option(tr, cmp, neg); else ret = set_tracer_flag(tr, 1 << ret, !neg); mutex_unlock(&trace_types_lock); mutex_unlock(&event_mutex); /* * If the first trailing whitespace is replaced with '\0' by strstrip, * turn it back into a space. */ if (orig_len > strlen(option)) option[strlen(option)] = ' '; return ret; } static void __init apply_trace_boot_options(void) { char *buf = trace_boot_options_buf; char *option; while (true) { option = strsep(&buf, ","); if (!option) break; if (*option) trace_set_options(&global_trace, option); /* Put back the comma to allow this to be called again */ if (buf) *(buf - 1) = ','; } } static ssize_t tracing_trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct seq_file *m = filp->private_data; struct trace_array *tr = m->private; char buf[64]; int ret; if (cnt >= sizeof(buf)) return -EINVAL; if (copy_from_user(buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; ret = trace_set_options(tr, buf); if (ret < 0) return ret; *ppos += cnt; return cnt; } static int tracing_trace_options_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; int ret; ret = tracing_check_open_get_tr(tr); if (ret) return ret; ret = single_open(file, tracing_trace_options_show, inode->i_private); if (ret < 0) trace_array_put(tr); return ret; } static const struct file_operations tracing_iter_fops = { .open = tracing_trace_options_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_single_release_tr, .write = tracing_trace_options_write, }; static const char readme_msg[] = "tracing mini-HOWTO:\n\n" "By default tracefs removes all OTH file permission bits.\n" "When mounting tracefs an optional group id can be specified\n" "which adds the group to every directory and file in tracefs:\n\n" "\t e.g. 
mount -t tracefs [-o [gid=<gid>]] nodev /sys/kernel/tracing\n\n" "# echo 0 > tracing_on : quick way to disable tracing\n" "# echo 1 > tracing_on : quick way to re-enable tracing\n\n" " Important files:\n" " trace\t\t\t- The static contents of the buffer\n" "\t\t\t To clear the buffer write into this file: echo > trace\n" " trace_pipe\t\t- A consuming read to see the contents of the buffer\n" " current_tracer\t- function and latency tracers\n" " available_tracers\t- list of configured tracers for current_tracer\n" " error_log\t- error log for failed commands (that support it)\n" " buffer_size_kb\t- view and modify size of per cpu buffer\n" " buffer_total_size_kb - view total size of all cpu buffers\n\n" " trace_clock\t\t- change the clock used to order events\n" " local: Per cpu clock but may not be synced across CPUs\n" " global: Synced across CPUs but slows tracing down.\n" " counter: Not a clock, but just an increment\n" " uptime: Jiffy counter from time of boot\n" " perf: Same clock that perf events use\n" #ifdef CONFIG_X86_64 " x86-tsc: TSC cycle counter\n" #endif "\n timestamp_mode\t- view the mode used to timestamp events\n" " delta: Delta difference against a buffer-wide timestamp\n" " absolute: Absolute (standalone) timestamp\n" "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n" "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n" " tracing_cpumask\t- Limit which CPUs to trace\n" " instances\t\t- Make sub-buffers with: mkdir instances/foo\n" "\t\t\t Remove sub-buffer with rmdir\n" " trace_options\t\t- Set format or modify how tracing happens\n" "\t\t\t Disable an option by prefixing 'no' to the\n" "\t\t\t option name\n" " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n" #ifdef CONFIG_DYNAMIC_FTRACE "\n available_filter_functions - list of functions that can be filtered on\n" " set_ftrace_filter\t- echo function name in here to only trace these\n" "\t\t\t functions\n" "\t accepts: func_full_name or glob-matching-pattern\n" "\t modules: Can select a group via module\n" "\t Format: :mod:<module-name>\n" "\t example: echo :mod:ext3 > set_ftrace_filter\n" "\t triggers: a command to perform when function is hit\n" "\t Format: <function>:<trigger>[:count]\n" "\t trigger: traceon, traceoff\n" "\t\t enable_event:<system>:<event>\n" "\t\t disable_event:<system>:<event>\n" #ifdef CONFIG_STACKTRACE "\t\t stacktrace\n" #endif #ifdef CONFIG_TRACER_SNAPSHOT "\t\t snapshot\n" #endif "\t\t dump\n" "\t\t cpudump\n" "\t example: echo do_fault:traceoff > set_ftrace_filter\n" "\t echo do_trap:traceoff:3 > set_ftrace_filter\n" "\t The first one will disable tracing every time do_fault is hit\n" "\t The second will disable tracing at most 3 times when do_trap is hit\n" "\t The first time do trap is hit and it disables tracing, the\n" "\t counter will decrement to 2. If tracing is already disabled,\n" "\t the counter will not decrement. 
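The set_ftrace_filter trigger syntax described here (the explanation continues below) can be driven from C just as well as from echo. A sketch reusing the do_fault:traceoff example from this help text; it assumes CONFIG_DYNAMIC_FTRACE, the usual /sys/kernel/tracing mount, and that do_fault appears in available_filter_functions on the running kernel:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Same effect as: echo 'do_fault:traceoff:3' >> set_ftrace_filter */
	const char *cmd = "do_fault:traceoff:3\n";
	int fd = open("/sys/kernel/tracing/set_ftrace_filter", O_WRONLY);

	if (fd < 0) {
		perror("open set_ftrace_filter");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write trigger");
	close(fd);
	return 0;
}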
It only decrements when the\n" "\t trigger did work\n" "\t To remove trigger without count:\n" "\t echo '!<function>:<trigger> > set_ftrace_filter\n" "\t To remove trigger with a count:\n" "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n" " set_ftrace_notrace\t- echo function name in here to never trace.\n" "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n" "\t modules: Can select a group via module command :mod:\n" "\t Does not accept triggers\n" #endif /* CONFIG_DYNAMIC_FTRACE */ #ifdef CONFIG_FUNCTION_TRACER " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n" "\t\t (function)\n" " set_ftrace_notrace_pid\t- Write pid(s) to not function trace those pids\n" "\t\t (function)\n" #endif #ifdef CONFIG_FUNCTION_GRAPH_TRACER " set_graph_function\t- Trace the nested calls of a function (function_graph)\n" " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n" " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n" #endif #ifdef CONFIG_TRACER_SNAPSHOT "\n snapshot\t\t- Like 'trace' but shows the content of the static\n" "\t\t\t snapshot buffer. Read the contents for more\n" "\t\t\t information\n" #endif #ifdef CONFIG_STACK_TRACER " stack_trace\t\t- Shows the max stack trace when active\n" " stack_max_size\t- Shows current max stack size that was traced\n" "\t\t\t Write into this file to reset the max size (trigger a\n" "\t\t\t new trace)\n" #ifdef CONFIG_DYNAMIC_FTRACE " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n" "\t\t\t traces\n" #endif #endif /* CONFIG_STACK_TRACER */ #ifdef CONFIG_DYNAMIC_EVENTS " dynamic_events\t\t- Create/append/remove/show the generic dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #ifdef CONFIG_KPROBE_EVENTS " kprobe_events\t\t- Create/append/remove/show the kernel dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #ifdef CONFIG_UPROBE_EVENTS " uprobe_events\t\t- Create/append/remove/show the userspace dynamic events\n" "\t\t\t Write into this file to define/undefine new trace events.\n" #endif #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) || \ defined(CONFIG_FPROBE_EVENTS) "\t accepts: event-definitions (one definition per line)\n" #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS) "\t Format: p[:[<group>/][<event>]] <place> [<args>]\n" "\t r[maxactive][:[<group>/][<event>]] <place> [<args>]\n" #endif #ifdef CONFIG_FPROBE_EVENTS "\t f[:[<group>/][<event>]] <func-name>[%return] [<args>]\n" "\t t[:[<group>/][<event>]] <tracepoint> [<args>]\n" #endif #ifdef CONFIG_HIST_TRIGGERS "\t s:[synthetic/]<event> <field> [<field>]\n" #endif "\t e[:[<group>/][<event>]] <attached-group>.<attached-event> [<args>] [if <filter>]\n" "\t -:[<group>/][<event>]\n" #ifdef CONFIG_KPROBE_EVENTS "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n" "place (kretprobe): [<module>:]<symbol>[+<offset>]%return|<memaddr>\n" #endif #ifdef CONFIG_UPROBE_EVENTS " place (uprobe): <path>:<offset>[%return][(ref_ctr_offset)]\n" #endif "\t args: <name>=fetcharg[:type]\n" "\t fetcharg: (%<register>|$<efield>), @<address>, @<symbol>[+|-<offset>],\n" #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API "\t $stack<index>, $stack, $retval, $comm, $arg<N>,\n" #ifdef CONFIG_PROBE_EVENTS_BTF_ARGS "\t <argname>[->field[->field|.field...]],\n" #endif #else "\t $stack<index>, $stack, $retval, $comm,\n" #endif "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n" "\t 
kernel return probes support: $retval, $arg<N>, $comm\n" "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n" "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n" "\t symstr, %pd/%pD, <type>\\[<array-size>\\]\n" #ifdef CONFIG_HIST_TRIGGERS "\t field: <stype> <name>;\n" "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n" "\t [unsigned] char/int/long\n" #endif "\t efield: For event probes ('e' types), the field is on of the fields\n" "\t of the <attached-group>/<attached-event>.\n" #endif " set_event\t\t- Enables events by name written into it\n" "\t\t\t Can enable module events via: :mod:<module>\n" " events/\t\t- Directory containing all trace event subsystems:\n" " enable\t\t- Write 0/1 to enable/disable tracing of all events\n" " events/<system>/\t- Directory containing all trace events for <system>:\n" " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n" "\t\t\t events\n" " filter\t\t- If set, only events passing filter are traced\n" " events/<system>/<event>/\t- Directory containing control files for\n" "\t\t\t <event>:\n" " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n" " filter\t\t- If set, only events passing filter are traced\n" " trigger\t\t- If set, a command to perform when event is hit\n" "\t Format: <trigger>[:count][if <filter>]\n" "\t trigger: traceon, traceoff\n" "\t enable_event:<system>:<event>\n" "\t disable_event:<system>:<event>\n" #ifdef CONFIG_HIST_TRIGGERS "\t enable_hist:<system>:<event>\n" "\t disable_hist:<system>:<event>\n" #endif #ifdef CONFIG_STACKTRACE "\t\t stacktrace\n" #endif #ifdef CONFIG_TRACER_SNAPSHOT "\t\t snapshot\n" #endif #ifdef CONFIG_HIST_TRIGGERS "\t\t hist (see below)\n" #endif "\t example: echo traceoff > events/block/block_unplug/trigger\n" "\t echo traceoff:3 > events/block/block_unplug/trigger\n" "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n" "\t events/block/block_unplug/trigger\n" "\t The first disables tracing every time block_unplug is hit.\n" "\t The second disables tracing the first 3 times block_unplug is hit.\n" "\t The third enables the kmalloc event the first 3 times block_unplug\n" "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n" "\t Like function triggers, the counter is only decremented if it\n" "\t enabled or disabled tracing.\n" "\t To remove a trigger without a count:\n" "\t echo '!<trigger> > <system>/<event>/trigger\n" "\t To remove a trigger with a count:\n" "\t echo '!<trigger>:0 > <system>/<event>/trigger\n" "\t Filters can be ignored when removing a trigger.\n" #ifdef CONFIG_HIST_TRIGGERS " hist trigger\t- If set, event hits are aggregated into a hash table\n" "\t Format: hist:keys=<field1[,field2,...]>\n" "\t [:<var1>=<field|var_ref|numeric_literal>[,<var2>=...]]\n" "\t [:values=<field1[,field2,...]>]\n" "\t [:sort=<field1[,field2,...]>]\n" "\t [:size=#entries]\n" "\t [:pause][:continue][:clear]\n" "\t [:name=histname1]\n" "\t [:nohitcount]\n" "\t [:<handler>.<action>]\n" "\t [if <filter>]\n\n" "\t Note, special fields can be used as well:\n" "\t common_timestamp - to record current timestamp\n" "\t common_cpu - to record the CPU the event happened on\n" "\n" "\t A hist trigger variable can be:\n" "\t - a reference to a field e.g. x=current_timestamp,\n" "\t - a reference to another variable e.g. y=$x,\n" "\t - a numeric literal: e.g. ms_per_sec=1000,\n" "\t - an arithmetic expression: e.g. 
time_secs=current_timestamp/1000\n" "\n" "\t hist trigger arithmetic expressions support addition(+), subtraction(-),\n" "\t multiplication(*) and division(/) operators. An operand can be either a\n" "\t variable reference, field or numeric literal.\n" "\n" "\t When a matching event is hit, an entry is added to a hash\n" "\t table using the key(s) and value(s) named, and the value of a\n" "\t sum called 'hitcount' is incremented. Keys and values\n" "\t correspond to fields in the event's format description. Keys\n" "\t can be any field, or the special string 'common_stacktrace'.\n" "\t Compound keys consisting of up to two fields can be specified\n" "\t by the 'keys' keyword. Values must correspond to numeric\n" "\t fields. Sort keys consisting of up to two fields can be\n" "\t specified using the 'sort' keyword. The sort direction can\n" "\t be modified by appending '.descending' or '.ascending' to a\n" "\t sort field. The 'size' parameter can be used to specify more\n" "\t or fewer than the default 2048 entries for the hashtable size.\n" "\t If a hist trigger is given a name using the 'name' parameter,\n" "\t its histogram data will be shared with other triggers of the\n" "\t same name, and trigger hits will update this common data.\n\n" "\t Reading the 'hist' file for the event will dump the hash\n" "\t table in its entirety to stdout. If there are multiple hist\n" "\t triggers attached to an event, there will be a table for each\n" "\t trigger in the output. The table displayed for a named\n" "\t trigger will be the same as any other instance having the\n" "\t same name. The default format used to display a given field\n" "\t can be modified by appending any of the following modifiers\n" "\t to the field name, as applicable:\n\n" "\t .hex display a number as a hex value\n" "\t .sym display an address as a symbol\n" "\t .sym-offset display an address as a symbol and offset\n" "\t .execname display a common_pid as a program name\n" "\t .syscall display a syscall id as a syscall name\n" "\t .log2 display log2 value rather than raw number\n" "\t .buckets=size display values in groups of size rather than raw number\n" "\t .usecs display a common_timestamp in microseconds\n" "\t .percent display a number of percentage value\n" "\t .graph display a bar-graph of a value\n\n" "\t The 'pause' parameter can be used to pause an existing hist\n" "\t trigger or to start a hist trigger but not log any events\n" "\t until told to do so. 'continue' can be used to start or\n" "\t restart a paused hist trigger.\n\n" "\t The 'clear' parameter will clear the contents of a running\n" "\t hist trigger and leave its current paused/active state\n" "\t unchanged.\n\n" "\t The 'nohitcount' (or NOHC) parameter will suppress display of\n" "\t raw hitcount in the histogram.\n\n" "\t The enable_hist and disable_hist triggers can be used to\n" "\t have one event conditionally start and stop another event's\n" "\t already-attached hist trigger. The syntax is analogous to\n" "\t the enable_event and disable_event triggers.\n\n" "\t Hist trigger handlers and actions are executed whenever a\n" "\t a histogram entry is added or updated. They take the form:\n\n" "\t <handler>.<action>\n\n" "\t The available handlers are:\n\n" "\t onmatch(matching.event) - invoke on addition or update\n" "\t onmax(var) - invoke if var exceeds current max\n" "\t onchange(var) - invoke action if var changes\n\n" "\t The available actions are:\n\n" "\t trace(<synthetic_event>,param list) - generate synthetic event\n" "\t save(field,...) 
- save current event fields\n" #ifdef CONFIG_TRACER_SNAPSHOT "\t snapshot() - snapshot the trace buffer\n\n" #endif #ifdef CONFIG_SYNTH_EVENTS " events/synthetic_events\t- Create/append/remove/show synthetic events\n" "\t Write into this file to define/undefine new synthetic events.\n" "\t example: echo 'myevent u64 lat; char name[]; long[] stack' >> synthetic_events\n" #endif #endif ; static ssize_t tracing_readme_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { return simple_read_from_buffer(ubuf, cnt, ppos, readme_msg, strlen(readme_msg)); } static const struct file_operations tracing_readme_fops = { .open = tracing_open_generic, .read = tracing_readme_read, .llseek = generic_file_llseek, }; #ifdef CONFIG_TRACE_EVAL_MAP_FILE static union trace_eval_map_item * update_eval_map(union trace_eval_map_item *ptr) { if (!ptr->map.eval_string) { if (ptr->tail.next) { ptr = ptr->tail.next; /* Set ptr to the next real item (skip head) */ ptr++; } else return NULL; } return ptr; } static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos) { union trace_eval_map_item *ptr = v; /* * Paranoid! If ptr points to end, we don't want to increment past it. * This really should never happen. */ (*pos)++; ptr = update_eval_map(ptr); if (WARN_ON_ONCE(!ptr)) return NULL; ptr++; ptr = update_eval_map(ptr); return ptr; } static void *eval_map_start(struct seq_file *m, loff_t *pos) { union trace_eval_map_item *v; loff_t l = 0; mutex_lock(&trace_eval_mutex); v = trace_eval_maps; if (v) v++; while (v && l < *pos) { v = eval_map_next(m, v, &l); } return v; } static void eval_map_stop(struct seq_file *m, void *v) { mutex_unlock(&trace_eval_mutex); } static int eval_map_show(struct seq_file *m, void *v) { union trace_eval_map_item *ptr = v; seq_printf(m, "%s %ld (%s)\n", ptr->map.eval_string, ptr->map.eval_value, ptr->map.system); return 0; } static const struct seq_operations tracing_eval_map_seq_ops = { .start = eval_map_start, .next = eval_map_next, .stop = eval_map_stop, .show = eval_map_show, }; static int tracing_eval_map_open(struct inode *inode, struct file *filp) { int ret; ret = tracing_check_open_get_tr(NULL); if (ret) return ret; return seq_open(filp, &tracing_eval_map_seq_ops); } static const struct file_operations tracing_eval_map_fops = { .open = tracing_eval_map_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static inline union trace_eval_map_item * trace_eval_jmp_to_tail(union trace_eval_map_item *ptr) { /* Return tail of array given the head */ return ptr + ptr->head.length + 1; } static void trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start, int len) { struct trace_eval_map **stop; struct trace_eval_map **map; union trace_eval_map_item *map_array; union trace_eval_map_item *ptr; stop = start + len; /* * The trace_eval_maps contains the map plus a head and tail item, * where the head holds the module and length of array, and the * tail holds a pointer to the next list. 
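The comment above describes the trace_eval_maps layout: each chunk is a head item, `length` map items, and a tail item that links to the next chunk, which is why trace_eval_jmp_to_tail() earlier returns ptr + ptr->head.length + 1. A standalone sketch of walking such a chained array, using deliberately simplified, hypothetical types (not the kernel's union trace_eval_map_item) and dummy eval entries:

#include <stdio.h>

/* Hypothetical, stripped-down stand-in for union trace_eval_map_item. */
union item {
	struct { const char *mod; int length; } head;	/* slot 0          */
	struct { const char *str; long val;   } map;	/* slots 1..length */
	struct { union item *next;            } tail;	/* slot length + 1 */
};

/* Same arithmetic as trace_eval_jmp_to_tail() above. */
static union item *jmp_to_tail(union item *ptr)
{
	return ptr + ptr->head.length + 1;
}

static void walk(union item *chunk)
{
	while (chunk) {
		for (int i = 1; i <= chunk->head.length; i++)
			printf("%s = %ld\n", chunk[i].map.str, chunk[i].map.val);
		chunk = jmp_to_tail(chunk)->tail.next;	/* next chunk or NULL */
	}
}

int main(void)
{
	/* One chunk: head, two dummy eval entries, NULL tail. */
	union item chunk[4] = { { .head = { "example_mod", 2 } } };

	chunk[1].map.str = "EVAL_A"; chunk[1].map.val = 1;
	chunk[2].map.str = "EVAL_B"; chunk[2].map.val = 2;
	chunk[3].tail.next = NULL;

	walk(chunk);
	return 0;
}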
*/ map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL); if (!map_array) { pr_warn("Unable to allocate trace eval mapping\n"); return; } guard(mutex)(&trace_eval_mutex); if (!trace_eval_maps) trace_eval_maps = map_array; else { ptr = trace_eval_maps; for (;;) { ptr = trace_eval_jmp_to_tail(ptr); if (!ptr->tail.next) break; ptr = ptr->tail.next; } ptr->tail.next = map_array; } map_array->head.mod = mod; map_array->head.length = len; map_array++; for (map = start; (unsigned long)map < (unsigned long)stop; map++) { map_array->map = **map; map_array++; } memset(map_array, 0, sizeof(*map_array)); } static void trace_create_eval_file(struct dentry *d_tracer) { trace_create_file("eval_map", TRACE_MODE_READ, d_tracer, NULL, &tracing_eval_map_fops); } #else /* CONFIG_TRACE_EVAL_MAP_FILE */ static inline void trace_create_eval_file(struct dentry *d_tracer) { } static inline void trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start, int len) { } #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */ static void trace_insert_eval_map(struct module *mod, struct trace_eval_map **start, int len) { struct trace_eval_map **map; if (len <= 0) return; map = start; trace_event_eval_update(map, len); trace_insert_eval_map_file(mod, start, len); } static ssize_t tracing_set_trace_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[MAX_TRACER_SIZE+2]; int r; mutex_lock(&trace_types_lock); r = sprintf(buf, "%s\n", tr->current_trace->name); mutex_unlock(&trace_types_lock); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } int tracer_init(struct tracer *t, struct trace_array *tr) { tracing_reset_online_cpus(&tr->array_buffer); return t->init(tr); } static void set_buffer_entries(struct array_buffer *buf, unsigned long val) { int cpu; for_each_tracing_cpu(cpu) per_cpu_ptr(buf->data, cpu)->entries = val; } static void update_buffer_entries(struct array_buffer *buf, int cpu) { if (cpu == RING_BUFFER_ALL_CPUS) { set_buffer_entries(buf, ring_buffer_size(buf->buffer, 0)); } else { per_cpu_ptr(buf->data, cpu)->entries = ring_buffer_size(buf->buffer, cpu); } } #ifdef CONFIG_TRACER_MAX_TRACE /* resize @tr's buffer to the size of @size_tr's entries */ static int resize_buffer_duplicate_size(struct array_buffer *trace_buf, struct array_buffer *size_buf, int cpu_id) { int cpu, ret = 0; if (cpu_id == RING_BUFFER_ALL_CPUS) { for_each_tracing_cpu(cpu) { ret = ring_buffer_resize(trace_buf->buffer, per_cpu_ptr(size_buf->data, cpu)->entries, cpu); if (ret < 0) break; per_cpu_ptr(trace_buf->data, cpu)->entries = per_cpu_ptr(size_buf->data, cpu)->entries; } } else { ret = ring_buffer_resize(trace_buf->buffer, per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id); if (ret == 0) per_cpu_ptr(trace_buf->data, cpu_id)->entries = per_cpu_ptr(size_buf->data, cpu_id)->entries; } return ret; } #endif /* CONFIG_TRACER_MAX_TRACE */ static int __tracing_resize_ring_buffer(struct trace_array *tr, unsigned long size, int cpu) { int ret; /* * If kernel or user changes the size of the ring buffer * we use the size that was given, and we can forget about * expanding it later. 
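__tracing_resize_ring_buffer(), which continues below, is what ultimately services writes to buffer_size_kb (per_cpu/cpuN/buffer_size_kb resizes a single CPU). A hedged userspace sketch; the path and the per-CPU KiB unit are assumptions taken from the standard tracefs interface:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Ask for 4096 KiB per CPU; the top-level file resizes every CPU. */
	const char *kb = "4096";
	int fd = open("/sys/kernel/tracing/buffer_size_kb", O_WRONLY);

	if (fd < 0) {
		perror("open buffer_size_kb");
		return 1;
	}
	if (write(fd, kb, strlen(kb)) < 0)
		perror("resize");
	close(fd);
	return 0;
}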
*/ trace_set_ring_buffer_expanded(tr); /* May be called before buffers are initialized */ if (!tr->array_buffer.buffer) return 0; /* Do not allow tracing while resizing ring buffer */ tracing_stop_tr(tr); ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu); if (ret < 0) goto out_start; #ifdef CONFIG_TRACER_MAX_TRACE if (!tr->allocated_snapshot) goto out; ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu); if (ret < 0) { int r = resize_buffer_duplicate_size(&tr->array_buffer, &tr->array_buffer, cpu); if (r < 0) { /* * AARGH! We are left with different * size max buffer!!!! * The max buffer is our "snapshot" buffer. * When a tracer needs a snapshot (one of the * latency tracers), it swaps the max buffer * with the saved snap shot. We succeeded to * update the size of the main buffer, but failed to * update the size of the max buffer. But when we tried * to reset the main buffer to the original size, we * failed there too. This is very unlikely to * happen, but if it does, warn and kill all * tracing. */ WARN_ON(1); tracing_disabled = 1; } goto out_start; } update_buffer_entries(&tr->max_buffer, cpu); out: #endif /* CONFIG_TRACER_MAX_TRACE */ update_buffer_entries(&tr->array_buffer, cpu); out_start: tracing_start_tr(tr); return ret; } ssize_t tracing_resize_ring_buffer(struct trace_array *tr, unsigned long size, int cpu_id) { int ret; guard(mutex)(&trace_types_lock); if (cpu_id != RING_BUFFER_ALL_CPUS) { /* make sure, this cpu is enabled in the mask */ if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) return -EINVAL; } ret = __tracing_resize_ring_buffer(tr, size, cpu_id); if (ret < 0) ret = -ENOMEM; return ret; } static void update_last_data(struct trace_array *tr) { if (!tr->text_delta && !tr->data_delta) return; /* * Need to clear all CPU buffers as there cannot be events * from the previous boot mixed with events with this boot * as that will cause a confusing trace. Need to clear all * CPU buffers, even for those that may currently be offline. */ tracing_reset_all_cpus(&tr->array_buffer); /* Using current data now */ tr->text_delta = 0; tr->data_delta = 0; } /** * tracing_update_buffers - used by tracing facility to expand ring buffers * @tr: The tracing instance * * To save on memory when the tracing is never used on a system with it * configured in. The ring buffers are set to a minimum size. But once * a user starts to use the tracing facility, then they need to grow * to their default size. * * This function is to be called when a tracer is about to be used. */ int tracing_update_buffers(struct trace_array *tr) { int ret = 0; mutex_lock(&trace_types_lock); update_last_data(tr); if (!tr->ring_buffer_expanded) ret = __tracing_resize_ring_buffer(tr, trace_buf_size, RING_BUFFER_ALL_CPUS); mutex_unlock(&trace_types_lock); return ret; } struct trace_option_dentry; static void create_trace_option_files(struct trace_array *tr, struct tracer *tracer); /* * Used to clear out the tracer before deletion of an instance. * Must have trace_types_lock held. */ static void tracing_set_nop(struct trace_array *tr) { if (tr->current_trace == &nop_trace) return; tr->current_trace->enabled--; if (tr->current_trace->reset) tr->current_trace->reset(tr); tr->current_trace = &nop_trace; } static bool tracer_options_updated; static void add_tracer_options(struct trace_array *tr, struct tracer *t) { /* Only enable if the directory has been created already. 
*/ if (!tr->dir) return; /* Only create trace option files after update_tracer_options finish */ if (!tracer_options_updated) return; create_trace_option_files(tr, t); } int tracing_set_tracer(struct trace_array *tr, const char *buf) { struct tracer *t; #ifdef CONFIG_TRACER_MAX_TRACE bool had_max_tr; #endif int ret; guard(mutex)(&trace_types_lock); update_last_data(tr); if (!tr->ring_buffer_expanded) { ret = __tracing_resize_ring_buffer(tr, trace_buf_size, RING_BUFFER_ALL_CPUS); if (ret < 0) return ret; ret = 0; } for (t = trace_types; t; t = t->next) { if (strcmp(t->name, buf) == 0) break; } if (!t) return -EINVAL; if (t == tr->current_trace) return 0; #ifdef CONFIG_TRACER_SNAPSHOT if (t->use_max_tr) { local_irq_disable(); arch_spin_lock(&tr->max_lock); ret = tr->cond_snapshot ? -EBUSY : 0; arch_spin_unlock(&tr->max_lock); local_irq_enable(); if (ret) return ret; } #endif /* Some tracers won't work on kernel command line */ if (system_state < SYSTEM_RUNNING && t->noboot) { pr_warn("Tracer '%s' is not allowed on command line, ignored\n", t->name); return -EINVAL; } /* Some tracers are only allowed for the top level buffer */ if (!trace_ok_for_array(t, tr)) return -EINVAL; /* If trace pipe files are being read, we can't change the tracer */ if (tr->trace_ref) return -EBUSY; trace_branch_disable(); tr->current_trace->enabled--; if (tr->current_trace->reset) tr->current_trace->reset(tr); #ifdef CONFIG_TRACER_MAX_TRACE had_max_tr = tr->current_trace->use_max_tr; /* Current trace needs to be nop_trace before synchronize_rcu */ tr->current_trace = &nop_trace; if (had_max_tr && !t->use_max_tr) { /* * We need to make sure that the update_max_tr sees that * current_trace changed to nop_trace to keep it from * swapping the buffers after we resize it. * The update_max_tr is called from interrupts disabled * so a synchronized_sched() is sufficient. */ synchronize_rcu(); free_snapshot(tr); tracing_disarm_snapshot(tr); } if (!had_max_tr && t->use_max_tr) { ret = tracing_arm_snapshot_locked(tr); if (ret) return ret; } #else tr->current_trace = &nop_trace; #endif if (t->init) { ret = tracer_init(t, tr); if (ret) { #ifdef CONFIG_TRACER_MAX_TRACE if (t->use_max_tr) tracing_disarm_snapshot(tr); #endif return ret; } } tr->current_trace = t; tr->current_trace->enabled++; trace_branch_enable(tr); return 0; } static ssize_t tracing_set_trace_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[MAX_TRACER_SIZE+1]; char *name; size_t ret; int err; ret = cnt; if (cnt > MAX_TRACER_SIZE) cnt = MAX_TRACER_SIZE; if (copy_from_user(buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; name = strim(buf); err = tracing_set_tracer(tr, name); if (err) return err; *ppos += ret; return ret; } static ssize_t tracing_nsecs_read(unsigned long *ptr, char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[64]; int r; r = snprintf(buf, sizeof(buf), "%ld\n", *ptr == (unsigned long)-1 ? 
-1 : nsecs_to_usecs(*ptr)); if (r > sizeof(buf)) r = sizeof(buf); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf, size_t cnt, loff_t *ppos) { unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; *ptr = val * 1000; return cnt; } static ssize_t tracing_thresh_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos); } static ssize_t tracing_thresh_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; int ret; guard(mutex)(&trace_types_lock); ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos); if (ret < 0) return ret; if (tr->current_trace->update_thresh) { ret = tr->current_trace->update_thresh(tr); if (ret < 0) return ret; } return cnt; } #ifdef CONFIG_TRACER_MAX_TRACE static ssize_t tracing_max_lat_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; return tracing_nsecs_read(&tr->max_latency, ubuf, cnt, ppos); } static ssize_t tracing_max_lat_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; return tracing_nsecs_write(&tr->max_latency, ubuf, cnt, ppos); } #endif static int open_pipe_on_cpu(struct trace_array *tr, int cpu) { if (cpu == RING_BUFFER_ALL_CPUS) { if (cpumask_empty(tr->pipe_cpumask)) { cpumask_setall(tr->pipe_cpumask); return 0; } } else if (!cpumask_test_cpu(cpu, tr->pipe_cpumask)) { cpumask_set_cpu(cpu, tr->pipe_cpumask); return 0; } return -EBUSY; } static void close_pipe_on_cpu(struct trace_array *tr, int cpu) { if (cpu == RING_BUFFER_ALL_CPUS) { WARN_ON(!cpumask_full(tr->pipe_cpumask)); cpumask_clear(tr->pipe_cpumask); } else { WARN_ON(!cpumask_test_cpu(cpu, tr->pipe_cpumask)); cpumask_clear_cpu(cpu, tr->pipe_cpumask); } } static int tracing_open_pipe(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; int cpu; int ret; ret = tracing_check_open_get_tr(tr); if (ret) return ret; mutex_lock(&trace_types_lock); cpu = tracing_get_cpu(inode); ret = open_pipe_on_cpu(tr, cpu); if (ret) goto fail_pipe_on_cpu; /* create a buffer to store the information to pass to userspace */ iter = kzalloc(sizeof(*iter), GFP_KERNEL); if (!iter) { ret = -ENOMEM; goto fail_alloc_iter; } trace_seq_init(&iter->seq); iter->trace = tr->current_trace; if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { ret = -ENOMEM; goto fail; } /* trace pipe does not show start of buffer */ cpumask_setall(iter->started); if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) iter->iter_flags |= TRACE_FILE_LAT_FMT; /* Output in nanoseconds only if we are using a clock in nanoseconds. 
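open_pipe_on_cpu() above enforces a single trace_pipe reader per CPU (or one reader of the all-CPU file), so a second open of the same pipe fails with EBUSY. A small sketch demonstrating that behaviour (the tracefs path is an assumption):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/tracing/trace_pipe";
	int first = open(path, O_RDONLY);
	int second;

	if (first < 0) {
		perror("first open");
		return 1;
	}
	/* open_pipe_on_cpu() already holds the pipe_cpumask for this reader... */
	second = open(path, O_RDONLY);
	if (second < 0 && errno == EBUSY)
		printf("second open refused as expected: EBUSY\n");
	else if (second >= 0)
		close(second);	/* ...so a second descriptor is not expected here */
	close(first);
	return 0;
}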
*/ if (trace_clocks[tr->clock_id].in_ns) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; iter->tr = tr; iter->array_buffer = &tr->array_buffer; iter->cpu_file = cpu; mutex_init(&iter->mutex); filp->private_data = iter; if (iter->trace->pipe_open) iter->trace->pipe_open(iter); nonseekable_open(inode, filp); tr->trace_ref++; mutex_unlock(&trace_types_lock); return ret; fail: kfree(iter); fail_alloc_iter: close_pipe_on_cpu(tr, cpu); fail_pipe_on_cpu: __trace_array_put(tr); mutex_unlock(&trace_types_lock); return ret; } static int tracing_release_pipe(struct inode *inode, struct file *file) { struct trace_iterator *iter = file->private_data; struct trace_array *tr = inode->i_private; mutex_lock(&trace_types_lock); tr->trace_ref--; if (iter->trace->pipe_close) iter->trace->pipe_close(iter); close_pipe_on_cpu(tr, iter->cpu_file); mutex_unlock(&trace_types_lock); free_trace_iter_content(iter); kfree(iter); trace_array_put(tr); return 0; } static __poll_t trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table) { struct trace_array *tr = iter->tr; /* Iterators are static, they should be filled or empty */ if (trace_buffer_iter(iter, iter->cpu_file)) return EPOLLIN | EPOLLRDNORM; if (tr->trace_flags & TRACE_ITER_BLOCK) /* * Always select as readable when in blocking mode */ return EPOLLIN | EPOLLRDNORM; else return ring_buffer_poll_wait(iter->array_buffer->buffer, iter->cpu_file, filp, poll_table, iter->tr->buffer_percent); } static __poll_t tracing_poll_pipe(struct file *filp, poll_table *poll_table) { struct trace_iterator *iter = filp->private_data; return trace_poll(iter, filp, poll_table); } /* Must be called with iter->mutex held. */ static int tracing_wait_pipe(struct file *filp) { struct trace_iterator *iter = filp->private_data; int ret; while (trace_empty(iter)) { if ((filp->f_flags & O_NONBLOCK)) { return -EAGAIN; } /* * We block until we read something and tracing is disabled. * We still block if tracing is disabled, but we have never * read anything. This allows a user to cat this file, and * then enable tracing. But after we have read something, * we give an EOF when tracing is again disabled. * * iter->pos will be 0 if we haven't read anything. */ if (!tracer_tracing_is_on(iter->tr) && iter->pos) break; mutex_unlock(&iter->mutex); ret = wait_on_pipe(iter, 0); mutex_lock(&iter->mutex); if (ret) return ret; } return 1; } /* * Consumer reader. */ static ssize_t tracing_read_pipe(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_iterator *iter = filp->private_data; ssize_t sret; /* * Avoid more than one consumer on a single file descriptor * This is just a matter of traces coherency, the ring buffer itself * is protected. 
 */
	guard(mutex)(&iter->mutex);

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		return sret;

	trace_seq_init(&iter->seq);

	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			return sret;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		return sret;

	/* stop when tracing is finished */
	if (trace_empty(iter))
		return 0;

	if (cnt >= TRACE_SEQ_BUFFER_SIZE)
		cnt = TRACE_SEQ_BUFFER_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	trace_iterator_reset(iter);
	cpumask_clear(iter->started);
	trace_seq_init(&iter->seq);

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int save_len = iter->seq.seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/*
			 * If one print_trace_line() fills the entire trace_seq
			 * in one shot, trace_seq_to_user() will return -EBUSY
			 * because save_len == 0.  In that case we need to
			 * consume it; otherwise the loop will pick up this
			 * event again next time, resulting in an infinite loop.
			 */
			if (save_len == 0) {
				iter->seq.full = 0;
				trace_seq_puts(&iter->seq, "[LINE TOO BIG]\n");
				trace_consume(iter);
				break;
			}

			/* In other cases, don't print partial lines */
			iter->seq.seq.len = save_len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (trace_seq_used(&iter->seq) >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should have left via the partial-output
		 * condition above.  One of the trace_seq_* functions is not
		 * being used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.readpos >= trace_seq_used(&iter->seq))
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

	return sret;
}

static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int save_len;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			iter->seq.seq.len = save_len;
			break;
		}

		/*
		 * This should not be hit, because it should only
		 * be set if the iter->seq overflowed. But check it
		 * anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;
			break;
		}

		count = trace_seq_used(&iter->seq) - save_len;
		if (rem < count) {
			rem = 0;
			iter->seq.seq.len = save_len;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below.
*/ .nr_pages_max = PIPE_DEF_BUFFERS, .ops = &default_pipe_buf_ops, .spd_release = tracing_spd_release_pipe, }; ssize_t ret; size_t rem; unsigned int i; if (splice_grow_spd(pipe, &spd)) return -ENOMEM; mutex_lock(&iter->mutex); if (iter->trace->splice_read) { ret = iter->trace->splice_read(iter, filp, ppos, pipe, len, flags); if (ret) goto out_err; } ret = tracing_wait_pipe(filp); if (ret <= 0) goto out_err; if (!iter->ent && !trace_find_next_entry_inc(iter)) { ret = -EFAULT; goto out_err; } trace_event_read_lock(); trace_access_lock(iter->cpu_file); /* Fill as many pages as possible. */ for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) { spd.pages[i] = alloc_page(GFP_KERNEL); if (!spd.pages[i]) break; rem = tracing_fill_pipe_page(rem, iter); /* Copy the data into the page, so we can start over. */ ret = trace_seq_to_buffer(&iter->seq, page_address(spd.pages[i]), trace_seq_used(&iter->seq)); if (ret < 0) { __free_page(spd.pages[i]); break; } spd.partial[i].offset = 0; spd.partial[i].len = trace_seq_used(&iter->seq); trace_seq_init(&iter->seq); } trace_access_unlock(iter->cpu_file); trace_event_read_unlock(); mutex_unlock(&iter->mutex); spd.nr_pages = i; if (i) ret = splice_to_pipe(pipe, &spd); else ret = 0; out: splice_shrink_spd(&spd); return ret; out_err: mutex_unlock(&iter->mutex); goto out; } static ssize_t tracing_entries_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct inode *inode = file_inode(filp); struct trace_array *tr = inode->i_private; int cpu = tracing_get_cpu(inode); char buf[64]; int r = 0; ssize_t ret; mutex_lock(&trace_types_lock); if (cpu == RING_BUFFER_ALL_CPUS) { int cpu, buf_size_same; unsigned long size; size = 0; buf_size_same = 1; /* check if all cpu sizes are same */ for_each_tracing_cpu(cpu) { /* fill in the size from first enabled cpu */ if (size == 0) size = per_cpu_ptr(tr->array_buffer.data, cpu)->entries; if (size != per_cpu_ptr(tr->array_buffer.data, cpu)->entries) { buf_size_same = 0; break; } } if (buf_size_same) { if (!tr->ring_buffer_expanded) r = sprintf(buf, "%lu (expanded: %lu)\n", size >> 10, trace_buf_size >> 10); else r = sprintf(buf, "%lu\n", size >> 10); } else r = sprintf(buf, "X\n"); } else r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10); mutex_unlock(&trace_types_lock); ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); return ret; } static ssize_t tracing_entries_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct inode *inode = file_inode(filp); struct trace_array *tr = inode->i_private; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; /* must have at least 1 entry */ if (!val) return -EINVAL; /* value is in KB */ val <<= 10; ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); if (ret < 0) return ret; *ppos += cnt; return cnt; } static ssize_t tracing_total_entries_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[64]; int r, cpu; unsigned long size = 0, expanded_size = 0; mutex_lock(&trace_types_lock); for_each_tracing_cpu(cpu) { size += per_cpu_ptr(tr->array_buffer.data, cpu)->entries >> 10; if (!tr->ring_buffer_expanded) expanded_size += trace_buf_size >> 10; } if (tr->ring_buffer_expanded) r = sprintf(buf, "%lu\n", size); else r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size); mutex_unlock(&trace_types_lock); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t 
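/*
 * Illustrative userspace sketch (not part of trace.c): reading the
 * buffer_size_kb file implemented by tracing_entries_read() above.  The value
 * is reported in KiB; before the ring buffer has been expanded it is suffixed
 * with "(expanded: <n>)", and "X" is printed when the per-CPU sizes differ.
 * Path assumes tracefs at /sys/kernel/tracing.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void show_buffer_size_kb(void)
{
	char buf[64];
	ssize_t n;
	int fd = open("/sys/kernel/tracing/buffer_size_kb", O_RDONLY);

	if (fd < 0)
		return;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("per-cpu buffer size (KiB): %s", buf);
	}
	close(fd);
}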
tracing_last_boot_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; struct seq_buf seq; char buf[64]; seq_buf_init(&seq, buf, 64); seq_buf_printf(&seq, "text delta:\t%ld\n", tr->text_delta); seq_buf_printf(&seq, "data delta:\t%ld\n", tr->data_delta); return simple_read_from_buffer(ubuf, cnt, ppos, buf, seq_buf_used(&seq)); } static int tracing_buffer_meta_open(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; int cpu = tracing_get_cpu(inode); int ret; ret = tracing_check_open_get_tr(tr); if (ret) return ret; ret = ring_buffer_meta_seq_init(filp, tr->array_buffer.buffer, cpu); if (ret < 0) __trace_array_put(tr); return ret; } static ssize_t tracing_free_buffer_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { /* * There is no need to read what the user has written, this function * is just to make sure that there is no error when "echo" is used */ *ppos += cnt; return cnt; } static int tracing_free_buffer_release(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; /* disable tracing ? */ if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE) tracer_tracing_off(tr); /* resize the ring buffer to 0 */ tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); trace_array_put(tr); return 0; } #define TRACE_MARKER_MAX_SIZE 4096 static ssize_t tracing_mark_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) { struct trace_array *tr = filp->private_data; struct ring_buffer_event *event; enum event_trigger_type tt = ETT_NONE; struct trace_buffer *buffer; struct print_entry *entry; int meta_size; ssize_t written; size_t size; int len; /* Used in tracing_mark_raw_write() as well */ #define FAULTED_STR "<faulted>" #define FAULTED_SIZE (sizeof(FAULTED_STR) - 1) /* '\0' is already accounted for */ if (tracing_disabled) return -EINVAL; if (!(tr->trace_flags & TRACE_ITER_MARKERS)) return -EINVAL; if ((ssize_t)cnt < 0) return -EINVAL; if (cnt > TRACE_MARKER_MAX_SIZE) cnt = TRACE_MARKER_MAX_SIZE; meta_size = sizeof(*entry) + 2; /* add '\0' and possible '\n' */ again: size = cnt + meta_size; /* If less than "<faulted>", then make sure we can still add that */ if (cnt < FAULTED_SIZE) size += FAULTED_SIZE - cnt; buffer = tr->array_buffer.buffer; event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size, tracing_gen_ctx()); if (unlikely(!event)) { /* * If the size was greater than what was allowed, then * make it smaller and try again. 
*/ if (size > ring_buffer_max_event_size(buffer)) { /* cnt < FAULTED size should never be bigger than max */ if (WARN_ON_ONCE(cnt < FAULTED_SIZE)) return -EBADF; cnt = ring_buffer_max_event_size(buffer) - meta_size; /* The above should only happen once */ if (WARN_ON_ONCE(cnt + meta_size == size)) return -EBADF; goto again; } /* Ring buffer disabled, return as if not open for write */ return -EBADF; } entry = ring_buffer_event_data(event); entry->ip = _THIS_IP_; len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt); if (len) { memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE); cnt = FAULTED_SIZE; written = -EFAULT; } else written = cnt; if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) { /* do not add \n before testing triggers, but add \0 */ entry->buf[cnt] = '\0'; tt = event_triggers_call(tr->trace_marker_file, buffer, entry, event); } if (entry->buf[cnt - 1] != '\n') { entry->buf[cnt] = '\n'; entry->buf[cnt + 1] = '\0'; } else entry->buf[cnt] = '\0'; if (static_branch_unlikely(&trace_marker_exports_enabled)) ftrace_exports(event, TRACE_EXPORT_MARKER); __buffer_unlock_commit(buffer, event); if (tt) event_triggers_post_call(tr->trace_marker_file, tt); return written; } static ssize_t tracing_mark_raw_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) { struct trace_array *tr = filp->private_data; struct ring_buffer_event *event; struct trace_buffer *buffer; struct raw_data_entry *entry; ssize_t written; int size; int len; #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int)) if (tracing_disabled) return -EINVAL; if (!(tr->trace_flags & TRACE_ITER_MARKERS)) return -EINVAL; /* The marker must at least have a tag id */ if (cnt < sizeof(unsigned int)) return -EINVAL; size = sizeof(*entry) + cnt; if (cnt < FAULT_SIZE_ID) size += FAULT_SIZE_ID - cnt; buffer = tr->array_buffer.buffer; if (size > ring_buffer_max_event_size(buffer)) return -EINVAL; event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size, tracing_gen_ctx()); if (!event) /* Ring buffer disabled, return as if not open for write */ return -EBADF; entry = ring_buffer_event_data(event); len = __copy_from_user_inatomic(&entry->id, ubuf, cnt); if (len) { entry->id = -1; memcpy(&entry->buf, FAULTED_STR, FAULTED_SIZE); written = -EFAULT; } else written = cnt; __buffer_unlock_commit(buffer, event); return written; } static int tracing_clock_show(struct seq_file *m, void *v) { struct trace_array *tr = m->private; int i; for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) seq_printf(m, "%s%s%s%s", i ? " " : "", i == tr->clock_id ? "[" : "", trace_clocks[i].name, i == tr->clock_id ? "]" : ""); seq_putc(m, '\n'); return 0; } int tracing_set_clock(struct trace_array *tr, const char *clockstr) { int i; for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) { if (strcmp(trace_clocks[i].name, clockstr) == 0) break; } if (i == ARRAY_SIZE(trace_clocks)) return -EINVAL; mutex_lock(&trace_types_lock); tr->clock_id = i; ring_buffer_set_clock(tr->array_buffer.buffer, trace_clocks[i].func); /* * New clock may not be consistent with the previous clock. * Reset the buffer so that it doesn't have incomparable timestamps. 
*/ tracing_reset_online_cpus(&tr->array_buffer); #ifdef CONFIG_TRACER_MAX_TRACE if (tr->max_buffer.buffer) ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); tracing_reset_online_cpus(&tr->max_buffer); #endif mutex_unlock(&trace_types_lock); return 0; } static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *fpos) { struct seq_file *m = filp->private_data; struct trace_array *tr = m->private; char buf[64]; const char *clockstr; int ret; if (cnt >= sizeof(buf)) return -EINVAL; if (copy_from_user(buf, ubuf, cnt)) return -EFAULT; buf[cnt] = 0; clockstr = strstrip(buf); ret = tracing_set_clock(tr, clockstr); if (ret) return ret; *fpos += cnt; return cnt; } static int tracing_clock_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; int ret; ret = tracing_check_open_get_tr(tr); if (ret) return ret; ret = single_open(file, tracing_clock_show, inode->i_private); if (ret < 0) trace_array_put(tr); return ret; } static int tracing_time_stamp_mode_show(struct seq_file *m, void *v) { struct trace_array *tr = m->private; mutex_lock(&trace_types_lock); if (ring_buffer_time_stamp_abs(tr->array_buffer.buffer)) seq_puts(m, "delta [absolute]\n"); else seq_puts(m, "[delta] absolute\n"); mutex_unlock(&trace_types_lock); return 0; } static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; int ret; ret = tracing_check_open_get_tr(tr); if (ret) return ret; ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private); if (ret < 0) trace_array_put(tr); return ret; } u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe) { if (rbe == this_cpu_read(trace_buffered_event)) return ring_buffer_time_stamp(buffer); return ring_buffer_event_time_stamp(buffer, rbe); } /* * Set or disable using the per CPU trace_buffer_event when possible. 
*/ int tracing_set_filter_buffering(struct trace_array *tr, bool set) { guard(mutex)(&trace_types_lock); if (set && tr->no_filter_buffering_ref++) return 0; if (!set) { if (WARN_ON_ONCE(!tr->no_filter_buffering_ref)) return -EINVAL; --tr->no_filter_buffering_ref; } return 0; } struct ftrace_buffer_info { struct trace_iterator iter; void *spare; unsigned int spare_cpu; unsigned int spare_size; unsigned int read; }; #ifdef CONFIG_TRACER_SNAPSHOT static int tracing_snapshot_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; struct trace_iterator *iter; struct seq_file *m; int ret; ret = tracing_check_open_get_tr(tr); if (ret) return ret; if (file->f_mode & FMODE_READ) { iter = __tracing_open(inode, file, true); if (IS_ERR(iter)) ret = PTR_ERR(iter); } else { /* Writes still need the seq_file to hold the private data */ ret = -ENOMEM; m = kzalloc(sizeof(*m), GFP_KERNEL); if (!m) goto out; iter = kzalloc(sizeof(*iter), GFP_KERNEL); if (!iter) { kfree(m); goto out; } ret = 0; iter->tr = tr; iter->array_buffer = &tr->max_buffer; iter->cpu_file = tracing_get_cpu(inode); m->private = iter; file->private_data = m; } out: if (ret < 0) trace_array_put(tr); return ret; } static void tracing_swap_cpu_buffer(void *tr) { update_max_tr_single((struct trace_array *)tr, current, smp_processor_id()); } static ssize_t tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct seq_file *m = filp->private_data; struct trace_iterator *iter = m->private; struct trace_array *tr = iter->tr; unsigned long val; int ret; ret = tracing_update_buffers(tr); if (ret < 0) return ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; guard(mutex)(&trace_types_lock); if (tr->current_trace->use_max_tr) return -EBUSY; local_irq_disable(); arch_spin_lock(&tr->max_lock); if (tr->cond_snapshot) ret = -EBUSY; arch_spin_unlock(&tr->max_lock); local_irq_enable(); if (ret) return ret; switch (val) { case 0: if (iter->cpu_file != RING_BUFFER_ALL_CPUS) return -EINVAL; if (tr->allocated_snapshot) free_snapshot(tr); break; case 1: /* Only allow per-cpu swap if the ring buffer supports it */ #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP if (iter->cpu_file != RING_BUFFER_ALL_CPUS) return -EINVAL; #endif if (tr->allocated_snapshot) ret = resize_buffer_duplicate_size(&tr->max_buffer, &tr->array_buffer, iter->cpu_file); ret = tracing_arm_snapshot_locked(tr); if (ret) return ret; /* Now, we're going to swap */ if (iter->cpu_file == RING_BUFFER_ALL_CPUS) { local_irq_disable(); update_max_tr(tr, current, smp_processor_id(), NULL); local_irq_enable(); } else { smp_call_function_single(iter->cpu_file, tracing_swap_cpu_buffer, (void *)tr, 1); } tracing_disarm_snapshot(tr); break; default: if (tr->allocated_snapshot) { if (iter->cpu_file == RING_BUFFER_ALL_CPUS) tracing_reset_online_cpus(&tr->max_buffer); else tracing_reset_cpu(&tr->max_buffer, iter->cpu_file); } break; } if (ret >= 0) { *ppos += cnt; ret = cnt; } return ret; } static int tracing_snapshot_release(struct inode *inode, struct file *file) { struct seq_file *m = file->private_data; int ret; ret = tracing_release(inode, file); if (file->f_mode & FMODE_READ) return ret; /* If write only, the seq_file is just a stub */ if (m) kfree(m->private); kfree(m); return 0; } static int tracing_buffers_open(struct inode *inode, struct file *filp); static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos); static int tracing_buffers_release(struct inode *inode, struct 
file *file); static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags); static int snapshot_raw_open(struct inode *inode, struct file *filp) { struct ftrace_buffer_info *info; int ret; /* The following checks for tracefs lockdown */ ret = tracing_buffers_open(inode, filp); if (ret < 0) return ret; info = filp->private_data; if (info->iter.trace->use_max_tr) { tracing_buffers_release(inode, filp); return -EBUSY; } info->iter.snapshot = true; info->iter.array_buffer = &info->iter.tr->max_buffer; return ret; } #endif /* CONFIG_TRACER_SNAPSHOT */ static const struct file_operations tracing_thresh_fops = { .open = tracing_open_generic, .read = tracing_thresh_read, .write = tracing_thresh_write, .llseek = generic_file_llseek, }; #ifdef CONFIG_TRACER_MAX_TRACE static const struct file_operations tracing_max_lat_fops = { .open = tracing_open_generic_tr, .read = tracing_max_lat_read, .write = tracing_max_lat_write, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; #endif static const struct file_operations set_tracer_fops = { .open = tracing_open_generic_tr, .read = tracing_set_trace_read, .write = tracing_set_trace_write, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; static const struct file_operations tracing_pipe_fops = { .open = tracing_open_pipe, .poll = tracing_poll_pipe, .read = tracing_read_pipe, .splice_read = tracing_splice_read_pipe, .release = tracing_release_pipe, }; static const struct file_operations tracing_entries_fops = { .open = tracing_open_generic_tr, .read = tracing_entries_read, .write = tracing_entries_write, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; static const struct file_operations tracing_buffer_meta_fops = { .open = tracing_buffer_meta_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_seq_release, }; static const struct file_operations tracing_total_entries_fops = { .open = tracing_open_generic_tr, .read = tracing_total_entries_read, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; static const struct file_operations tracing_free_buffer_fops = { .open = tracing_open_generic_tr, .write = tracing_free_buffer_write, .release = tracing_free_buffer_release, }; static const struct file_operations tracing_mark_fops = { .open = tracing_mark_open, .write = tracing_mark_write, .release = tracing_release_generic_tr, }; static const struct file_operations tracing_mark_raw_fops = { .open = tracing_mark_open, .write = tracing_mark_raw_write, .release = tracing_release_generic_tr, }; static const struct file_operations trace_clock_fops = { .open = tracing_clock_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_single_release_tr, .write = tracing_clock_write, }; static const struct file_operations trace_time_stamp_mode_fops = { .open = tracing_time_stamp_mode_open, .read = seq_read, .llseek = seq_lseek, .release = tracing_single_release_tr, }; static const struct file_operations last_boot_fops = { .open = tracing_open_generic_tr, .read = tracing_last_boot_read, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; #ifdef CONFIG_TRACER_SNAPSHOT static const struct file_operations snapshot_fops = { .open = tracing_snapshot_open, .read = seq_read, .write = tracing_snapshot_write, .llseek = tracing_lseek, .release = tracing_snapshot_release, }; static const struct file_operations snapshot_raw_fops = { .open = snapshot_raw_open, .read = tracing_buffers_read, 
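/*
 * Illustrative userspace sketch (not part of trace.c): driving the snapshot
 * file handled by tracing_snapshot_write() above.  Writing "1" allocates the
 * max buffer if needed and swaps it with the live buffer, "0" frees it, and
 * any other value only clears the snapshot contents.  Path assumes tracefs at
 * /sys/kernel/tracing and CONFIG_TRACER_SNAPSHOT=y.
 */
#include <fcntl.h>
#include <unistd.h>

static int take_snapshot(void)
{
	int fd = open("/sys/kernel/tracing/snapshot", O_WRONLY);
	int ret;

	if (fd < 0)
		return -1;
	ret = write(fd, "1", 1) == 1 ? 0 : -1; /* swap live buffer into snapshot */
	close(fd);
	return ret;	/* read the same file afterwards to view the snapshot */
}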
.release = tracing_buffers_release, .splice_read = tracing_buffers_splice_read, }; #endif /* CONFIG_TRACER_SNAPSHOT */ /* * trace_min_max_write - Write a u64 value to a trace_min_max_param struct * @filp: The active open file structure * @ubuf: The userspace provided buffer to read value into * @cnt: The maximum number of bytes to read * @ppos: The current "file" position * * This function implements the write interface for a struct trace_min_max_param. * The filp->private_data must point to a trace_min_max_param structure that * defines where to write the value, the min and the max acceptable values, * and a lock to protect the write. */ static ssize_t trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_min_max_param *param = filp->private_data; u64 val; int err; if (!param) return -EFAULT; err = kstrtoull_from_user(ubuf, cnt, 10, &val); if (err) return err; if (param->lock) mutex_lock(param->lock); if (param->min && val < *param->min) err = -EINVAL; if (param->max && val > *param->max) err = -EINVAL; if (!err) *param->val = val; if (param->lock) mutex_unlock(param->lock); if (err) return err; return cnt; } /* * trace_min_max_read - Read a u64 value from a trace_min_max_param struct * @filp: The active open file structure * @ubuf: The userspace provided buffer to read value into * @cnt: The maximum number of bytes to read * @ppos: The current "file" position * * This function implements the read interface for a struct trace_min_max_param. * The filp->private_data must point to a trace_min_max_param struct with valid * data. */ static ssize_t trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_min_max_param *param = filp->private_data; char buf[U64_STR_SIZE]; int len; u64 val; if (!param) return -EFAULT; val = *param->val; if (cnt > sizeof(buf)) cnt = sizeof(buf); len = snprintf(buf, sizeof(buf), "%llu\n", val); return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); } const struct file_operations trace_min_max_fops = { .open = tracing_open_generic, .read = trace_min_max_read, .write = trace_min_max_write, }; #define TRACING_LOG_ERRS_MAX 8 #define TRACING_LOG_LOC_MAX 128 #define CMD_PREFIX " Command: " struct err_info { const char **errs; /* ptr to loc-specific array of err strings */ u8 type; /* index into errs -> specific err string */ u16 pos; /* caret position */ u64 ts; }; struct tracing_log_err { struct list_head list; struct err_info info; char loc[TRACING_LOG_LOC_MAX]; /* err location */ char *cmd; /* what caused err */ }; static DEFINE_MUTEX(tracing_err_log_lock); static struct tracing_log_err *alloc_tracing_log_err(int len) { struct tracing_log_err *err; err = kzalloc(sizeof(*err), GFP_KERNEL); if (!err) return ERR_PTR(-ENOMEM); err->cmd = kzalloc(len, GFP_KERNEL); if (!err->cmd) { kfree(err); return ERR_PTR(-ENOMEM); } return err; } static void free_tracing_log_err(struct tracing_log_err *err) { kfree(err->cmd); kfree(err); } static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr, int len) { struct tracing_log_err *err; char *cmd; if (tr->n_err_log_entries < TRACING_LOG_ERRS_MAX) { err = alloc_tracing_log_err(len); if (PTR_ERR(err) != -ENOMEM) tr->n_err_log_entries++; return err; } cmd = kzalloc(len, GFP_KERNEL); if (!cmd) return ERR_PTR(-ENOMEM); err = list_first_entry(&tr->err_log, struct tracing_log_err, list); kfree(err->cmd); err->cmd = cmd; list_del(&err->list); return err; } /** * err_pos - find the position of a string within a command for error 
careting * @cmd: The tracing command that caused the error * @str: The string to position the caret at within @cmd * * Finds the position of the first occurrence of @str within @cmd. The * return value can be passed to tracing_log_err() for caret placement * within @cmd. * * Returns the index within @cmd of the first occurrence of @str or 0 * if @str was not found. */ unsigned int err_pos(char *cmd, const char *str) { char *found; if (WARN_ON(!strlen(cmd))) return 0; found = strstr(cmd, str); if (found) return found - cmd; return 0; } /** * tracing_log_err - write an error to the tracing error log * @tr: The associated trace array for the error (NULL for top level array) * @loc: A string describing where the error occurred * @cmd: The tracing command that caused the error * @errs: The array of loc-specific static error strings * @type: The index into errs[], which produces the specific static err string * @pos: The position the caret should be placed in the cmd * * Writes an error into tracing/error_log of the form: * * <loc>: error: <text> * Command: <cmd> * ^ * * tracing/error_log is a small log file containing the last * TRACING_LOG_ERRS_MAX errors (8). Memory for errors isn't allocated * unless there has been a tracing error, and the error log can be * cleared and have its memory freed by writing the empty string in * truncation mode to it i.e. echo > tracing/error_log. * * NOTE: the @errs array along with the @type param are used to * produce a static error string - this string is not copied and saved * when the error is logged - only a pointer to it is saved. See * existing callers for examples of how static strings are typically * defined for use with tracing_log_err(). */ void tracing_log_err(struct trace_array *tr, const char *loc, const char *cmd, const char **errs, u8 type, u16 pos) { struct tracing_log_err *err; int len = 0; if (!tr) tr = &global_trace; len += sizeof(CMD_PREFIX) + 2 * sizeof("\n") + strlen(cmd) + 1; guard(mutex)(&tracing_err_log_lock); err = get_tracing_log_err(tr, len); if (PTR_ERR(err) == -ENOMEM) return; snprintf(err->loc, TRACING_LOG_LOC_MAX, "%s: error: ", loc); snprintf(err->cmd, len, "\n" CMD_PREFIX "%s\n", cmd); err->info.errs = errs; err->info.type = type; err->info.pos = pos; err->info.ts = local_clock(); list_add_tail(&err->list, &tr->err_log); } static void clear_tracing_err_log(struct trace_array *tr) { struct tracing_log_err *err, *next; mutex_lock(&tracing_err_log_lock); list_for_each_entry_safe(err, next, &tr->err_log, list) { list_del(&err->list); free_tracing_log_err(err); } tr->n_err_log_entries = 0; mutex_unlock(&tracing_err_log_lock); } static void *tracing_err_log_seq_start(struct seq_file *m, loff_t *pos) { struct trace_array *tr = m->private; mutex_lock(&tracing_err_log_lock); return seq_list_start(&tr->err_log, *pos); } static void *tracing_err_log_seq_next(struct seq_file *m, void *v, loff_t *pos) { struct trace_array *tr = m->private; return seq_list_next(v, &tr->err_log, pos); } static void tracing_err_log_seq_stop(struct seq_file *m, void *v) { mutex_unlock(&tracing_err_log_lock); } static void tracing_err_log_show_pos(struct seq_file *m, u16 pos) { u16 i; for (i = 0; i < sizeof(CMD_PREFIX) - 1; i++) seq_putc(m, ' '); for (i = 0; i < pos; i++) seq_putc(m, ' '); seq_puts(m, "^\n"); } static int tracing_err_log_seq_show(struct seq_file *m, void *v) { struct tracing_log_err *err = v; if (err) { const char *err_text = err->info.errs[err->info.type]; u64 sec = err->info.ts; u32 nsec; nsec = do_div(sec, NSEC_PER_SEC); seq_printf(m, 
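/*
 * Illustrative sketch (not part of trace.c): the caret position stored by
 * tracing_log_err() is simply the offset of a substring within the failing
 * command, as computed by err_pos() above.  A standalone userspace
 * equivalent, with a made-up command string:
 */
#include <string.h>

static unsigned int example_err_pos(const char *cmd, const char *str)
{
	const char *found = strstr(cmd, str);

	return found ? (unsigned int)(found - cmd) : 0;
}

/*
 * e.g. example_err_pos("hist:keys=bogus_field", "bogus_field") == 10, so the
 * "^" printed under " Command: hist:keys=bogus_field" lines up with the
 * offending field.
 */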
"[%5llu.%06u] %s%s", sec, nsec / 1000, err->loc, err_text); seq_printf(m, "%s", err->cmd); tracing_err_log_show_pos(m, err->info.pos); } return 0; } static const struct seq_operations tracing_err_log_seq_ops = { .start = tracing_err_log_seq_start, .next = tracing_err_log_seq_next, .stop = tracing_err_log_seq_stop, .show = tracing_err_log_seq_show }; static int tracing_err_log_open(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; int ret = 0; ret = tracing_check_open_get_tr(tr); if (ret) return ret; /* If this file was opened for write, then erase contents */ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) clear_tracing_err_log(tr); if (file->f_mode & FMODE_READ) { ret = seq_open(file, &tracing_err_log_seq_ops); if (!ret) { struct seq_file *m = file->private_data; m->private = tr; } else { trace_array_put(tr); } } return ret; } static ssize_t tracing_err_log_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { return count; } static int tracing_err_log_release(struct inode *inode, struct file *file) { struct trace_array *tr = inode->i_private; trace_array_put(tr); if (file->f_mode & FMODE_READ) seq_release(inode, file); return 0; } static const struct file_operations tracing_err_log_fops = { .open = tracing_err_log_open, .write = tracing_err_log_write, .read = seq_read, .llseek = tracing_lseek, .release = tracing_err_log_release, }; static int tracing_buffers_open(struct inode *inode, struct file *filp) { struct trace_array *tr = inode->i_private; struct ftrace_buffer_info *info; int ret; ret = tracing_check_open_get_tr(tr); if (ret) return ret; info = kvzalloc(sizeof(*info), GFP_KERNEL); if (!info) { trace_array_put(tr); return -ENOMEM; } mutex_lock(&trace_types_lock); info->iter.tr = tr; info->iter.cpu_file = tracing_get_cpu(inode); info->iter.trace = tr->current_trace; info->iter.array_buffer = &tr->array_buffer; info->spare = NULL; /* Force reading ring buffer for first read */ info->read = (unsigned int)-1; filp->private_data = info; tr->trace_ref++; mutex_unlock(&trace_types_lock); ret = nonseekable_open(inode, filp); if (ret < 0) trace_array_put(tr); return ret; } static __poll_t tracing_buffers_poll(struct file *filp, poll_table *poll_table) { struct ftrace_buffer_info *info = filp->private_data; struct trace_iterator *iter = &info->iter; return trace_poll(iter, filp, poll_table); } static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { struct ftrace_buffer_info *info = filp->private_data; struct trace_iterator *iter = &info->iter; void *trace_data; int page_size; ssize_t ret = 0; ssize_t size; if (!count) return 0; #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->tr->current_trace->use_max_tr) return -EBUSY; #endif page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer); /* Make sure the spare matches the current sub buffer size */ if (info->spare) { if (page_size != info->spare_size) { ring_buffer_free_read_page(iter->array_buffer->buffer, info->spare_cpu, info->spare); info->spare = NULL; } } if (!info->spare) { info->spare = ring_buffer_alloc_read_page(iter->array_buffer->buffer, iter->cpu_file); if (IS_ERR(info->spare)) { ret = PTR_ERR(info->spare); info->spare = NULL; } else { info->spare_cpu = iter->cpu_file; info->spare_size = page_size; } } if (!info->spare) return ret; /* Do we have previous read data to read? 
*/ if (info->read < page_size) goto read; again: trace_access_lock(iter->cpu_file); ret = ring_buffer_read_page(iter->array_buffer->buffer, info->spare, count, iter->cpu_file, 0); trace_access_unlock(iter->cpu_file); if (ret < 0) { if (trace_empty(iter) && !iter->closed) { if ((filp->f_flags & O_NONBLOCK)) return -EAGAIN; ret = wait_on_pipe(iter, 0); if (ret) return ret; goto again; } return 0; } info->read = 0; read: size = page_size - info->read; if (size > count) size = count; trace_data = ring_buffer_read_page_data(info->spare); ret = copy_to_user(ubuf, trace_data + info->read, size); if (ret == size) return -EFAULT; size -= ret; *ppos += size; info->read += size; return size; } static int tracing_buffers_flush(struct file *file, fl_owner_t id) { struct ftrace_buffer_info *info = file->private_data; struct trace_iterator *iter = &info->iter; iter->closed = true; /* Make sure the waiters see the new wait_index */ (void)atomic_fetch_inc_release(&iter->wait_index); ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file); return 0; } static int tracing_buffers_release(struct inode *inode, struct file *file) { struct ftrace_buffer_info *info = file->private_data; struct trace_iterator *iter = &info->iter; mutex_lock(&trace_types_lock); iter->tr->trace_ref--; __trace_array_put(iter->tr); if (info->spare) ring_buffer_free_read_page(iter->array_buffer->buffer, info->spare_cpu, info->spare); kvfree(info); mutex_unlock(&trace_types_lock); return 0; } struct buffer_ref { struct trace_buffer *buffer; void *page; int cpu; refcount_t refcount; }; static void buffer_ref_release(struct buffer_ref *ref) { if (!refcount_dec_and_test(&ref->refcount)) return; ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); kfree(ref); } static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct buffer_ref *ref = (struct buffer_ref *)buf->private; buffer_ref_release(ref); buf->private = 0; } static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { struct buffer_ref *ref = (struct buffer_ref *)buf->private; if (refcount_read(&ref->refcount) > INT_MAX/2) return false; refcount_inc(&ref->refcount); return true; } /* Pipe buffer operations for a buffer. */ static const struct pipe_buf_operations buffer_pipe_buf_ops = { .release = buffer_pipe_buf_release, .get = buffer_pipe_buf_get, }; /* * Callback from splice_to_pipe(), if we need to release some pages * at the end of the spd in case we error'ed out in filling the pipe. 
*/ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) { struct buffer_ref *ref = (struct buffer_ref *)spd->partial[i].private; buffer_ref_release(ref); spd->partial[i].private = 0; } static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { struct ftrace_buffer_info *info = file->private_data; struct trace_iterator *iter = &info->iter; struct partial_page partial_def[PIPE_DEF_BUFFERS]; struct page *pages_def[PIPE_DEF_BUFFERS]; struct splice_pipe_desc spd = { .pages = pages_def, .partial = partial_def, .nr_pages_max = PIPE_DEF_BUFFERS, .ops = &buffer_pipe_buf_ops, .spd_release = buffer_spd_release, }; struct buffer_ref *ref; bool woken = false; int page_size; int entries, i; ssize_t ret = 0; #ifdef CONFIG_TRACER_MAX_TRACE if (iter->snapshot && iter->tr->current_trace->use_max_tr) return -EBUSY; #endif page_size = ring_buffer_subbuf_size_get(iter->array_buffer->buffer); if (*ppos & (page_size - 1)) return -EINVAL; if (len & (page_size - 1)) { if (len < page_size) return -EINVAL; len &= (~(page_size - 1)); } if (splice_grow_spd(pipe, &spd)) return -ENOMEM; again: trace_access_lock(iter->cpu_file); entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file); for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= page_size) { struct page *page; int r; ref = kzalloc(sizeof(*ref), GFP_KERNEL); if (!ref) { ret = -ENOMEM; break; } refcount_set(&ref->refcount, 1); ref->buffer = iter->array_buffer->buffer; ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); if (IS_ERR(ref->page)) { ret = PTR_ERR(ref->page); ref->page = NULL; kfree(ref); break; } ref->cpu = iter->cpu_file; r = ring_buffer_read_page(ref->buffer, ref->page, len, iter->cpu_file, 1); if (r < 0) { ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); kfree(ref); break; } page = virt_to_page(ring_buffer_read_page_data(ref->page)); spd.pages[i] = page; spd.partial[i].len = page_size; spd.partial[i].offset = 0; spd.partial[i].private = (unsigned long)ref; spd.nr_pages++; *ppos += page_size; entries = ring_buffer_entries_cpu(iter->array_buffer->buffer, iter->cpu_file); } trace_access_unlock(iter->cpu_file); spd.nr_pages = i; /* did we read anything? */ if (!spd.nr_pages) { if (ret) goto out; if (woken) goto out; ret = -EAGAIN; if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) goto out; ret = wait_on_pipe(iter, iter->snapshot ? 
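/*
 * Illustrative userspace sketch (not part of trace.c): splicing binary
 * sub-buffers out of trace_pipe_raw, which is what the splice path above
 * serves.  The request must cover at least one sub-buffer and is rounded
 * down to a multiple of the sub-buffer size (assumed 4096 bytes here, the
 * default).  Path assumes tracefs at /sys/kernel/tracing.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static ssize_t splice_one_subbuf(int pipe_write_fd)
{
	int fd = open("/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw",
		      O_RDONLY | O_NONBLOCK);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = splice(fd, NULL, pipe_write_fd, NULL, 4096, SPLICE_F_NONBLOCK);
	close(fd);
	return n;
}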
0 : iter->tr->buffer_percent); if (ret) goto out; /* No need to wait after waking up when tracing is off */ if (!tracer_tracing_is_on(iter->tr)) goto out; /* Iterate one more time to collect any new data then exit */ woken = true; goto again; } ret = splice_to_pipe(pipe, &spd); out: splice_shrink_spd(&spd); return ret; } static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct ftrace_buffer_info *info = file->private_data; struct trace_iterator *iter = &info->iter; int err; if (cmd == TRACE_MMAP_IOCTL_GET_READER) { if (!(file->f_flags & O_NONBLOCK)) { err = ring_buffer_wait(iter->array_buffer->buffer, iter->cpu_file, iter->tr->buffer_percent, NULL, NULL); if (err) return err; } return ring_buffer_map_get_reader(iter->array_buffer->buffer, iter->cpu_file); } else if (cmd) { return -ENOTTY; } /* * An ioctl call with cmd 0 to the ring buffer file will wake up all * waiters */ mutex_lock(&trace_types_lock); /* Make sure the waiters see the new wait_index */ (void)atomic_fetch_inc_release(&iter->wait_index); ring_buffer_wake_waiters(iter->array_buffer->buffer, iter->cpu_file); mutex_unlock(&trace_types_lock); return 0; } #ifdef CONFIG_TRACER_MAX_TRACE static int get_snapshot_map(struct trace_array *tr) { int err = 0; /* * Called with mmap_lock held. lockdep would be unhappy if we would now * take trace_types_lock. Instead use the specific * snapshot_trigger_lock. */ spin_lock(&tr->snapshot_trigger_lock); if (tr->snapshot || tr->mapped == UINT_MAX) err = -EBUSY; else tr->mapped++; spin_unlock(&tr->snapshot_trigger_lock); /* Wait for update_max_tr() to observe iter->tr->mapped */ if (tr->mapped == 1) synchronize_rcu(); return err; } static void put_snapshot_map(struct trace_array *tr) { spin_lock(&tr->snapshot_trigger_lock); if (!WARN_ON(!tr->mapped)) tr->mapped--; spin_unlock(&tr->snapshot_trigger_lock); } #else static inline int get_snapshot_map(struct trace_array *tr) { return 0; } static inline void put_snapshot_map(struct trace_array *tr) { } #endif static void tracing_buffers_mmap_close(struct vm_area_struct *vma) { struct ftrace_buffer_info *info = vma->vm_file->private_data; struct trace_iterator *iter = &info->iter; WARN_ON(ring_buffer_unmap(iter->array_buffer->buffer, iter->cpu_file)); put_snapshot_map(iter->tr); } static const struct vm_operations_struct tracing_buffers_vmops = { .close = tracing_buffers_mmap_close, }; static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma) { struct ftrace_buffer_info *info = filp->private_data; struct trace_iterator *iter = &info->iter; int ret = 0; ret = get_snapshot_map(iter->tr); if (ret) return ret; ret = ring_buffer_map(iter->array_buffer->buffer, iter->cpu_file, vma); if (ret) put_snapshot_map(iter->tr); vma->vm_ops = &tracing_buffers_vmops; return ret; } static const struct file_operations tracing_buffers_fops = { .open = tracing_buffers_open, .read = tracing_buffers_read, .poll = tracing_buffers_poll, .release = tracing_buffers_release, .flush = tracing_buffers_flush, .splice_read = tracing_buffers_splice_read, .unlocked_ioctl = tracing_buffers_ioctl, .mmap = tracing_buffers_mmap, }; static ssize_t tracing_stats_read(struct file *filp, char __user *ubuf, size_t count, loff_t *ppos) { struct inode *inode = file_inode(filp); struct trace_array *tr = inode->i_private; struct array_buffer *trace_buf = &tr->array_buffer; int cpu = tracing_get_cpu(inode); struct trace_seq *s; unsigned long cnt; unsigned long long t; unsigned long usec_rem; s = kmalloc(sizeof(*s), GFP_KERNEL); if (!s) 
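/*
 * Illustrative sketch (not part of trace.c): the per-CPU stats output below
 * prints ring-buffer timestamps as "seconds.microseconds" by converting
 * nanoseconds to microseconds and then splitting on USEC_PER_SEC with
 * do_div().  A plain userspace equivalent of that arithmetic:
 */
#include <inttypes.h>
#include <stdio.h>

static void print_ts_sec_usec(uint64_t ts_ns)
{
	uint64_t usecs = ts_ns / 1000;		/* ns2usecs()                     */
	uint64_t sec = usecs / 1000000;		/* quotient do_div() leaves in t  */
	uint64_t usec_rem = usecs % 1000000;	/* remainder do_div() returns     */

	printf("%5" PRIu64 ".%06" PRIu64 "\n", sec, usec_rem);
}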
return -ENOMEM; trace_seq_init(s); cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "entries: %ld\n", cnt); cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "overrun: %ld\n", cnt); cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "commit overrun: %ld\n", cnt); cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "bytes: %ld\n", cnt); if (trace_clocks[tr->clock_id].in_ns) { /* local or global for trace_clock */ t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); usec_rem = do_div(t, USEC_PER_SEC); trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem); t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer)); usec_rem = do_div(t, USEC_PER_SEC); trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem); } else { /* counter or tsc mode for trace_clock */ trace_seq_printf(s, "oldest event ts: %llu\n", ring_buffer_oldest_event_ts(trace_buf->buffer, cpu)); trace_seq_printf(s, "now ts: %llu\n", ring_buffer_time_stamp(trace_buf->buffer)); } cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "dropped events: %ld\n", cnt); cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu); trace_seq_printf(s, "read events: %ld\n", cnt); count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, trace_seq_used(s)); kfree(s); return count; } static const struct file_operations tracing_stats_fops = { .open = tracing_open_generic_tr, .read = tracing_stats_read, .llseek = generic_file_llseek, .release = tracing_release_generic_tr, }; #ifdef CONFIG_DYNAMIC_FTRACE static ssize_t tracing_read_dyn_info(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { ssize_t ret; char *buf; int r; /* 512 should be plenty to hold the amount needed */ #define DYN_INFO_BUF_SIZE 512 buf = kmalloc(DYN_INFO_BUF_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; r = scnprintf(buf, DYN_INFO_BUF_SIZE, "%ld pages:%ld groups: %ld\n" "ftrace boot update time = %llu (ns)\n" "ftrace module total update time = %llu (ns)\n", ftrace_update_tot_cnt, ftrace_number_of_pages, ftrace_number_of_groups, ftrace_update_time, ftrace_total_mod_time); ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); kfree(buf); return ret; } static const struct file_operations tracing_dyn_info_fops = { .open = tracing_open_generic, .read = tracing_read_dyn_info, .llseek = generic_file_llseek, }; #endif /* CONFIG_DYNAMIC_FTRACE */ #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) static void ftrace_snapshot(unsigned long ip, unsigned long parent_ip, struct trace_array *tr, struct ftrace_probe_ops *ops, void *data) { tracing_snapshot_instance(tr); } static void ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, struct trace_array *tr, struct ftrace_probe_ops *ops, void *data) { struct ftrace_func_mapper *mapper = data; long *count = NULL; if (mapper) count = (long *)ftrace_func_mapper_find_ip(mapper, ip); if (count) { if (*count <= 0) return; (*count)--; } tracing_snapshot_instance(tr); } static int ftrace_snapshot_print(struct seq_file *m, unsigned long ip, struct ftrace_probe_ops *ops, void *data) { struct ftrace_func_mapper *mapper = data; long *count = NULL; seq_printf(m, "%ps:", (void *)ip); seq_puts(m, "snapshot"); if (mapper) count = (long *)ftrace_func_mapper_find_ip(mapper, ip); if (count) seq_printf(m, ":count=%ld\n", *count); else seq_puts(m, ":unlimited\n"); return 0; } static int ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr, 
unsigned long ip, void *init_data, void **data) { struct ftrace_func_mapper *mapper = *data; if (!mapper) { mapper = allocate_ftrace_func_mapper(); if (!mapper) return -ENOMEM; *data = mapper; } return ftrace_func_mapper_add_ip(mapper, ip, init_data); } static void ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr, unsigned long ip, void *data) { struct ftrace_func_mapper *mapper = data; if (!ip) { if (!mapper) return; free_ftrace_func_mapper(mapper, NULL); return; } ftrace_func_mapper_remove_ip(mapper, ip); } static struct ftrace_probe_ops snapshot_probe_ops = { .func = ftrace_snapshot, .print = ftrace_snapshot_print, }; static struct ftrace_probe_ops snapshot_count_probe_ops = { .func = ftrace_count_snapshot, .print = ftrace_snapshot_print, .init = ftrace_snapshot_init, .free = ftrace_snapshot_free, }; static int ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash, char *glob, char *cmd, char *param, int enable) { struct ftrace_probe_ops *ops; void *count = (void *)-1; char *number; int ret; if (!tr) return -ENODEV; /* hash funcs only work with set_ftrace_filter */ if (!enable) return -EINVAL; ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops; if (glob[0] == '!') { ret = unregister_ftrace_function_probe_func(glob+1, tr, ops); if (!ret) tracing_disarm_snapshot(tr); return ret; } if (!param) goto out_reg; number = strsep(&param, ":"); if (!strlen(number)) goto out_reg; /* * We use the callback data field (which is a pointer) * as our counter. */ ret = kstrtoul(number, 0, (unsigned long *)&count); if (ret) return ret; out_reg: ret = tracing_arm_snapshot(tr); if (ret < 0) goto out; ret = register_ftrace_function_probe(glob, tr, ops, count); if (ret < 0) tracing_disarm_snapshot(tr); out: return ret < 0 ? 
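/*
 * Illustrative userspace sketch (not part of trace.c): arming the "snapshot"
 * function command parsed by ftrace_trace_snapshot_callback() above.  The
 * string written to set_ftrace_filter is "<function>:snapshot[:count]", and a
 * leading '!' removes the probe again.  Path assumes tracefs at
 * /sys/kernel/tracing with CONFIG_DYNAMIC_FTRACE=y and CONFIG_TRACER_SNAPSHOT=y.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int arm_snapshot_cmd(const char *cmd)	/* e.g. "schedule:snapshot:3" */
{
	int fd = open("/sys/kernel/tracing/set_ftrace_filter",
		      O_WRONLY | O_APPEND);
	int ret = -1;

	if (fd < 0)
		return -1;
	if (write(fd, cmd, strlen(cmd)) == (ssize_t)strlen(cmd))
		ret = 0;
	close(fd);
	return ret;
}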
ret : 0; } static struct ftrace_func_command ftrace_snapshot_cmd = { .name = "snapshot", .func = ftrace_trace_snapshot_callback, }; static __init int register_snapshot_cmd(void) { return register_ftrace_command(&ftrace_snapshot_cmd); } #else static inline __init int register_snapshot_cmd(void) { return 0; } #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */ static struct dentry *tracing_get_dentry(struct trace_array *tr) { if (WARN_ON(!tr->dir)) return ERR_PTR(-ENODEV); /* Top directory uses NULL as the parent */ if (tr->flags & TRACE_ARRAY_FL_GLOBAL) return NULL; /* All sub buffers have a descriptor */ return tr->dir; } static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) { struct dentry *d_tracer; if (tr->percpu_dir) return tr->percpu_dir; d_tracer = tracing_get_dentry(tr); if (IS_ERR(d_tracer)) return NULL; tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer); MEM_FAIL(!tr->percpu_dir, "Could not create tracefs directory 'per_cpu/%d'\n", cpu); return tr->percpu_dir; } static struct dentry * trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent, void *data, long cpu, const struct file_operations *fops) { struct dentry *ret = trace_create_file(name, mode, parent, data, fops); if (ret) /* See tracing_get_cpu() */ d_inode(ret)->i_cdev = (void *)(cpu + 1); return ret; } static void tracing_init_tracefs_percpu(struct trace_array *tr, long cpu) { struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); struct dentry *d_cpu; char cpu_dir[30]; /* 30 characters should be more than enough */ if (!d_percpu) return; snprintf(cpu_dir, 30, "cpu%ld", cpu); d_cpu = tracefs_create_dir(cpu_dir, d_percpu); if (!d_cpu) { pr_warn("Could not create tracefs '%s' entry\n", cpu_dir); return; } /* per cpu trace_pipe */ trace_create_cpu_file("trace_pipe", TRACE_MODE_READ, d_cpu, tr, cpu, &tracing_pipe_fops); /* per cpu trace */ trace_create_cpu_file("trace", TRACE_MODE_WRITE, d_cpu, tr, cpu, &tracing_fops); trace_create_cpu_file("trace_pipe_raw", TRACE_MODE_READ, d_cpu, tr, cpu, &tracing_buffers_fops); trace_create_cpu_file("stats", TRACE_MODE_READ, d_cpu, tr, cpu, &tracing_stats_fops); trace_create_cpu_file("buffer_size_kb", TRACE_MODE_READ, d_cpu, tr, cpu, &tracing_entries_fops); if (tr->range_addr_start) trace_create_cpu_file("buffer_meta", TRACE_MODE_READ, d_cpu, tr, cpu, &tracing_buffer_meta_fops); #ifdef CONFIG_TRACER_SNAPSHOT if (!tr->range_addr_start) { trace_create_cpu_file("snapshot", TRACE_MODE_WRITE, d_cpu, tr, cpu, &snapshot_fops); trace_create_cpu_file("snapshot_raw", TRACE_MODE_READ, d_cpu, tr, cpu, &snapshot_raw_fops); } #endif } #ifdef CONFIG_FTRACE_SELFTEST /* Let selftest have access to static functions in this file */ #include "trace_selftest.c" #endif static ssize_t trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_option_dentry *topt = filp->private_data; char *buf; if (topt->flags->val & topt->opt->bit) buf = "1\n"; else buf = "0\n"; return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); } static ssize_t trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_option_dentry *topt = filp->private_data; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (val != 0 && val != 1) return -EINVAL; if (!!(topt->flags->val & topt->opt->bit) != val) { mutex_lock(&trace_types_lock); ret = __set_tracer_option(topt->tr, topt->flags, topt->opt, !val); mutex_unlock(&trace_types_lock); 
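/*
 * Illustrative userspace sketch (not part of trace.c): the per-tracer and
 * core option files created in this area accept only "0" or "1" and read
 * back as "0\n" or "1\n".  Path assumes tracefs at /sys/kernel/tracing and
 * that the chosen option file (e.g. the core "sym-offset" flag) exists on
 * this kernel.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int set_trace_option(const char *name, int on)
{
	char path[128];
	int fd, ret;

	snprintf(path, sizeof(path), "/sys/kernel/tracing/options/%s", name);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ret = write(fd, on ? "1" : "0", 1) == 1 ? 0 : -1;
	close(fd);
	return ret;
}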
if (ret) return ret; } *ppos += cnt; return cnt; } static int tracing_open_options(struct inode *inode, struct file *filp) { struct trace_option_dentry *topt = inode->i_private; int ret; ret = tracing_check_open_get_tr(topt->tr); if (ret) return ret; filp->private_data = inode->i_private; return 0; } static int tracing_release_options(struct inode *inode, struct file *file) { struct trace_option_dentry *topt = file->private_data; trace_array_put(topt->tr); return 0; } static const struct file_operations trace_options_fops = { .open = tracing_open_options, .read = trace_options_read, .write = trace_options_write, .llseek = generic_file_llseek, .release = tracing_release_options, }; /* * In order to pass in both the trace_array descriptor as well as the index * to the flag that the trace option file represents, the trace_array * has a character array of trace_flags_index[], which holds the index * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc. * The address of this character array is passed to the flag option file * read/write callbacks. * * In order to extract both the index and the trace_array descriptor, * get_tr_index() uses the following algorithm. * * idx = *ptr; * * As the pointer itself contains the address of the index (remember * index[1] == 1). * * Then to get the trace_array descriptor, by subtracting that index * from the ptr, we get to the start of the index itself. * * ptr - idx == &index[0] * * Then a simple container_of() from that pointer gets us to the * trace_array descriptor. */ static void get_tr_index(void *data, struct trace_array **ptr, unsigned int *pindex) { *pindex = *(unsigned char *)data; *ptr = container_of(data - *pindex, struct trace_array, trace_flags_index); } static ssize_t trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { void *tr_index = filp->private_data; struct trace_array *tr; unsigned int index; char *buf; get_tr_index(tr_index, &tr, &index); if (tr->trace_flags & (1 << index)) buf = "1\n"; else buf = "0\n"; return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); } static ssize_t trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { void *tr_index = filp->private_data; struct trace_array *tr; unsigned int index; unsigned long val; int ret; get_tr_index(tr_index, &tr, &index); ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (val != 0 && val != 1) return -EINVAL; mutex_lock(&event_mutex); mutex_lock(&trace_types_lock); ret = set_tracer_flag(tr, 1 << index, val); mutex_unlock(&trace_types_lock); mutex_unlock(&event_mutex); if (ret < 0) return ret; *ppos += cnt; return cnt; } static const struct file_operations trace_options_core_fops = { .open = tracing_open_generic, .read = trace_options_core_read, .write = trace_options_core_write, .llseek = generic_file_llseek, }; struct dentry *trace_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) { struct dentry *ret; ret = tracefs_create_file(name, mode, parent, data, fops); if (!ret) pr_warn("Could not create tracefs '%s' entry\n", name); return ret; } static struct dentry *trace_options_init_dentry(struct trace_array *tr) { struct dentry *d_tracer; if (tr->options) return tr->options; d_tracer = tracing_get_dentry(tr); if (IS_ERR(d_tracer)) return NULL; tr->options = tracefs_create_dir("options", d_tracer); if (!tr->options) { pr_warn("Could not create tracefs directory 'options'\n"); return NULL; } return 
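/*
 * Illustrative sketch (not part of trace.c): a standalone demonstration of
 * the pointer trick documented above for get_tr_index().  Each element of an
 * index array holds its own index, so subtracting the stored value from the
 * element's address yields &index[0], and container_of() then recovers the
 * enclosing structure.  The struct and helper names below are made up.
 */
#include <stddef.h>

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_array {
	unsigned long flags;
	unsigned char index[8];		/* index[i] == i, as in trace_flags_index */
};

static struct demo_array *demo_get_owner(void *data, unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;

	/* (char *)data - *pindex == &owner->index[0] */
	return demo_container_of((char *)data - *pindex,
				 struct demo_array, index);
}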
tr->options; } static void create_trace_option_file(struct trace_array *tr, struct trace_option_dentry *topt, struct tracer_flags *flags, struct tracer_opt *opt) { struct dentry *t_options; t_options = trace_options_init_dentry(tr); if (!t_options) return; topt->flags = flags; topt->opt = opt; topt->tr = tr; topt->entry = trace_create_file(opt->name, TRACE_MODE_WRITE, t_options, topt, &trace_options_fops); } static void create_trace_option_files(struct trace_array *tr, struct tracer *tracer) { struct trace_option_dentry *topts; struct trace_options *tr_topts; struct tracer_flags *flags; struct tracer_opt *opts; int cnt; int i; if (!tracer) return; flags = tracer->flags; if (!flags || !flags->opts) return; /* * If this is an instance, only create flags for tracers * the instance may have. */ if (!trace_ok_for_array(tracer, tr)) return; for (i = 0; i < tr->nr_topts; i++) { /* Make sure there's no duplicate flags. */ if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags)) return; } opts = flags->opts; for (cnt = 0; opts[cnt].name; cnt++) ; topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); if (!topts) return; tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1), GFP_KERNEL); if (!tr_topts) { kfree(topts); return; } tr->topts = tr_topts; tr->topts[tr->nr_topts].tracer = tracer; tr->topts[tr->nr_topts].topts = topts; tr->nr_topts++; for (cnt = 0; opts[cnt].name; cnt++) { create_trace_option_file(tr, &topts[cnt], flags, &opts[cnt]); MEM_FAIL(topts[cnt].entry == NULL, "Failed to create trace option: %s", opts[cnt].name); } } static struct dentry * create_trace_option_core_file(struct trace_array *tr, const char *option, long index) { struct dentry *t_options; t_options = trace_options_init_dentry(tr); if (!t_options) return NULL; return trace_create_file(option, TRACE_MODE_WRITE, t_options, (void *)&tr->trace_flags_index[index], &trace_options_core_fops); } static void create_trace_options_dir(struct trace_array *tr) { struct dentry *t_options; bool top_level = tr == &global_trace; int i; t_options = trace_options_init_dentry(tr); if (!t_options) return; for (i = 0; trace_options[i]; i++) { if (top_level || !((1 << i) & TOP_LEVEL_TRACE_FLAGS)) create_trace_option_core_file(tr, trace_options[i], i); } } static ssize_t rb_simple_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[64]; int r; r = tracer_tracing_is_on(tr); r = sprintf(buf, "%d\n", r); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t rb_simple_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; struct trace_buffer *buffer = tr->array_buffer.buffer; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (buffer) { mutex_lock(&trace_types_lock); if (!!val == tracer_tracing_is_on(tr)) { val = 0; /* do nothing */ } else if (val) { tracer_tracing_on(tr); if (tr->current_trace->start) tr->current_trace->start(tr); } else { tracer_tracing_off(tr); if (tr->current_trace->stop) tr->current_trace->stop(tr); /* Wake up any waiters */ ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS); } mutex_unlock(&trace_types_lock); } (*ppos)++; return cnt; } static const struct file_operations rb_simple_fops = { .open = tracing_open_generic_tr, .read = rb_simple_read, .write = rb_simple_write, .release = tracing_release_generic_tr, .llseek = default_llseek, }; static ssize_t buffer_percent_read(struct file *filp, char 
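/*
 * Illustrative userspace sketch (not part of trace.c): rb_simple_write()
 * above services the tracing_on file (and calls the current tracer's
 * start/stop callbacks), while buffer_percent (handled just below) takes
 * 0-100 and sets how full a per-CPU buffer should be before blocked
 * trace_pipe readers and pollers are woken, 0 meaning wake on any data.
 * A reader waiting on that condition, assuming tracefs at /sys/kernel/tracing:
 */
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static int wait_for_trace_data(int timeout_ms)
{
	struct pollfd pfd = { .events = POLLIN };
	int ret;

	pfd.fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY | O_NONBLOCK);
	if (pfd.fd < 0)
		return -1;
	ret = poll(&pfd, 1, timeout_ms);	/* wakes per buffer_percent */
	close(pfd.fd);
	return ret;
}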
__user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; char buf[64]; int r; r = tr->buffer_percent; r = sprintf(buf, "%d\n", r); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t buffer_percent_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; unsigned long val; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; if (val > 100) return -EINVAL; tr->buffer_percent = val; (*ppos)++; return cnt; } static const struct file_operations buffer_percent_fops = { .open = tracing_open_generic_tr, .read = buffer_percent_read, .write = buffer_percent_write, .release = tracing_release_generic_tr, .llseek = default_llseek, }; static ssize_t buffer_subbuf_size_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; size_t size; char buf[64]; int order; int r; order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer); size = (PAGE_SIZE << order) / 1024; r = sprintf(buf, "%zd\n", size); return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); } static ssize_t buffer_subbuf_size_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { struct trace_array *tr = filp->private_data; unsigned long val; int old_order; int order; int pages; int ret; ret = kstrtoul_from_user(ubuf, cnt, 10, &val); if (ret) return ret; val *= 1024; /* value passed in is in KB */ pages = DIV_ROUND_UP(val, PAGE_SIZE); order = fls(pages - 1); /* limit between 1 and 128 system pages */ if (order < 0 || order > 7) return -EINVAL; /* Do not allow tracing while changing the order of the ring buffer */ tracing_stop_tr(tr); old_order = ring_buffer_subbuf_order_get(tr->array_buffer.buffer); if (old_order == order) goto out; ret = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, order); if (ret) goto out; #ifdef CONFIG_TRACER_MAX_TRACE if (!tr->allocated_snapshot) goto out_max; ret = ring_buffer_subbuf_order_set(tr->max_buffer.buffer, order); if (ret) { /* Put back the old order */ cnt = ring_buffer_subbuf_order_set(tr->array_buffer.buffer, old_order); if (WARN_ON_ONCE(cnt)) { /* * AARGH! We are left with different orders! * The max buffer is our "snapshot" buffer. * When a tracer needs a snapshot (one of the * latency tracers), it swaps the max buffer * with the saved snap shot. We succeeded to * update the order of the main buffer, but failed to * update the order of the max buffer. But when we tried * to reset the main buffer to the original size, we * failed there too. This is very unlikely to * happen, but if it does, warn and kill all * tracing. */ tracing_disabled = 1; } goto out; } out_max: #endif (*ppos)++; out: if (ret) cnt = ret; tracing_start_tr(tr); return cnt; } static const struct file_operations buffer_subbuf_size_fops = { .open = tracing_open_generic_tr, .read = buffer_subbuf_size_read, .write = buffer_subbuf_size_write, .release = tracing_release_generic_tr, .llseek = default_llseek, }; static struct dentry *trace_instance_dir; static void init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer); static int allocate_trace_buffer(struct trace_array *tr, struct array_buffer *buf, int size) { enum ring_buffer_flags rb_flags; rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? 
RB_FL_OVERWRITE : 0; buf->tr = tr; if (tr->range_addr_start && tr->range_addr_size) { buf->buffer = ring_buffer_alloc_range(size, rb_flags, 0, tr->range_addr_start, tr->range_addr_size); ring_buffer_last_boot_delta(buf->buffer, &tr->text_delta, &tr->data_delta); /* * This is basically the same as a mapped buffer, * with the same restrictions. */ tr->mapped++; } else { buf->buffer = ring_buffer_alloc(size, rb_flags); } if (!buf->buffer) return -ENOMEM; buf->data = alloc_percpu(struct trace_array_cpu); if (!buf->data) { ring_buffer_free(buf->buffer); buf->buffer = NULL; return -ENOMEM; } /* Allocate the first page for all buffers */ set_buffer_entries(&tr->array_buffer, ring_buffer_size(tr->array_buffer.buffer, 0)); return 0; } static void free_trace_buffer(struct array_buffer *buf) { if (buf->buffer) { ring_buffer_free(buf->buffer); buf->buffer = NULL; free_percpu(buf->data); buf->data = NULL; } } static int allocate_trace_buffers(struct trace_array *tr, int size) { int ret; ret = allocate_trace_buffer(tr, &tr->array_buffer, size); if (ret) return ret; #ifdef CONFIG_TRACER_MAX_TRACE /* Fix mapped buffer trace arrays do not have snapshot buffers */ if (tr->range_addr_start) return 0; ret = allocate_trace_buffer(tr, &tr->max_buffer, allocate_snapshot ? size : 1); if (MEM_FAIL(ret, "Failed to allocate trace buffer\n")) { free_trace_buffer(&tr->array_buffer); return -ENOMEM; } tr->allocated_snapshot = allocate_snapshot; allocate_snapshot = false; #endif return 0; } static void free_trace_buffers(struct trace_array *tr) { if (!tr) return; free_trace_buffer(&tr->array_buffer); #ifdef CONFIG_TRACER_MAX_TRACE free_trace_buffer(&tr->max_buffer); #endif } static void init_trace_flags_index(struct trace_array *tr) { int i; /* Used by the trace options files */ for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) tr->trace_flags_index[i] = i; } static void __update_tracer_options(struct trace_array *tr) { struct tracer *t; for (t = trace_types; t; t = t->next) add_tracer_options(tr, t); } static void update_tracer_options(struct trace_array *tr) { mutex_lock(&trace_types_lock); tracer_options_updated = true; __update_tracer_options(tr); mutex_unlock(&trace_types_lock); } /* Must have trace_types_lock held */ struct trace_array *trace_array_find(const char *instance) { struct trace_array *tr, *found = NULL; list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (tr->name && strcmp(tr->name, instance) == 0) { found = tr; break; } } return found; } struct trace_array *trace_array_find_get(const char *instance) { struct trace_array *tr; mutex_lock(&trace_types_lock); tr = trace_array_find(instance); if (tr) tr->ref++; mutex_unlock(&trace_types_lock); return tr; } static int trace_array_create_dir(struct trace_array *tr) { int ret; tr->dir = tracefs_create_dir(tr->name, trace_instance_dir); if (!tr->dir) return -EINVAL; ret = event_trace_add_tracer(tr->dir, tr); if (ret) { tracefs_remove(tr->dir); return ret; } init_tracer_tracefs(tr, tr->dir); __update_tracer_options(tr); return ret; } static struct trace_array * trace_array_create_systems(const char *name, const char *systems, unsigned long range_addr_start, unsigned long range_addr_size) { struct trace_array *tr; int ret; ret = -ENOMEM; tr = kzalloc(sizeof(*tr), GFP_KERNEL); if (!tr) return ERR_PTR(ret); tr->name = kstrdup(name, GFP_KERNEL); if (!tr->name) goto out_free_tr; if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) goto out_free_tr; if (!zalloc_cpumask_var(&tr->pipe_cpumask, GFP_KERNEL)) goto out_free_tr; if (systems) { tr->system_names = 
kstrdup_const(systems, GFP_KERNEL); if (!tr->system_names) goto out_free_tr; } /* Only for boot up memory mapped ring buffers */ tr->range_addr_start = range_addr_start; tr->range_addr_size = range_addr_size; tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS; cpumask_copy(tr->tracing_cpumask, cpu_all_mask); raw_spin_lock_init(&tr->start_lock); tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; #ifdef CONFIG_TRACER_MAX_TRACE spin_lock_init(&tr->snapshot_trigger_lock); #endif tr->current_trace = &nop_trace; INIT_LIST_HEAD(&tr->systems); INIT_LIST_HEAD(&tr->events); INIT_LIST_HEAD(&tr->hist_vars); INIT_LIST_HEAD(&tr->err_log); #ifdef CONFIG_MODULES INIT_LIST_HEAD(&tr->mod_events); #endif if (allocate_trace_buffers(tr, trace_buf_size) < 0) goto out_free_tr; /* The ring buffer is defaultly expanded */ trace_set_ring_buffer_expanded(tr); if (ftrace_allocate_ftrace_ops(tr) < 0) goto out_free_tr; ftrace_init_trace_array(tr); init_trace_flags_index(tr); if (trace_instance_dir) { ret = trace_array_create_dir(tr); if (ret) goto out_free_tr; } else __trace_early_add_events(tr); list_add(&tr->list, &ftrace_trace_arrays); tr->ref++; return tr; out_free_tr: ftrace_free_ftrace_ops(tr); free_trace_buffers(tr); free_cpumask_var(tr->pipe_cpumask); free_cpumask_var(tr->tracing_cpumask); kfree_const(tr->system_names); kfree(tr->name); kfree(tr); return ERR_PTR(ret); } static struct trace_array *trace_array_create(const char *name) { return trace_array_create_systems(name, NULL, 0, 0); } static int instance_mkdir(const char *name) { struct trace_array *tr; int ret; guard(mutex)(&event_mutex); guard(mutex)(&trace_types_lock); ret = -EEXIST; if (trace_array_find(name)) return -EEXIST; tr = trace_array_create(name); ret = PTR_ERR_OR_ZERO(tr); return ret; } static u64 map_pages(u64 start, u64 size) { struct page **pages; phys_addr_t page_start; unsigned int page_count; unsigned int i; void *vaddr; page_count = DIV_ROUND_UP(size, PAGE_SIZE); page_start = start; pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL); if (!pages) return 0; for (i = 0; i < page_count; i++) { phys_addr_t addr = page_start + i * PAGE_SIZE; pages[i] = pfn_to_page(addr >> PAGE_SHIFT); } vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL); kfree(pages); return (u64)(unsigned long)vaddr; } /** * trace_array_get_by_name - Create/Lookup a trace array, given its name. * @name: The name of the trace array to be looked up/created. * @systems: A list of systems to create event directories for (NULL for all) * * Returns pointer to trace array with given name. * NULL, if it cannot be created. * * NOTE: This function increments the reference counter associated with the * trace array returned. This makes sure it cannot be freed while in use. * Use trace_array_put() once the trace array is no longer needed. * If the trace_array is to be freed, trace_array_destroy() needs to * be called after the trace_array_put(), or simply let user space delete * it from the tracefs instances directory. But until the * trace_array_put() is called, user space can not delete it. 
* */ struct trace_array *trace_array_get_by_name(const char *name, const char *systems) { struct trace_array *tr; guard(mutex)(&event_mutex); guard(mutex)(&trace_types_lock); list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (tr->name && strcmp(tr->name, name) == 0) { tr->ref++; return tr; } } tr = trace_array_create_systems(name, systems, 0, 0); if (IS_ERR(tr)) tr = NULL; else tr->ref++; return tr; } EXPORT_SYMBOL_GPL(trace_array_get_by_name); static int __remove_instance(struct trace_array *tr) { int i; /* Reference counter for a newly created trace array = 1. */ if (tr->ref > 1 || (tr->current_trace && tr->trace_ref)) return -EBUSY; list_del(&tr->list); /* Disable all the flags that were enabled coming in */ for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) { if ((1 << i) & ZEROED_TRACE_FLAGS) set_tracer_flag(tr, 1 << i, 0); } if (printk_trace == tr) update_printk_trace(&global_trace); tracing_set_nop(tr); clear_ftrace_function_probes(tr); event_trace_del_tracer(tr); ftrace_clear_pids(tr); ftrace_destroy_function_files(tr); tracefs_remove(tr->dir); free_percpu(tr->last_func_repeats); free_trace_buffers(tr); clear_tracing_err_log(tr); for (i = 0; i < tr->nr_topts; i++) { kfree(tr->topts[i].topts); } kfree(tr->topts); free_cpumask_var(tr->pipe_cpumask); free_cpumask_var(tr->tracing_cpumask); kfree_const(tr->system_names); kfree(tr->name); kfree(tr); return 0; } int trace_array_destroy(struct trace_array *this_tr) { struct trace_array *tr; if (!this_tr) return -EINVAL; guard(mutex)(&event_mutex); guard(mutex)(&trace_types_lock); /* Making sure trace array exists before destroying it. */ list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (tr == this_tr) return __remove_instance(tr); } return -ENODEV; } EXPORT_SYMBOL_GPL(trace_array_destroy); static int instance_rmdir(const char *name) { struct trace_array *tr; guard(mutex)(&event_mutex); guard(mutex)(&trace_types_lock); tr = trace_array_find(name); if (!tr) return -ENODEV; return __remove_instance(tr); } static __init void create_trace_instances(struct dentry *d_tracer) { struct trace_array *tr; trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer, instance_mkdir, instance_rmdir); if (MEM_FAIL(!trace_instance_dir, "Failed to create instances directory\n")) return; guard(mutex)(&event_mutex); guard(mutex)(&trace_types_lock); list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (!tr->name) continue; if (MEM_FAIL(trace_array_create_dir(tr) < 0, "Failed to create instance directory\n")) return; } } static void init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer) { int cpu; trace_create_file("available_tracers", TRACE_MODE_READ, d_tracer, tr, &show_traces_fops); trace_create_file("current_tracer", TRACE_MODE_WRITE, d_tracer, tr, &set_tracer_fops); trace_create_file("tracing_cpumask", TRACE_MODE_WRITE, d_tracer, tr, &tracing_cpumask_fops); trace_create_file("trace_options", TRACE_MODE_WRITE, d_tracer, tr, &tracing_iter_fops); trace_create_file("trace", TRACE_MODE_WRITE, d_tracer, tr, &tracing_fops); trace_create_file("trace_pipe", TRACE_MODE_READ, d_tracer, tr, &tracing_pipe_fops); trace_create_file("buffer_size_kb", TRACE_MODE_WRITE, d_tracer, tr, &tracing_entries_fops); trace_create_file("buffer_total_size_kb", TRACE_MODE_READ, d_tracer, tr, &tracing_total_entries_fops); trace_create_file("free_buffer", 0200, d_tracer, tr, &tracing_free_buffer_fops); trace_create_file("trace_marker", 0220, d_tracer, tr, &tracing_mark_fops); tr->trace_marker_file = __find_event_file(tr, "ftrace", "print"); 
trace_create_file("trace_marker_raw", 0220, d_tracer, tr, &tracing_mark_raw_fops); trace_create_file("trace_clock", TRACE_MODE_WRITE, d_tracer, tr, &trace_clock_fops); trace_create_file("tracing_on", TRACE_MODE_WRITE, d_tracer, tr, &rb_simple_fops); trace_create_file("timestamp_mode", TRACE_MODE_READ, d_tracer, tr, &trace_time_stamp_mode_fops); tr->buffer_percent = 50; trace_create_file("buffer_percent", TRACE_MODE_WRITE, d_tracer, tr, &buffer_percent_fops); trace_create_file("buffer_subbuf_size_kb", TRACE_MODE_WRITE, d_tracer, tr, &buffer_subbuf_size_fops); create_trace_options_dir(tr); #ifdef CONFIG_TRACER_MAX_TRACE trace_create_maxlat_file(tr, d_tracer); #endif if (ftrace_create_function_files(tr, d_tracer)) MEM_FAIL(1, "Could not allocate function filter files"); if (tr->range_addr_start) { trace_create_file("last_boot_info", TRACE_MODE_READ, d_tracer, tr, &last_boot_fops); #ifdef CONFIG_TRACER_SNAPSHOT } else { trace_create_file("snapshot", TRACE_MODE_WRITE, d_tracer, tr, &snapshot_fops); #endif } trace_create_file("error_log", TRACE_MODE_WRITE, d_tracer, tr, &tracing_err_log_fops); for_each_tracing_cpu(cpu) tracing_init_tracefs_percpu(tr, cpu); ftrace_init_tracefs(tr, d_tracer); } static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore) { struct vfsmount *mnt; struct file_system_type *type; /* * To maintain backward compatibility for tools that mount * debugfs to get to the tracing facility, tracefs is automatically * mounted to the debugfs/tracing directory. */ type = get_fs_type("tracefs"); if (!type) return NULL; mnt = vfs_submount(mntpt, type, "tracefs", NULL); put_filesystem(type); if (IS_ERR(mnt)) return NULL; mntget(mnt); return mnt; } /** * tracing_init_dentry - initialize top level trace array * * This is called when creating files or directories in the tracing * directory. It is called via fs_initcall() by any of the boot up code * and expects to return the dentry of the top level tracing directory. */ int tracing_init_dentry(void) { struct trace_array *tr = &global_trace; if (security_locked_down(LOCKDOWN_TRACEFS)) { pr_warn("Tracing disabled due to lockdown\n"); return -EPERM; } /* The top level trace array uses NULL as parent */ if (tr->dir) return 0; if (WARN_ON(!tracefs_initialized())) return -ENODEV; /* * As there may still be users that expect the tracing * files to exist in debugfs/tracing, we must automount * the tracefs file system there, so older tools still * work with the newer kernel. 
*/ tr->dir = debugfs_create_automount("tracing", NULL, trace_automount, NULL); return 0; } extern struct trace_eval_map *__start_ftrace_eval_maps[]; extern struct trace_eval_map *__stop_ftrace_eval_maps[]; static struct workqueue_struct *eval_map_wq __initdata; static struct work_struct eval_map_work __initdata; static struct work_struct tracerfs_init_work __initdata; static void __init eval_map_work_func(struct work_struct *work) { int len; len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps; trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len); } static int __init trace_eval_init(void) { INIT_WORK(&eval_map_work, eval_map_work_func); eval_map_wq = alloc_workqueue("eval_map_wq", WQ_UNBOUND, 0); if (!eval_map_wq) { pr_err("Unable to allocate eval_map_wq\n"); /* Do work here */ eval_map_work_func(&eval_map_work); return -ENOMEM; } queue_work(eval_map_wq, &eval_map_work); return 0; } subsys_initcall(trace_eval_init); static int __init trace_eval_sync(void) { /* Make sure the eval map updates are finished */ if (eval_map_wq) destroy_workqueue(eval_map_wq); return 0; } late_initcall_sync(trace_eval_sync); #ifdef CONFIG_MODULES bool module_exists(const char *module) { /* All modules have the symbol __this_module */ static const char this_mod[] = "__this_module"; char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2]; unsigned long val; int n; n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod); if (n > sizeof(modname) - 1) return false; val = module_kallsyms_lookup_name(modname); return val != 0; } static void trace_module_add_evals(struct module *mod) { if (!mod->num_trace_evals) return; /* * Modules with bad taint do not have events created, do * not bother with enums either. */ if (trace_module_has_bad_taint(mod)) return; trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals); } #ifdef CONFIG_TRACE_EVAL_MAP_FILE static void trace_module_remove_evals(struct module *mod) { union trace_eval_map_item *map; union trace_eval_map_item **last = &trace_eval_maps; if (!mod->num_trace_evals) return; guard(mutex)(&trace_eval_mutex); map = trace_eval_maps; while (map) { if (map->head.mod == mod) break; map = trace_eval_jmp_to_tail(map); last = &map->tail.next; map = map->tail.next; } if (!map) return; *last = trace_eval_jmp_to_tail(map)->tail.next; kfree(map); } #else static inline void trace_module_remove_evals(struct module *mod) { } #endif /* CONFIG_TRACE_EVAL_MAP_FILE */ static int trace_module_notify(struct notifier_block *self, unsigned long val, void *data) { struct module *mod = data; switch (val) { case MODULE_STATE_COMING: trace_module_add_evals(mod); break; case MODULE_STATE_GOING: trace_module_remove_evals(mod); break; } return NOTIFY_OK; } static struct notifier_block trace_module_nb = { .notifier_call = trace_module_notify, .priority = 0, }; #endif /* CONFIG_MODULES */ static __init void tracer_init_tracefs_work_func(struct work_struct *work) { event_trace_init(); init_tracer_tracefs(&global_trace, NULL); ftrace_init_tracefs_toplevel(&global_trace, NULL); trace_create_file("tracing_thresh", TRACE_MODE_WRITE, NULL, &global_trace, &tracing_thresh_fops); trace_create_file("README", TRACE_MODE_READ, NULL, NULL, &tracing_readme_fops); trace_create_file("saved_cmdlines", TRACE_MODE_READ, NULL, NULL, &tracing_saved_cmdlines_fops); trace_create_file("saved_cmdlines_size", TRACE_MODE_WRITE, NULL, NULL, &tracing_saved_cmdlines_size_fops); trace_create_file("saved_tgids", TRACE_MODE_READ, NULL, NULL, &tracing_saved_tgids_fops); trace_create_eval_file(NULL); 
#ifdef CONFIG_MODULES register_module_notifier(&trace_module_nb); #endif #ifdef CONFIG_DYNAMIC_FTRACE trace_create_file("dyn_ftrace_total_info", TRACE_MODE_READ, NULL, NULL, &tracing_dyn_info_fops); #endif create_trace_instances(NULL); update_tracer_options(&global_trace); } static __init int tracer_init_tracefs(void) { int ret; trace_access_lock_init(); ret = tracing_init_dentry(); if (ret) return 0; if (eval_map_wq) { INIT_WORK(&tracerfs_init_work, tracer_init_tracefs_work_func); queue_work(eval_map_wq, &tracerfs_init_work); } else { tracer_init_tracefs_work_func(NULL); } rv_init_interface(); return 0; } fs_initcall(tracer_init_tracefs); static int trace_die_panic_handler(struct notifier_block *self, unsigned long ev, void *unused); static struct notifier_block trace_panic_notifier = { .notifier_call = trace_die_panic_handler, .priority = INT_MAX - 1, }; static struct notifier_block trace_die_notifier = { .notifier_call = trace_die_panic_handler, .priority = INT_MAX - 1, }; /* * The idea is to execute the following die/panic callback early, in order * to avoid showing irrelevant information in the trace (like other panic * notifier functions); we are the 2nd to run, after hung_task/rcu_stall * warnings get disabled (to prevent potential log flooding). */ static int trace_die_panic_handler(struct notifier_block *self, unsigned long ev, void *unused) { if (!ftrace_dump_on_oops_enabled()) return NOTIFY_DONE; /* The die notifier requires DIE_OOPS to trigger */ if (self == &trace_die_notifier && ev != DIE_OOPS) return NOTIFY_DONE; ftrace_dump(DUMP_PARAM); return NOTIFY_DONE; } /* * printk is set to max of 1024, we really don't need it that big. * Nothing should be printing 1000 characters anyway. */ #define TRACE_MAX_PRINT 1000 /* * Define here KERN_TRACE so that we have one place to modify * it if we decide to change what log level the ftrace dump * should be at. */ #define KERN_TRACE KERN_EMERG void trace_printk_seq(struct trace_seq *s) { /* Probably should print a warning here. */ if (s->seq.len >= TRACE_MAX_PRINT) s->seq.len = TRACE_MAX_PRINT; /* * More paranoid code. Although the buffer size is set to * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just * an extra layer of protection. */ if (WARN_ON_ONCE(s->seq.len >= s->seq.size)) s->seq.len = s->seq.size - 1; /* should be zero ended, but we are paranoid. */ s->buffer[s->seq.len] = 0; printk(KERN_TRACE "%s", s->buffer); trace_seq_init(s); } static void trace_init_iter(struct trace_iterator *iter, struct trace_array *tr) { iter->tr = tr; iter->trace = iter->tr->current_trace; iter->cpu_file = RING_BUFFER_ALL_CPUS; iter->array_buffer = &tr->array_buffer; if (iter->trace && iter->trace->open) iter->trace->open(iter); /* Annotate start of buffers if we had overruns */ if (ring_buffer_overruns(iter->array_buffer->buffer)) iter->iter_flags |= TRACE_FILE_ANNOTATE; /* Output in nanoseconds only if we are using a clock in nanoseconds. 
*/ if (trace_clocks[iter->tr->clock_id].in_ns) iter->iter_flags |= TRACE_FILE_TIME_IN_NS; /* Can not use kmalloc for iter.temp and iter.fmt */ iter->temp = static_temp_buf; iter->temp_size = STATIC_TEMP_BUF_SIZE; iter->fmt = static_fmt_buf; iter->fmt_size = STATIC_FMT_BUF_SIZE; } void trace_init_global_iter(struct trace_iterator *iter) { trace_init_iter(iter, &global_trace); } static void ftrace_dump_one(struct trace_array *tr, enum ftrace_dump_mode dump_mode) { /* use static because iter can be a bit big for the stack */ static struct trace_iterator iter; unsigned int old_userobj; unsigned long flags; int cnt = 0, cpu; /* * Always turn off tracing when we dump. * We don't need to show trace output of what happens * between multiple crashes. * * If the user does a sysrq-z, then they can re-enable * tracing with echo 1 > tracing_on. */ tracer_tracing_off(tr); local_irq_save(flags); /* Simulate the iterator */ trace_init_iter(&iter, tr); for_each_tracing_cpu(cpu) { atomic_inc(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); } old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ; /* don't look at user memory in panic mode */ tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ; if (dump_mode == DUMP_ORIG) iter.cpu_file = raw_smp_processor_id(); else iter.cpu_file = RING_BUFFER_ALL_CPUS; if (tr == &global_trace) printk(KERN_TRACE "Dumping ftrace buffer:\n"); else printk(KERN_TRACE "Dumping ftrace instance %s buffer:\n", tr->name); /* Did function tracer already get disabled? */ if (ftrace_is_dead()) { printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n"); printk("# MAY BE MISSING FUNCTION EVENTS\n"); } /* * We need to stop all tracing on all CPUS to read * the next buffer. This is a bit expensive, but is * not done often. We fill all what we can read, * and then release the locks again. */ while (!trace_empty(&iter)) { if (!cnt) printk(KERN_TRACE "---------------------------------\n"); cnt++; trace_iterator_reset(&iter); iter.iter_flags |= TRACE_FILE_LAT_FMT; if (trace_find_next_entry_inc(&iter) != NULL) { int ret; ret = print_trace_line(&iter); if (ret != TRACE_TYPE_NO_CONSUME) trace_consume(&iter); } touch_nmi_watchdog(); trace_printk_seq(&iter.seq); } if (!cnt) printk(KERN_TRACE " (ftrace buffer empty)\n"); else printk(KERN_TRACE "---------------------------------\n"); tr->trace_flags |= old_userobj; for_each_tracing_cpu(cpu) { atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled); } local_irq_restore(flags); } static void ftrace_dump_by_param(void) { bool first_param = true; char dump_param[MAX_TRACER_SIZE]; char *buf, *token, *inst_name; struct trace_array *tr; strscpy(dump_param, ftrace_dump_on_oops, MAX_TRACER_SIZE); buf = dump_param; while ((token = strsep(&buf, ",")) != NULL) { if (first_param) { first_param = false; if (!strcmp("0", token)) continue; else if (!strcmp("1", token)) { ftrace_dump_one(&global_trace, DUMP_ALL); continue; } else if (!strcmp("2", token) || !strcmp("orig_cpu", token)) { ftrace_dump_one(&global_trace, DUMP_ORIG); continue; } } inst_name = strsep(&token, "="); tr = trace_array_find(inst_name); if (!tr) { printk(KERN_TRACE "Instance %s not found\n", inst_name); continue; } if (token && (!strcmp("2", token) || !strcmp("orig_cpu", token))) ftrace_dump_one(tr, DUMP_ORIG); else ftrace_dump_one(tr, DUMP_ALL); } } void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { static atomic_t dump_running; /* Only allow one dump user at a time. 
*/ if (atomic_inc_return(&dump_running) != 1) { atomic_dec(&dump_running); return; } switch (oops_dump_mode) { case DUMP_ALL: ftrace_dump_one(&global_trace, DUMP_ALL); break; case DUMP_ORIG: ftrace_dump_one(&global_trace, DUMP_ORIG); break; case DUMP_PARAM: ftrace_dump_by_param(); break; case DUMP_NONE: break; default: printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n"); ftrace_dump_one(&global_trace, DUMP_ALL); } atomic_dec(&dump_running); } EXPORT_SYMBOL_GPL(ftrace_dump); #define WRITE_BUFSIZE 4096 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer, size_t count, loff_t *ppos, int (*createfn)(const char *)) { char *kbuf, *buf, *tmp; int ret = 0; size_t done = 0; size_t size; kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL); if (!kbuf) return -ENOMEM; while (done < count) { size = count - done; if (size >= WRITE_BUFSIZE) size = WRITE_BUFSIZE - 1; if (copy_from_user(kbuf, buffer + done, size)) { ret = -EFAULT; goto out; } kbuf[size] = '\0'; buf = kbuf; do { tmp = strchr(buf, '\n'); if (tmp) { *tmp = '\0'; size = tmp - buf + 1; } else { size = strlen(buf); if (done + size < count) { if (buf != kbuf) break; /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */ pr_warn("Line length is too long: Should be less than %d\n", WRITE_BUFSIZE - 2); ret = -EINVAL; goto out; } } done += size; /* Remove comments */ tmp = strchr(buf, '#'); if (tmp) *tmp = '\0'; ret = createfn(buf); if (ret) goto out; buf += size; } while (done < count); } ret = done; out: kfree(kbuf); return ret; } #ifdef CONFIG_TRACER_MAX_TRACE __init static bool tr_needs_alloc_snapshot(const char *name) { char *test; int len = strlen(name); bool ret; if (!boot_snapshot_index) return false; if (strncmp(name, boot_snapshot_info, len) == 0 && boot_snapshot_info[len] == '\t') return true; test = kmalloc(strlen(name) + 3, GFP_KERNEL); if (!test) return false; sprintf(test, "\t%s\t", name); ret = strstr(boot_snapshot_info, test) == NULL; kfree(test); return ret; } __init static void do_allocate_snapshot(const char *name) { if (!tr_needs_alloc_snapshot(name)) return; /* * When allocate_snapshot is set, the next call to * allocate_trace_buffers() (called by trace_array_get_by_name()) * will allocate the snapshot buffer. That will alse clear * this flag. 
*/ allocate_snapshot = true; } #else static inline void do_allocate_snapshot(const char *name) { } #endif __init static void enable_instances(void) { struct trace_array *tr; char *curr_str; char *name; char *str; char *tok; /* A tab is always appended */ boot_instance_info[boot_instance_index - 1] = '\0'; str = boot_instance_info; while ((curr_str = strsep(&str, "\t"))) { phys_addr_t start = 0; phys_addr_t size = 0; unsigned long addr = 0; bool traceprintk = false; bool traceoff = false; char *flag_delim; char *addr_delim; tok = strsep(&curr_str, ","); flag_delim = strchr(tok, '^'); addr_delim = strchr(tok, '@'); if (addr_delim) *addr_delim++ = '\0'; if (flag_delim) *flag_delim++ = '\0'; name = tok; if (flag_delim) { char *flag; while ((flag = strsep(&flag_delim, "^"))) { if (strcmp(flag, "traceoff") == 0) { traceoff = true; } else if ((strcmp(flag, "printk") == 0) || (strcmp(flag, "traceprintk") == 0) || (strcmp(flag, "trace_printk") == 0)) { traceprintk = true; } else { pr_info("Tracing: Invalid instance flag '%s' for %s\n", flag, name); } } } tok = addr_delim; if (tok && isdigit(*tok)) { start = memparse(tok, &tok); if (!start) { pr_warn("Tracing: Invalid boot instance address for %s\n", name); continue; } if (*tok != ':') { pr_warn("Tracing: No size specified for instance %s\n", name); continue; } tok++; size = memparse(tok, &tok); if (!size) { pr_warn("Tracing: Invalid boot instance size for %s\n", name); continue; } } else if (tok) { if (!reserve_mem_find_by_name(tok, &start, &size)) { start = 0; pr_warn("Failed to map boot instance %s to %s\n", name, tok); continue; } } if (start) { addr = map_pages(start, size); if (addr) { pr_info("Tracing: mapped boot instance %s at physical memory %pa of size 0x%lx\n", name, &start, (unsigned long)size); } else { pr_warn("Tracing: Failed to map boot instance %s\n", name); continue; } } else { /* Only non mapped buffers have snapshot buffers */ if (IS_ENABLED(CONFIG_TRACER_MAX_TRACE)) do_allocate_snapshot(name); } tr = trace_array_create_systems(name, NULL, addr, size); if (IS_ERR(tr)) { pr_warn("Tracing: Failed to create instance buffer %s\n", curr_str); continue; } if (traceoff) tracer_tracing_off(tr); if (traceprintk) update_printk_trace(tr); /* * If start is set, then this is a mapped buffer, and * cannot be deleted by user space, so keep the reference * to it. */ if (start) { tr->flags |= TRACE_ARRAY_FL_BOOT; tr->ref++; } while ((tok = strsep(&curr_str, ","))) { early_enable_events(tr, tok, true); } } } __init static int tracer_alloc_buffers(void) { int ring_buf_size; int ret = -ENOMEM; if (security_locked_down(LOCKDOWN_TRACEFS)) { pr_warn("Tracing disabled due to lockdown\n"); return -EPERM; } /* * Make sure we don't accidentally add more trace options * than we have bits for. 
*/ BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE); if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) goto out; if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL)) goto out_free_buffer_mask; /* Only allocate trace_printk buffers if a trace_printk exists */ if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt) /* Must be called before global_trace.buffer is allocated */ trace_printk_init_buffers(); /* To save memory, keep the ring buffer size to its minimum */ if (global_trace.ring_buffer_expanded) ring_buf_size = trace_buf_size; else ring_buf_size = 1; cpumask_copy(tracing_buffer_mask, cpu_possible_mask); cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask); raw_spin_lock_init(&global_trace.start_lock); /* * The prepare callbacks allocates some memory for the ring buffer. We * don't free the buffer if the CPU goes down. If we were to free * the buffer, then the user would lose any trace that was in the * buffer. The memory will be removed once the "instance" is removed. */ ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE, "trace/RB:prepare", trace_rb_cpu_prepare, NULL); if (ret < 0) goto out_free_cpumask; /* Used for event triggers */ ret = -ENOMEM; temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE); if (!temp_buffer) goto out_rm_hp_state; if (trace_create_savedcmd() < 0) goto out_free_temp_buffer; if (!zalloc_cpumask_var(&global_trace.pipe_cpumask, GFP_KERNEL)) goto out_free_savedcmd; /* TODO: make the number of buffers hot pluggable with CPUS */ if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) { MEM_FAIL(1, "tracer: failed to allocate ring buffer!\n"); goto out_free_pipe_cpumask; } if (global_trace.buffer_disabled) tracing_off(); if (trace_boot_clock) { ret = tracing_set_clock(&global_trace, trace_boot_clock); if (ret < 0) pr_warn("Trace clock %s not defined, going back to default\n", trace_boot_clock); } /* * register_tracer() might reference current_trace, so it * needs to be set before we register anything. This is * just a bootstrap of current_trace anyway. 
*/ global_trace.current_trace = &nop_trace; global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; #ifdef CONFIG_TRACER_MAX_TRACE spin_lock_init(&global_trace.snapshot_trigger_lock); #endif ftrace_init_global_array_ops(&global_trace); #ifdef CONFIG_MODULES INIT_LIST_HEAD(&global_trace.mod_events); #endif init_trace_flags_index(&global_trace); register_tracer(&nop_trace); /* Function tracing may start here (via kernel command line) */ init_function_trace(); /* All seems OK, enable tracing */ tracing_disabled = 0; atomic_notifier_chain_register(&panic_notifier_list, &trace_panic_notifier); register_die_notifier(&trace_die_notifier); global_trace.flags = TRACE_ARRAY_FL_GLOBAL; INIT_LIST_HEAD(&global_trace.systems); INIT_LIST_HEAD(&global_trace.events); INIT_LIST_HEAD(&global_trace.hist_vars); INIT_LIST_HEAD(&global_trace.err_log); list_add(&global_trace.list, &ftrace_trace_arrays); apply_trace_boot_options(); register_snapshot_cmd(); return 0; out_free_pipe_cpumask: free_cpumask_var(global_trace.pipe_cpumask); out_free_savedcmd: trace_free_saved_cmdlines_buffer(); out_free_temp_buffer: ring_buffer_free(temp_buffer); out_rm_hp_state: cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE); out_free_cpumask: free_cpumask_var(global_trace.tracing_cpumask); out_free_buffer_mask: free_cpumask_var(tracing_buffer_mask); out: return ret; } #ifdef CONFIG_FUNCTION_TRACER /* Used to set module cached ftrace filtering at boot up */ __init struct trace_array *trace_get_global_array(void) { return &global_trace; } #endif void __init ftrace_boot_snapshot(void) { #ifdef CONFIG_TRACER_MAX_TRACE struct trace_array *tr; if (!snapshot_at_boot) return; list_for_each_entry(tr, &ftrace_trace_arrays, list) { if (!tr->allocated_snapshot) continue; tracing_snapshot_instance(tr); trace_array_puts(tr, "** Boot snapshot taken **\n"); } #endif } void __init early_trace_init(void) { if (tracepoint_printk) { tracepoint_print_iter = kzalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL); if (MEM_FAIL(!tracepoint_print_iter, "Failed to allocate trace iterator\n")) tracepoint_printk = 0; else static_key_enable(&tracepoint_printk_key.key); } tracer_alloc_buffers(); init_events(); } void __init trace_init(void) { trace_event_init(); if (boot_instance_index) enable_instances(); } __init static void clear_boot_tracer(void) { /* * The default tracer at boot buffer is an init section. * This function is called in lateinit. If we did not * find the boot tracer, then clear it out, to prevent * later registration from accessing the buffer that is * about to be freed. 
*/ if (!default_bootup_tracer) return; printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n", default_bootup_tracer); default_bootup_tracer = NULL; } #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK __init static void tracing_set_default_clock(void) { /* sched_clock_stable() is determined in late_initcall */ if (!trace_boot_clock && !sched_clock_stable()) { if (security_locked_down(LOCKDOWN_TRACEFS)) { pr_warn("Can not set tracing clock due to lockdown\n"); return; } printk(KERN_WARNING "Unstable clock detected, switching default tracing clock to \"global\"\n" "If you want to keep using the local clock, then add:\n" " \"trace_clock=local\"\n" "on the kernel command line\n"); tracing_set_clock(&global_trace, "global"); } } #else static inline void tracing_set_default_clock(void) { } #endif __init static int late_trace_init(void) { if (tracepoint_printk && tracepoint_printk_stop_on_boot) { static_key_disable(&tracepoint_printk_key.key); tracepoint_printk = 0; } tracing_set_default_clock(); clear_boot_tracer(); return 0; } late_initcall_sync(late_trace_init);
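The trace_array_get_by_name() kernel-doc above spells out the reference rules for tracing instances created from kernel code: the lookup takes a reference, trace_array_put() drops it, and trace_array_destroy() may only be called afterwards. As an illustrative sketch only (not part of the source above), a module might drive that API roughly as follows; the instance name, the enabled event and the use of trace_array_init_printk()/trace_array_set_clr_event() as declared in <linux/trace.h> are assumptions made for the example:

/* Hypothetical example module exercising the instance API shown above. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/trace.h>

static struct trace_array *sample_tr;

static int __init sample_instance_init(void)
{
	/* Looks up or creates the instance and takes a reference on it */
	sample_tr = trace_array_get_by_name("sample-instance", NULL);
	if (!sample_tr)
		return -ENOMEM;

	/* Enable a single event in this instance only (names are illustrative) */
	trace_array_set_clr_event(sample_tr, "sched", "sched_switch", true);

	/* Required before trace_array_printk() may write into the instance */
	trace_array_init_printk(sample_tr);
	trace_array_printk(sample_tr, _THIS_IP_, "instance ready\n");
	return 0;
}

static void __exit sample_instance_exit(void)
{
	/* Drop our reference first, then ask for the instance to be removed */
	trace_array_put(sample_tr);
	trace_array_destroy(sample_tr);
}

module_init(sample_instance_init);
module_exit(sample_instance_exit);
MODULE_LICENSE("GPL");

The ordering in the exit path matters: __remove_instance() above returns -EBUSY while the reference count is still above one, so the put has to come before the destroy.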
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 *
 * This is an implementation of the BLAKE2s hash and PRF functions.
 *
 * Information: https://blake2.net/
 *
 */

#include <crypto/internal/blake2s.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bug.h>

static inline void blake2s_set_lastblock(struct blake2s_state *state)
{
	state->f[0] = -1;
}

void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen)
{
	const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;

	if (unlikely(!inlen))
		return;
	if (inlen > fill) {
		memcpy(state->buf + state->buflen, in, fill);
		blake2s_compress(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
		state->buflen = 0;
		in += fill;
		inlen -= fill;
	}
	if (inlen > BLAKE2S_BLOCK_SIZE) {
		const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);

		blake2s_compress(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
		in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
		inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
	}
	memcpy(state->buf + state->buflen, in, inlen);
	state->buflen += inlen;
}
EXPORT_SYMBOL(blake2s_update);

void blake2s_final(struct blake2s_state *state, u8 *out)
{
	WARN_ON(IS_ENABLED(DEBUG) && !out);
	blake2s_set_lastblock(state);
	memset(state->buf + state->buflen, 0,
	       BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
	blake2s_compress(state, state->buf, 1, state->buflen);
	cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
	memcpy(out, state->h, state->outlen);
	memzero_explicit(state, sizeof(*state));
}
EXPORT_SYMBOL(blake2s_final);

static int __init blake2s_mod_init(void)
{
	if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
	    WARN_ON(!blake2s_selftest()))
		return -ENODEV;
	return 0;
}

module_init(blake2s_mod_init);
MODULE_DESCRIPTION("BLAKE2s hash function");
MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
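The blake2s_update()/blake2s_final() pair above is the streaming half of the library interface. A minimal usage sketch, assuming <crypto/blake2s.h> exposes blake2s_init() and BLAKE2S_HASH_SIZE as in mainline; the helper name below is invented:

#include <crypto/blake2s.h>

/* Hypothetical helper: unkeyed 32-byte BLAKE2s digest of a buffer. */
static void sample_blake2s_digest(const u8 *data, size_t len,
				  u8 out[BLAKE2S_HASH_SIZE])
{
	struct blake2s_state state;

	blake2s_init(&state, BLAKE2S_HASH_SIZE); /* unkeyed, full-length digest */
	blake2s_update(&state, data, len);       /* may be called repeatedly */
	blake2s_final(&state, out);              /* also wipes the state */
}

For one-shot hashing, the same header also provides a blake2s() convenience wrapper that performs these three steps internally.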
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the VoIP USB phones with CM109 chipsets.
 *
 * Copyright (C) 2007 - 2008 Alfred E. Heggestad <aeh@db.org>
 */

/*
 *   Tested devices:
 *	- Komunikate KIP1000
 *	- Genius G-talk
 *	- Allied-Telesis Corega USBPH01
 *	- ...
 *
 *   This driver is based on the yealink.c driver
 *
 *   Thanks to:
 *	- Authors of yealink.c
 *	- Thomas Reitmayr
 *	- Oliver Neukum for good review comments and code
 *	- Shaun Jackman <sjackman@gmail.com> for Genius G-talk keymap
 *	- Dmitry Torokhov for valuable input and review
 *
 *   Todo:
 *	- Read/write EEPROM
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/usb/input.h>

#define DRIVER_VERSION "20080805"
#define DRIVER_AUTHOR  "Alfred E. Heggestad"
#define DRIVER_DESC    "CM109 phone driver"

static char *phone = "kip1000";
module_param(phone, charp, S_IRUSR);
MODULE_PARM_DESC(phone, "Phone name {kip1000, gtalk, usbph01, atcom}");

enum {
	/* HID Registers */
	HID_IR0 = 0x00, /* Record/Playback-mute button, Volume up/down  */
	HID_IR1 = 0x01, /* GPI, generic registers or EEPROM_DATA0       */
	HID_IR2 = 0x02, /* Generic registers or EEPROM_DATA1            */
	HID_IR3 = 0x03, /* Generic registers or EEPROM_CTRL             */
	HID_OR0 = 0x00, /* Mapping control, buzzer, SPDIF (offset 0x04) */
	HID_OR1 = 0x01, /* GPO - General Purpose Output                 */
	HID_OR2 = 0x02, /* Set GPIO to input/output mode                */
	HID_OR3 = 0x03, /* SPDIF status channel or EEPROM_CTRL          */

	/* HID_IR0 */
	RECORD_MUTE   = 1 << 3,
	PLAYBACK_MUTE = 1 << 2,
	VOLUME_DOWN   = 1 << 1,
	VOLUME_UP     = 1 << 0,

	/* HID_OR0 */
	/* bits 7-6
	   0: HID_OR1-2 are used for GPO; HID_OR0, 3 are used for buzzer
	      and SPDIF
	   1: HID_OR0-3 are used as generic HID registers
	   2: Values written to HID_OR0-3 are also mapped to MCU_CTRL,
	      EEPROM_DATA0-1, EEPROM_CTRL (see Note)
	   3: Reserved
	 */
	HID_OR_GPO_BUZ_SPDIF   = 0 << 6,
	HID_OR_GENERIC_HID_REG = 1 << 6,
	HID_OR_MAP_MCU_EEPROM  = 2 << 6,

	BUZZER_ON = 1 << 5,

	/* up to 256 normal keys, up to 15 special key combinations */
	KEYMAP_SIZE = 256 + 15,
};

/* CM109 protocol packet */
struct cm109_ctl_packet {
	u8 byte[4];
} __attribute__ ((packed));

enum { USB_PKT_LEN = sizeof(struct cm109_ctl_packet) };

/* CM109 device structure */
struct cm109_dev {
	struct input_dev *idev;	 /* input device */
	struct usb_device *udev; /* usb device */
	struct usb_interface *intf;

	/* irq input channel */
	struct cm109_ctl_packet *irq_data;
	dma_addr_t irq_dma;
	struct urb *urb_irq;

	/* control output channel */
	struct cm109_ctl_packet *ctl_data;
	dma_addr_t ctl_dma;
	struct usb_ctrlrequest *ctl_req;
	struct urb *urb_ctl;
	/*
	 * The 3 bitfields below are protected by ctl_submit_lock.
	 * They have to be separate since they are accessed from IRQ
	 * context.
*/ unsigned irq_urb_pending:1; /* irq_urb is in flight */ unsigned ctl_urb_pending:1; /* ctl_urb is in flight */ unsigned buzzer_pending:1; /* need to issue buzz command */ spinlock_t ctl_submit_lock; unsigned char buzzer_state; /* on/off */ /* flags */ unsigned open:1; unsigned resetting:1; unsigned shutdown:1; /* This mutex protects writes to the above flags */ struct mutex pm_mutex; unsigned short keymap[KEYMAP_SIZE]; char phys[64]; /* physical device path */ int key_code; /* last reported key */ int keybit; /* 0=new scan 1,2,4,8=scan columns */ u8 gpi; /* Cached value of GPI (high nibble) */ }; /****************************************************************************** * CM109 key interface *****************************************************************************/ static unsigned short special_keymap(int code) { if (code > 0xff) { switch (code - 0xff) { case RECORD_MUTE: return KEY_MICMUTE; case PLAYBACK_MUTE: return KEY_MUTE; case VOLUME_DOWN: return KEY_VOLUMEDOWN; case VOLUME_UP: return KEY_VOLUMEUP; } } return KEY_RESERVED; } /* Map device buttons to internal key events. * * The "up" and "down" keys, are symbolised by arrows on the button. * The "pickup" and "hangup" keys are symbolised by a green and red phone * on the button. Komunikate KIP1000 Keyboard Matrix -> -- 1 -- 2 -- 3 --> GPI pin 4 (0x10) | | | | <- -- 4 -- 5 -- 6 --> GPI pin 5 (0x20) | | | | END - 7 -- 8 -- 9 --> GPI pin 6 (0x40) | | | | OK -- * -- 0 -- # --> GPI pin 7 (0x80) | | | | /|\ /|\ /|\ /|\ | | | | GPO pin: 3 2 1 0 0x8 0x4 0x2 0x1 */ static unsigned short keymap_kip1000(int scancode) { switch (scancode) { /* phone key: */ case 0x82: return KEY_NUMERIC_0; /* 0 */ case 0x14: return KEY_NUMERIC_1; /* 1 */ case 0x12: return KEY_NUMERIC_2; /* 2 */ case 0x11: return KEY_NUMERIC_3; /* 3 */ case 0x24: return KEY_NUMERIC_4; /* 4 */ case 0x22: return KEY_NUMERIC_5; /* 5 */ case 0x21: return KEY_NUMERIC_6; /* 6 */ case 0x44: return KEY_NUMERIC_7; /* 7 */ case 0x42: return KEY_NUMERIC_8; /* 8 */ case 0x41: return KEY_NUMERIC_9; /* 9 */ case 0x81: return KEY_NUMERIC_POUND; /* # */ case 0x84: return KEY_NUMERIC_STAR; /* * */ case 0x88: return KEY_ENTER; /* pickup */ case 0x48: return KEY_ESC; /* hangup */ case 0x28: return KEY_LEFT; /* IN */ case 0x18: return KEY_RIGHT; /* OUT */ default: return special_keymap(scancode); } } /* Contributed by Shaun Jackman <sjackman@gmail.com> Genius G-Talk keyboard matrix 0 1 2 3 4: 0 4 8 Talk 5: 1 5 9 End 6: 2 6 # Up 7: 3 7 * Down */ static unsigned short keymap_gtalk(int scancode) { switch (scancode) { case 0x11: return KEY_NUMERIC_0; case 0x21: return KEY_NUMERIC_1; case 0x41: return KEY_NUMERIC_2; case 0x81: return KEY_NUMERIC_3; case 0x12: return KEY_NUMERIC_4; case 0x22: return KEY_NUMERIC_5; case 0x42: return KEY_NUMERIC_6; case 0x82: return KEY_NUMERIC_7; case 0x14: return KEY_NUMERIC_8; case 0x24: return KEY_NUMERIC_9; case 0x44: return KEY_NUMERIC_POUND; /* # */ case 0x84: return KEY_NUMERIC_STAR; /* * */ case 0x18: return KEY_ENTER; /* Talk (green handset) */ case 0x28: return KEY_ESC; /* End (red handset) */ case 0x48: return KEY_UP; /* Menu up (rocker switch) */ case 0x88: return KEY_DOWN; /* Menu down (rocker switch) */ default: return special_keymap(scancode); } } /* * Keymap for Allied-Telesis Corega USBPH01 * http://www.alliedtelesis-corega.com/2/1344/1437/1360/chprd.html * * Contributed by july@nat.bg */ static unsigned short keymap_usbph01(int scancode) { switch (scancode) { case 0x11: return KEY_NUMERIC_0; /* 0 */ case 0x21: return KEY_NUMERIC_1; /* 1 */ case 
0x41: return KEY_NUMERIC_2; /* 2 */ case 0x81: return KEY_NUMERIC_3; /* 3 */ case 0x12: return KEY_NUMERIC_4; /* 4 */ case 0x22: return KEY_NUMERIC_5; /* 5 */ case 0x42: return KEY_NUMERIC_6; /* 6 */ case 0x82: return KEY_NUMERIC_7; /* 7 */ case 0x14: return KEY_NUMERIC_8; /* 8 */ case 0x24: return KEY_NUMERIC_9; /* 9 */ case 0x44: return KEY_NUMERIC_POUND; /* # */ case 0x84: return KEY_NUMERIC_STAR; /* * */ case 0x18: return KEY_ENTER; /* pickup */ case 0x28: return KEY_ESC; /* hangup */ case 0x48: return KEY_LEFT; /* IN */ case 0x88: return KEY_RIGHT; /* OUT */ default: return special_keymap(scancode); } } /* * Keymap for ATCom AU-100 * http://www.atcom.cn/products.html * http://www.packetizer.com/products/au100/ * http://www.voip-info.org/wiki/view/AU-100 * * Contributed by daniel@gimpelevich.san-francisco.ca.us */ static unsigned short keymap_atcom(int scancode) { switch (scancode) { /* phone key: */ case 0x82: return KEY_NUMERIC_0; /* 0 */ case 0x11: return KEY_NUMERIC_1; /* 1 */ case 0x12: return KEY_NUMERIC_2; /* 2 */ case 0x14: return KEY_NUMERIC_3; /* 3 */ case 0x21: return KEY_NUMERIC_4; /* 4 */ case 0x22: return KEY_NUMERIC_5; /* 5 */ case 0x24: return KEY_NUMERIC_6; /* 6 */ case 0x41: return KEY_NUMERIC_7; /* 7 */ case 0x42: return KEY_NUMERIC_8; /* 8 */ case 0x44: return KEY_NUMERIC_9; /* 9 */ case 0x84: return KEY_NUMERIC_POUND; /* # */ case 0x81: return KEY_NUMERIC_STAR; /* * */ case 0x18: return KEY_ENTER; /* pickup */ case 0x28: return KEY_ESC; /* hangup */ case 0x48: return KEY_LEFT; /* left arrow */ case 0x88: return KEY_RIGHT; /* right arrow */ default: return special_keymap(scancode); } } static unsigned short (*keymap)(int) = keymap_kip1000; /* * Completes a request by converting the data into events for the * input subsystem. */ static void report_key(struct cm109_dev *dev, int key) { struct input_dev *idev = dev->idev; if (dev->key_code >= 0) { /* old key up */ input_report_key(idev, dev->key_code, 0); } dev->key_code = key; if (key >= 0) { /* new valid key */ input_report_key(idev, key, 1); } input_sync(idev); } /* * Converts data of special key presses (volume, mute) into events * for the input subsystem, sends press-n-release for mute keys. 
*/ static void cm109_report_special(struct cm109_dev *dev) { static const u8 autorelease = RECORD_MUTE | PLAYBACK_MUTE; struct input_dev *idev = dev->idev; u8 data = dev->irq_data->byte[HID_IR0]; unsigned short keycode; int i; for (i = 0; i < 4; i++) { keycode = dev->keymap[0xff + BIT(i)]; if (keycode == KEY_RESERVED) continue; input_report_key(idev, keycode, data & BIT(i)); if (data & autorelease & BIT(i)) { input_sync(idev); input_report_key(idev, keycode, 0); } } input_sync(idev); } /****************************************************************************** * CM109 usb communication interface *****************************************************************************/ static void cm109_submit_buzz_toggle(struct cm109_dev *dev) { int error; if (dev->buzzer_state) dev->ctl_data->byte[HID_OR0] |= BUZZER_ON; else dev->ctl_data->byte[HID_OR0] &= ~BUZZER_ON; error = usb_submit_urb(dev->urb_ctl, GFP_ATOMIC); if (error) dev_err(&dev->intf->dev, "%s: usb_submit_urb (urb_ctl) failed %d\n", __func__, error); } static void cm109_submit_ctl(struct cm109_dev *dev) { int error; guard(spinlock_irqsave)(&dev->ctl_submit_lock); dev->irq_urb_pending = 0; if (unlikely(dev->shutdown)) return; if (dev->buzzer_state) dev->ctl_data->byte[HID_OR0] |= BUZZER_ON; else dev->ctl_data->byte[HID_OR0] &= ~BUZZER_ON; dev->ctl_data->byte[HID_OR1] = dev->keybit; dev->ctl_data->byte[HID_OR2] = dev->keybit; dev->buzzer_pending = 0; dev->ctl_urb_pending = 1; error = usb_submit_urb(dev->urb_ctl, GFP_ATOMIC); if (error) dev_err(&dev->intf->dev, "%s: usb_submit_urb (urb_ctl) failed %d\n", __func__, error); } /* * IRQ handler */ static void cm109_urb_irq_callback(struct urb *urb) { struct cm109_dev *dev = urb->context; const int status = urb->status; dev_dbg(&dev->intf->dev, "### URB IRQ: [0x%02x 0x%02x 0x%02x 0x%02x] keybit=0x%02x\n", dev->irq_data->byte[0], dev->irq_data->byte[1], dev->irq_data->byte[2], dev->irq_data->byte[3], dev->keybit); if (status) { if (status == -ESHUTDOWN) return; dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n", __func__, status); goto out; } /* Special keys */ cm109_report_special(dev); /* Scan key column */ if (dev->keybit == 0xf) { /* Any changes ? 
*/ if ((dev->gpi & 0xf0) == (dev->irq_data->byte[HID_IR1] & 0xf0)) goto out; dev->gpi = dev->irq_data->byte[HID_IR1] & 0xf0; dev->keybit = 0x1; } else { report_key(dev, dev->keymap[dev->irq_data->byte[HID_IR1]]); dev->keybit <<= 1; if (dev->keybit > 0x8) dev->keybit = 0xf; } out: cm109_submit_ctl(dev); } static void cm109_urb_ctl_callback(struct urb *urb) { struct cm109_dev *dev = urb->context; const int status = urb->status; int error; dev_dbg(&dev->intf->dev, "### URB CTL: [0x%02x 0x%02x 0x%02x 0x%02x]\n", dev->ctl_data->byte[0], dev->ctl_data->byte[1], dev->ctl_data->byte[2], dev->ctl_data->byte[3]); if (status) { if (status == -ESHUTDOWN) return; dev_err_ratelimited(&dev->intf->dev, "%s: urb status %d\n", __func__, status); } guard(spinlock_irqsave)(&dev->ctl_submit_lock); dev->ctl_urb_pending = 0; if (unlikely(dev->shutdown)) return; if (dev->buzzer_pending || status) { dev->buzzer_pending = 0; dev->ctl_urb_pending = 1; cm109_submit_buzz_toggle(dev); } else if (likely(!dev->irq_urb_pending)) { /* ask for key data */ dev->irq_urb_pending = 1; error = usb_submit_urb(dev->urb_irq, GFP_ATOMIC); if (error) dev_err(&dev->intf->dev, "%s: usb_submit_urb (urb_irq) failed %d\n", __func__, error); } } static void cm109_toggle_buzzer_async(struct cm109_dev *dev) { guard(spinlock_irqsave)(&dev->ctl_submit_lock); if (dev->ctl_urb_pending) { /* URB completion will resubmit */ dev->buzzer_pending = 1; } else { dev->ctl_urb_pending = 1; cm109_submit_buzz_toggle(dev); } } static void cm109_toggle_buzzer_sync(struct cm109_dev *dev, int on) { int error; if (on) dev->ctl_data->byte[HID_OR0] |= BUZZER_ON; else dev->ctl_data->byte[HID_OR0] &= ~BUZZER_ON; error = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), dev->ctl_req->bRequest, dev->ctl_req->bRequestType, le16_to_cpu(dev->ctl_req->wValue), le16_to_cpu(dev->ctl_req->wIndex), dev->ctl_data, USB_PKT_LEN, USB_CTRL_SET_TIMEOUT); if (error < 0 && error != -EINTR) dev_err(&dev->intf->dev, "%s: usb_control_msg() failed %d\n", __func__, error); } static void cm109_stop_traffic(struct cm109_dev *dev) { dev->shutdown = 1; /* * Make sure other CPUs see this */ smp_wmb(); usb_kill_urb(dev->urb_ctl); usb_kill_urb(dev->urb_irq); cm109_toggle_buzzer_sync(dev, 0); dev->shutdown = 0; smp_wmb(); } static void cm109_restore_state(struct cm109_dev *dev) { if (dev->open) { /* * Restore buzzer state. 
* This will also kick regular URB submission */ cm109_toggle_buzzer_async(dev); } } /****************************************************************************** * input event interface *****************************************************************************/ static int cm109_input_open(struct input_dev *idev) { struct cm109_dev *dev = input_get_drvdata(idev); int error; error = usb_autopm_get_interface(dev->intf); if (error < 0) { dev_err(&idev->dev, "%s - cannot autoresume, result %d\n", __func__, error); return error; } scoped_guard(mutex, &dev->pm_mutex) { dev->buzzer_state = 0; dev->key_code = -1; /* no keys pressed */ dev->keybit = 0xf; /* issue INIT */ dev->ctl_data->byte[HID_OR0] = HID_OR_GPO_BUZ_SPDIF; dev->ctl_data->byte[HID_OR1] = dev->keybit; dev->ctl_data->byte[HID_OR2] = dev->keybit; dev->ctl_data->byte[HID_OR3] = 0x00; dev->ctl_urb_pending = 1; error = usb_submit_urb(dev->urb_ctl, GFP_KERNEL); if (!error) { dev->open = 1; return 0; } } dev->ctl_urb_pending = 0; usb_autopm_put_interface(dev->intf); dev_err(&dev->intf->dev, "%s: usb_submit_urb (urb_ctl) failed %d\n", __func__, error); return error; } static void cm109_input_close(struct input_dev *idev) { struct cm109_dev *dev = input_get_drvdata(idev); scoped_guard(mutex, &dev->pm_mutex) { /* * Once we are here event delivery is stopped so we * don't need to worry about someone starting buzzer * again */ cm109_stop_traffic(dev); dev->open = 0; } usb_autopm_put_interface(dev->intf); } static int cm109_input_ev(struct input_dev *idev, unsigned int type, unsigned int code, int value) { struct cm109_dev *dev = input_get_drvdata(idev); dev_dbg(&dev->intf->dev, "input_ev: type=%u code=%u value=%d\n", type, code, value); if (type != EV_SND) return -EINVAL; switch (code) { case SND_TONE: case SND_BELL: dev->buzzer_state = !!value; if (!dev->resetting) cm109_toggle_buzzer_async(dev); return 0; default: return -EINVAL; } } /****************************************************************************** * Linux interface and usb initialisation *****************************************************************************/ struct driver_info { char *name; }; static const struct driver_info info_cm109 = { .name = "CM109 USB driver", }; enum { VENDOR_ID = 0x0d8c, /* C-Media Electronics */ PRODUCT_ID_CM109 = 0x000e, /* CM109 defines range 0x0008 - 0x000f */ }; /* table of devices that work with this driver */ static const struct usb_device_id cm109_usb_table[] = { { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, .idVendor = VENDOR_ID, .idProduct = PRODUCT_ID_CM109, .bInterfaceClass = USB_CLASS_HID, .bInterfaceSubClass = 0, .bInterfaceProtocol = 0, .driver_info = (kernel_ulong_t) &info_cm109 }, /* you can add more devices here with product ID 0x0008 - 0x000f */ { } }; static void cm109_usb_cleanup(struct cm109_dev *dev) { kfree(dev->ctl_req); usb_free_coherent(dev->udev, USB_PKT_LEN, dev->ctl_data, dev->ctl_dma); usb_free_coherent(dev->udev, USB_PKT_LEN, dev->irq_data, dev->irq_dma); usb_free_urb(dev->urb_irq); /* parameter validation in core/urb */ usb_free_urb(dev->urb_ctl); /* parameter validation in core/urb */ kfree(dev); } static void cm109_usb_disconnect(struct usb_interface *interface) { struct cm109_dev *dev = usb_get_intfdata(interface); usb_set_intfdata(interface, NULL); input_unregister_device(dev->idev); cm109_usb_cleanup(dev); } static int cm109_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); struct driver_info *nfo = 
(struct driver_info *)id->driver_info; struct usb_host_interface *interface; struct usb_endpoint_descriptor *endpoint; struct cm109_dev *dev; struct input_dev *input_dev = NULL; int ret, pipe, i; int error = -ENOMEM; interface = intf->cur_altsetting; if (interface->desc.bNumEndpoints < 1) return -ENODEV; endpoint = &interface->endpoint[0].desc; if (!usb_endpoint_is_int_in(endpoint)) return -ENODEV; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; spin_lock_init(&dev->ctl_submit_lock); mutex_init(&dev->pm_mutex); dev->udev = udev; dev->intf = intf; dev->idev = input_dev = input_allocate_device(); if (!input_dev) goto err_out; /* allocate usb buffers */ dev->irq_data = usb_alloc_coherent(udev, USB_PKT_LEN, GFP_KERNEL, &dev->irq_dma); if (!dev->irq_data) goto err_out; dev->ctl_data = usb_alloc_coherent(udev, USB_PKT_LEN, GFP_KERNEL, &dev->ctl_dma); if (!dev->ctl_data) goto err_out; dev->ctl_req = kmalloc(sizeof(*(dev->ctl_req)), GFP_KERNEL); if (!dev->ctl_req) goto err_out; /* allocate urb structures */ dev->urb_irq = usb_alloc_urb(0, GFP_KERNEL); if (!dev->urb_irq) goto err_out; dev->urb_ctl = usb_alloc_urb(0, GFP_KERNEL); if (!dev->urb_ctl) goto err_out; /* get a handle to the interrupt data pipe */ pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress); ret = usb_maxpacket(udev, pipe); if (ret != USB_PKT_LEN) dev_err(&intf->dev, "invalid payload size %d, expected %d\n", ret, USB_PKT_LEN); /* initialise irq urb */ usb_fill_int_urb(dev->urb_irq, udev, pipe, dev->irq_data, USB_PKT_LEN, cm109_urb_irq_callback, dev, endpoint->bInterval); dev->urb_irq->transfer_dma = dev->irq_dma; dev->urb_irq->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; dev->urb_irq->dev = udev; /* initialise ctl urb */ dev->ctl_req->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT; dev->ctl_req->bRequest = USB_REQ_SET_CONFIGURATION; dev->ctl_req->wValue = cpu_to_le16(0x200); dev->ctl_req->wIndex = cpu_to_le16(interface->desc.bInterfaceNumber); dev->ctl_req->wLength = cpu_to_le16(USB_PKT_LEN); usb_fill_control_urb(dev->urb_ctl, udev, usb_sndctrlpipe(udev, 0), (void *)dev->ctl_req, dev->ctl_data, USB_PKT_LEN, cm109_urb_ctl_callback, dev); dev->urb_ctl->transfer_dma = dev->ctl_dma; dev->urb_ctl->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; dev->urb_ctl->dev = udev; /* find out the physical bus location */ usb_make_path(udev, dev->phys, sizeof(dev->phys)); strlcat(dev->phys, "/input0", sizeof(dev->phys)); /* register settings for the input device */ input_dev->name = nfo->name; input_dev->phys = dev->phys; usb_to_input_id(udev, &input_dev->id); input_dev->dev.parent = &intf->dev; input_set_drvdata(input_dev, dev); input_dev->open = cm109_input_open; input_dev->close = cm109_input_close; input_dev->event = cm109_input_ev; input_dev->keycode = dev->keymap; input_dev->keycodesize = sizeof(unsigned char); input_dev->keycodemax = ARRAY_SIZE(dev->keymap); input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_SND); input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE); /* register available key events */ for (i = 0; i < KEYMAP_SIZE; i++) { unsigned short k = keymap(i); dev->keymap[i] = k; __set_bit(k, input_dev->keybit); } __clear_bit(KEY_RESERVED, input_dev->keybit); error = input_register_device(dev->idev); if (error) goto err_out; usb_set_intfdata(intf, dev); return 0; err_out: input_free_device(input_dev); cm109_usb_cleanup(dev); return error; } static int cm109_usb_suspend(struct usb_interface *intf, pm_message_t message) { struct cm109_dev *dev = usb_get_intfdata(intf); dev_info(&intf->dev, 
"cm109: usb_suspend (event=%d)\n", message.event); guard(mutex)(&dev->pm_mutex); cm109_stop_traffic(dev); return 0; } static int cm109_usb_resume(struct usb_interface *intf) { struct cm109_dev *dev = usb_get_intfdata(intf); dev_info(&intf->dev, "cm109: usb_resume\n"); guard(mutex)(&dev->pm_mutex); cm109_restore_state(dev); return 0; } static int cm109_usb_pre_reset(struct usb_interface *intf) { struct cm109_dev *dev = usb_get_intfdata(intf); mutex_lock(&dev->pm_mutex); /* * Make sure input events don't try to toggle buzzer * while we are resetting */ dev->resetting = 1; smp_wmb(); cm109_stop_traffic(dev); return 0; } static int cm109_usb_post_reset(struct usb_interface *intf) { struct cm109_dev *dev = usb_get_intfdata(intf); dev->resetting = 0; smp_wmb(); cm109_restore_state(dev); mutex_unlock(&dev->pm_mutex); return 0; } static struct usb_driver cm109_driver = { .name = "cm109", .probe = cm109_usb_probe, .disconnect = cm109_usb_disconnect, .suspend = cm109_usb_suspend, .resume = cm109_usb_resume, .reset_resume = cm109_usb_resume, .pre_reset = cm109_usb_pre_reset, .post_reset = cm109_usb_post_reset, .id_table = cm109_usb_table, .supports_autosuspend = 1, }; static int __init cm109_select_keymap(void) { /* Load the phone keymap */ if (!strcasecmp(phone, "kip1000")) { keymap = keymap_kip1000; printk(KERN_INFO KBUILD_MODNAME ": " "Keymap for Komunikate KIP1000 phone loaded\n"); } else if (!strcasecmp(phone, "gtalk")) { keymap = keymap_gtalk; printk(KERN_INFO KBUILD_MODNAME ": " "Keymap for Genius G-talk phone loaded\n"); } else if (!strcasecmp(phone, "usbph01")) { keymap = keymap_usbph01; printk(KERN_INFO KBUILD_MODNAME ": " "Keymap for Allied-Telesis Corega USBPH01 phone loaded\n"); } else if (!strcasecmp(phone, "atcom")) { keymap = keymap_atcom; printk(KERN_INFO KBUILD_MODNAME ": " "Keymap for ATCom AU-100 phone loaded\n"); } else { printk(KERN_ERR KBUILD_MODNAME ": " "Unsupported phone: %s\n", phone); return -EINVAL; } return 0; } static int __init cm109_init(void) { int err; err = cm109_select_keymap(); if (err) return err; err = usb_register(&cm109_driver); if (err) return err; printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_DESC ": " DRIVER_VERSION " (C) " DRIVER_AUTHOR "\n"); return 0; } static void __exit cm109_exit(void) { usb_deregister(&cm109_driver); } module_init(cm109_init); module_exit(cm109_exit); MODULE_DEVICE_TABLE(usb, cm109_usb_table); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0-or-later /* * HID driver for Lenovo: * - ThinkPad USB Keyboard with TrackPoint (tpkbd) * - ThinkPad Compact Bluetooth Keyboard with TrackPoint (cptkbd) * - ThinkPad Compact USB Keyboard with TrackPoint (cptkbd) * - ThinkPad TrackPoint Keyboard II USB/Bluetooth (cptkbd/tpIIkbd) * * Copyright
(c) 2012 Bernhard Seibold * Copyright (c) 2014 Jamie Lentin <jm@lentin.co.uk> * * Linux IBM/Lenovo Scrollpoint mouse driver: * - IBM Scrollpoint III * - IBM Scrollpoint Pro * - IBM Scrollpoint Optical * - IBM Scrollpoint Optical 800dpi * - IBM Scrollpoint Optical 800dpi Pro * - Lenovo Scrollpoint Optical * * Copyright (c) 2012 Peter De Wachter <pdewacht@gmail.com> * Copyright (c) 2018 Peter Ganzhorn <peter.ganzhorn@gmail.com> */ /* */ #include <linux/module.h> #include <linux/sysfs.h> #include <linux/device.h> #include <linux/hid.h> #include <linux/input.h> #include <linux/leds.h> #include <linux/workqueue.h> #if IS_ENABLED(CONFIG_ACPI_PLATFORM_PROFILE) #include <linux/platform_profile.h> #endif /* CONFIG_ACPI_PLATFORM_PROFILE */ #include "hid-ids.h" /* Userspace expects F20 for mic-mute KEY_MICMUTE does not work */ #define LENOVO_KEY_MICMUTE KEY_F20 /* HID raw events for ThinkPad X12 Tabs*/ #define TP_X12_RAW_HOTKEY_FN_F4 0x00020003 #define TP_X12_RAW_HOTKEY_FN_F8 0x38001003 #define TP_X12_RAW_HOTKEY_FN_F10 0x00000803 #define TP_X12_RAW_HOTKEY_FN_F12 0x00000403 #define TP_X12_RAW_HOTKEY_FN_SPACE 0x18001003 struct lenovo_drvdata { u8 led_report[3]; /* Must be first for proper alignment */ int led_state; struct mutex led_report_mutex; struct led_classdev led_mute; struct led_classdev led_micmute; struct work_struct fn_lock_sync_work; struct hid_device *hdev; int press_to_select; int dragging; int release_to_select; int select_right; int sensitivity; int press_speed; /* 0: Up * 1: Down (undecided) * 2: Scrolling */ u8 middlebutton_state; bool fn_lock; bool middleclick_workaround_cptkbd; }; #define map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c)) #define TP10UBKBD_LED_OUTPUT_REPORT 9 #define TP10UBKBD_FN_LOCK_LED 0x54 #define TP10UBKBD_MUTE_LED 0x64 #define TP10UBKBD_MICMUTE_LED 0x74 #define TP10UBKBD_LED_OFF 1 #define TP10UBKBD_LED_ON 2 /* Function to report raw_events as key events*/ static inline void report_key_event(struct input_dev *input, int keycode) { input_report_key(input, keycode, 1); input_report_key(input, keycode, 0); input_sync(input); } static int lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code, enum led_brightness value) { struct lenovo_drvdata *data = hid_get_drvdata(hdev); int ret; mutex_lock(&data->led_report_mutex); data->led_report[0] = TP10UBKBD_LED_OUTPUT_REPORT; data->led_report[1] = led_code; data->led_report[2] = value ? TP10UBKBD_LED_ON : TP10UBKBD_LED_OFF; ret = hid_hw_raw_request(hdev, data->led_report[0], data->led_report, 3, HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); if (ret != 3) { if (ret != -ENODEV) hid_err(hdev, "Set LED output report error: %d\n", ret); ret = ret < 0 ? 
ret : -EIO; } else { ret = 0; } mutex_unlock(&data->led_report_mutex); return ret; } static void lenovo_tp10ubkbd_sync_fn_lock(struct work_struct *work) { struct lenovo_drvdata *data = container_of(work, struct lenovo_drvdata, fn_lock_sync_work); lenovo_led_set_tp10ubkbd(data->hdev, TP10UBKBD_FN_LOCK_LED, data->fn_lock); } static const __u8 lenovo_pro_dock_need_fixup_collection[] = { 0x05, 0x88, /* Usage Page (Vendor Usage Page 0x88) */ 0x09, 0x01, /* Usage (Vendor Usage 0x01) */ 0xa1, 0x01, /* Collection (Application) */ 0x85, 0x04, /* Report ID (4) */ 0x19, 0x00, /* Usage Minimum (0) */ 0x2a, 0xff, 0xff, /* Usage Maximum (65535) */ }; /* Broken ThinkPad TrackPoint II collection (Bluetooth mode) */ static const __u8 lenovo_tpIIbtkbd_need_fixup_collection[] = { 0x06, 0x00, 0xFF, /* Usage Page (Vendor Defined 0xFF00) */ 0x09, 0x01, /* Usage (0x01) */ 0xA1, 0x01, /* Collection (Application) */ 0x85, 0x05, /* Report ID (5) */ 0x1A, 0xF1, 0x00, /* Usage Minimum (0xF1) */ 0x2A, 0xFC, 0x00, /* Usage Maximum (0xFC) */ 0x15, 0x00, /* Logical Minimum (0) */ 0x25, 0x01, /* Logical Maximum (1) */ 0x75, 0x01, /* Report Size (1) */ 0x95, 0x0D, /* Report Count (13) */ 0x81, 0x02, /* Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) */ 0x95, 0x03, /* Report Count (3) */ 0x81, 0x01, /* Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */ }; static const __u8 *lenovo_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { switch (hdev->product) { case USB_DEVICE_ID_LENOVO_TPPRODOCK: /* the fixups that need to be done: * - get a reasonable usage max for the vendor collection * 0x8801 from the report ID 4 */ if (*rsize >= 153 && memcmp(&rdesc[140], lenovo_pro_dock_need_fixup_collection, sizeof(lenovo_pro_dock_need_fixup_collection)) == 0) { rdesc[151] = 0x01; rdesc[152] = 0x00; } break; case USB_DEVICE_ID_LENOVO_TPIIBTKBD: if (*rsize >= 263 && memcmp(&rdesc[234], lenovo_tpIIbtkbd_need_fixup_collection, sizeof(lenovo_tpIIbtkbd_need_fixup_collection)) == 0) { rdesc[244] = 0x00; /* usage minimum = 0x00 */ rdesc[247] = 0xff; /* usage maximum = 0xff */ rdesc[252] = 0xff; /* logical maximum = 0xff */ rdesc[254] = 0x08; /* report size = 0x08 */ rdesc[256] = 0x01; /* report count = 0x01 */ rdesc[258] = 0x00; /* input = 0x00 */ rdesc[260] = 0x01; /* report count (2) = 0x01 */ } break; } return rdesc; } static int lenovo_input_mapping_tpkbd(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { if (usage->hid == (HID_UP_BUTTON | 0x0010)) { /* This sub-device contains trackpoint, mark it */ hid_set_drvdata(hdev, (void *)1); map_key_clear(LENOVO_KEY_MICMUTE); return 1; } return 0; } static int lenovo_input_mapping_cptkbd(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { /* HID_UP_LNVENDOR = USB, HID_UP_MSVENDOR = BT */ if ((usage->hid & HID_USAGE_PAGE) == HID_UP_MSVENDOR || (usage->hid & HID_USAGE_PAGE) == HID_UP_LNVENDOR) { switch (usage->hid & HID_USAGE) { case 0x00f1: /* Fn-F4: Mic mute */ map_key_clear(LENOVO_KEY_MICMUTE); return 1; case 0x00f2: /* Fn-F5: Brightness down */ map_key_clear(KEY_BRIGHTNESSDOWN); return 1; case 0x00f3: /* Fn-F6: Brightness up */ map_key_clear(KEY_BRIGHTNESSUP); return 1; case 0x00f4: /* Fn-F7: External display (projector) */ map_key_clear(KEY_SWITCHVIDEOMODE); return 1; case 0x00f5: /* Fn-F8: Wireless */ map_key_clear(KEY_WLAN); return 1; case 0x00f6: /* Fn-F9: Control panel */ 
map_key_clear(KEY_CONFIG); return 1; case 0x00f8: /* Fn-F11: View open applications (3 boxes) */ map_key_clear(KEY_SCALE); return 1; case 0x00f9: /* Fn-F12: Open My computer (6 boxes) USB-only */ /* NB: This mapping is invented in raw_event below */ map_key_clear(KEY_FILE); return 1; case 0x00fa: /* Fn-Esc: Fn-lock toggle */ map_key_clear(KEY_FN_ESC); return 1; case 0x00fb: /* Middle mouse button (in native mode) */ map_key_clear(BTN_MIDDLE); return 1; } } /* Compatibility middle/wheel mappings should be ignored */ if (usage->hid == HID_GD_WHEEL) return -1; if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON && (usage->hid & HID_USAGE) == 0x003) return -1; if ((usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER && (usage->hid & HID_USAGE) == 0x238) return -1; /* Map wheel emulation reports: 0xffa1 = USB, 0xff10 = BT */ if ((usage->hid & HID_USAGE_PAGE) == 0xff100000 || (usage->hid & HID_USAGE_PAGE) == 0xffa10000) { field->flags |= HID_MAIN_ITEM_RELATIVE | HID_MAIN_ITEM_VARIABLE; field->logical_minimum = -127; field->logical_maximum = 127; switch (usage->hid & HID_USAGE) { case 0x0000: hid_map_usage(hi, usage, bit, max, EV_REL, REL_HWHEEL); return 1; case 0x0001: hid_map_usage(hi, usage, bit, max, EV_REL, REL_WHEEL); return 1; default: return -1; } } return 0; } static int lenovo_input_mapping_tpIIkbd(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { /* * 0xff0a0000 = USB, HID_UP_MSVENDOR = BT. * * In BT mode, there are two HID_UP_MSVENDOR pages. * Use only the page that contains report ID == 5. */ if (((usage->hid & HID_USAGE_PAGE) == 0xff0a0000 || (usage->hid & HID_USAGE_PAGE) == HID_UP_MSVENDOR) && field->report->id == 5) { switch (usage->hid & HID_USAGE) { case 0x00bb: /* Fn-F4: Mic mute */ map_key_clear(LENOVO_KEY_MICMUTE); return 1; case 0x00c3: /* Fn-F5: Brightness down */ map_key_clear(KEY_BRIGHTNESSDOWN); return 1; case 0x00c4: /* Fn-F6: Brightness up */ map_key_clear(KEY_BRIGHTNESSUP); return 1; case 0x00c1: /* Fn-F8: Notification center */ map_key_clear(KEY_NOTIFICATION_CENTER); return 1; case 0x00bc: /* Fn-F9: Control panel */ map_key_clear(KEY_CONFIG); return 1; case 0x00b6: /* Fn-F10: Bluetooth */ map_key_clear(KEY_BLUETOOTH); return 1; case 0x00b7: /* Fn-F11: Keyboard config */ map_key_clear(KEY_KEYBOARD); return 1; case 0x00b8: /* Fn-F12: User function */ map_key_clear(KEY_PROG1); return 1; case 0x00b9: /* Fn-PrtSc: Snipping tool */ map_key_clear(KEY_SELECTIVE_SCREENSHOT); return 1; case 0x00b5: /* Fn-Esc: Fn-lock toggle */ map_key_clear(KEY_FN_ESC); return 1; } } if ((usage->hid & HID_USAGE_PAGE) == 0xffa00000) { switch (usage->hid & HID_USAGE) { case 0x00fb: /* Middle mouse (in native USB mode) */ map_key_clear(BTN_MIDDLE); return 1; } } if ((usage->hid & HID_USAGE_PAGE) == HID_UP_MSVENDOR && field->report->id == 21) { switch (usage->hid & HID_USAGE) { case 0x0004: /* Middle mouse (in native Bluetooth mode) */ map_key_clear(BTN_MIDDLE); return 1; } } /* Compatibility middle/wheel mappings should be ignored */ if (usage->hid == HID_GD_WHEEL) return -1; if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON && (usage->hid & HID_USAGE) == 0x003) return -1; if ((usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER && (usage->hid & HID_USAGE) == 0x238) return -1; /* Map wheel emulation reports: 0xff10 */ if ((usage->hid & HID_USAGE_PAGE) == 0xff100000) { field->flags |= HID_MAIN_ITEM_RELATIVE | HID_MAIN_ITEM_VARIABLE; field->logical_minimum = -127; field->logical_maximum = 127; switch (usage->hid & HID_USAGE) { 
case 0x0000: hid_map_usage(hi, usage, bit, max, EV_REL, REL_HWHEEL); return 1; case 0x0001: hid_map_usage(hi, usage, bit, max, EV_REL, REL_WHEEL); return 1; default: return -1; } } return 0; } static int lenovo_input_mapping_scrollpoint(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { if (usage->hid == HID_GD_Z) { hid_map_usage(hi, usage, bit, max, EV_REL, REL_HWHEEL); return 1; } return 0; } static int lenovo_input_mapping_tp10_ultrabook_kbd(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { /* * The ThinkPad 10 Ultrabook Keyboard uses 0x000c0001 usage for * a bunch of keys which have no standard consumer page code. */ if (usage->hid == 0x000c0001) { switch (usage->usage_index) { case 8: /* Fn-Esc: Fn-lock toggle */ map_key_clear(KEY_FN_ESC); return 1; case 9: /* Fn-F4: Mic mute */ map_key_clear(LENOVO_KEY_MICMUTE); return 1; case 10: /* Fn-F7: Control panel */ map_key_clear(KEY_CONFIG); return 1; case 11: /* Fn-F8: Search (magnifier glass) */ map_key_clear(KEY_SEARCH); return 1; case 12: /* Fn-F10: Open My computer (6 boxes) */ map_key_clear(KEY_FILE); return 1; } } /* * The Ultrabook Keyboard sends a spurious F23 key-press when resuming * from suspend and it does not actually have a F23 key, ignore it. */ if (usage->hid == 0x00070072) return -1; return 0; } static int lenovo_input_mapping_x1_tab_kbd(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { /* * The ThinkPad X1 Tablet Thin Keyboard uses 0x000c0001 usage for * a bunch of keys which have no standard consumer page code. */ if (usage->hid == 0x000c0001) { switch (usage->usage_index) { case 0: /* Fn-F10: Enable/disable bluetooth */ map_key_clear(KEY_BLUETOOTH); return 1; case 1: /* Fn-F11: Keyboard settings */ map_key_clear(KEY_KEYBOARD); return 1; case 2: /* Fn-F12: User function / Cortana */ map_key_clear(KEY_MACRO1); return 1; case 3: /* Fn-PrtSc: Snipping tool */ map_key_clear(KEY_SELECTIVE_SCREENSHOT); return 1; case 8: /* Fn-Esc: Fn-lock toggle */ map_key_clear(KEY_FN_ESC); return 1; case 9: /* Fn-F4: Mute/unmute microphone */ map_key_clear(KEY_MICMUTE); return 1; case 10: /* Fn-F9: Settings */ map_key_clear(KEY_CONFIG); return 1; case 13: /* Fn-F7: Manage external displays */ map_key_clear(KEY_SWITCHVIDEOMODE); return 1; case 14: /* Fn-F8: Enable/disable wifi */ map_key_clear(KEY_WLAN); return 1; } } if (usage->hid == (HID_UP_KEYBOARD | 0x009a)) { map_key_clear(KEY_SYSRQ); return 1; } return 0; } static int lenovo_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { switch (hdev->product) { case USB_DEVICE_ID_LENOVO_TPKBD: return lenovo_input_mapping_tpkbd(hdev, hi, field, usage, bit, max); case USB_DEVICE_ID_LENOVO_CUSBKBD: case USB_DEVICE_ID_LENOVO_CBTKBD: return lenovo_input_mapping_cptkbd(hdev, hi, field, usage, bit, max); case USB_DEVICE_ID_LENOVO_TPIIUSBKBD: case USB_DEVICE_ID_LENOVO_TPIIBTKBD: return lenovo_input_mapping_tpIIkbd(hdev, hi, field, usage, bit, max); case USB_DEVICE_ID_IBM_SCROLLPOINT_III: case USB_DEVICE_ID_IBM_SCROLLPOINT_PRO: case USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL: case USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL: case USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO: case USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL: return lenovo_input_mapping_scrollpoint(hdev, hi, field, 
usage, bit, max); case USB_DEVICE_ID_LENOVO_TP10UBKBD: return lenovo_input_mapping_tp10_ultrabook_kbd(hdev, hi, field, usage, bit, max); case USB_DEVICE_ID_LENOVO_X12_TAB: case USB_DEVICE_ID_LENOVO_X12_TAB2: case USB_DEVICE_ID_LENOVO_X1_TAB: case USB_DEVICE_ID_LENOVO_X1_TAB3: return lenovo_input_mapping_x1_tab_kbd(hdev, hi, field, usage, bit, max); default: return 0; } } #undef map_key_clear /* Send a config command to the keyboard */ static int lenovo_send_cmd_cptkbd(struct hid_device *hdev, unsigned char byte2, unsigned char byte3) { int ret; unsigned char *buf; buf = kzalloc(3, GFP_KERNEL); if (!buf) return -ENOMEM; /* * Feature report 0x13 is used for USB, * output report 0x18 is used for Bluetooth. * buf[0] is ignored by hid_hw_raw_request. */ buf[0] = 0x18; buf[1] = byte2; buf[2] = byte3; switch (hdev->product) { case USB_DEVICE_ID_LENOVO_CUSBKBD: case USB_DEVICE_ID_LENOVO_TPIIUSBKBD: ret = hid_hw_raw_request(hdev, 0x13, buf, 3, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); break; case USB_DEVICE_ID_LENOVO_CBTKBD: case USB_DEVICE_ID_LENOVO_TPIIBTKBD: ret = hid_hw_output_report(hdev, buf, 3); break; default: ret = -EINVAL; break; } kfree(buf); return ret < 0 ? ret : 0; /* BT returns 0, USB returns sizeof(buf) */ } static void lenovo_features_set_cptkbd(struct hid_device *hdev) { int ret; struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev); /* * Tell the keyboard a driver understands it, and turn F7, F9, F11 into * regular keys */ ret = lenovo_send_cmd_cptkbd(hdev, 0x01, 0x03); if (ret) hid_warn(hdev, "Failed to switch F7/9/11 mode: %d\n", ret); /* Switch middle button to native mode */ ret = lenovo_send_cmd_cptkbd(hdev, 0x09, 0x01); if (ret) hid_warn(hdev, "Failed to switch middle button: %d\n", ret); ret = lenovo_send_cmd_cptkbd(hdev, 0x05, cptkbd_data->fn_lock); if (ret) hid_err(hdev, "Fn-lock setting failed: %d\n", ret); ret = lenovo_send_cmd_cptkbd(hdev, 0x02, cptkbd_data->sensitivity); if (ret) hid_err(hdev, "Sensitivity setting failed: %d\n", ret); } static ssize_t attr_fn_lock_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct lenovo_drvdata *data = hid_get_drvdata(hdev); return sysfs_emit(buf, "%u\n", data->fn_lock); } static ssize_t attr_fn_lock_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = to_hid_device(dev); struct lenovo_drvdata *data = hid_get_drvdata(hdev); int value, ret; if (kstrtoint(buf, 10, &value)) return -EINVAL; if (value < 0 || value > 1) return -EINVAL; data->fn_lock = !!value; switch (hdev->product) { case USB_DEVICE_ID_LENOVO_CUSBKBD: case USB_DEVICE_ID_LENOVO_CBTKBD: case USB_DEVICE_ID_LENOVO_TPIIUSBKBD: case USB_DEVICE_ID_LENOVO_TPIIBTKBD: lenovo_features_set_cptkbd(hdev); break; case USB_DEVICE_ID_LENOVO_X12_TAB: case USB_DEVICE_ID_LENOVO_X12_TAB2: case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: case USB_DEVICE_ID_LENOVO_X1_TAB3: ret = lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value); if (ret) return ret; break; } return count; } static ssize_t attr_sensitivity_show_cptkbd(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev); return sysfs_emit(buf, "%u\n", cptkbd_data->sensitivity); } static ssize_t attr_sensitivity_store_cptkbd(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = to_hid_device(dev); struct 
lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev); int value; if (kstrtoint(buf, 10, &value) || value < 1 || value > 255) return -EINVAL; cptkbd_data->sensitivity = value; lenovo_features_set_cptkbd(hdev); return count; } static ssize_t attr_middleclick_workaround_show_cptkbd(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev); return sysfs_emit(buf, "%u\n", cptkbd_data->middleclick_workaround_cptkbd); } static ssize_t attr_middleclick_workaround_store_cptkbd(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = to_hid_device(dev); struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev); int value; if (kstrtoint(buf, 10, &value)) return -EINVAL; if (value < 0 || value > 1) return -EINVAL; cptkbd_data->middleclick_workaround_cptkbd = !!value; return count; } static struct device_attribute dev_attr_fn_lock = __ATTR(fn_lock, S_IWUSR | S_IRUGO, attr_fn_lock_show, attr_fn_lock_store); static struct device_attribute dev_attr_sensitivity_cptkbd = __ATTR(sensitivity, S_IWUSR | S_IRUGO, attr_sensitivity_show_cptkbd, attr_sensitivity_store_cptkbd); static struct device_attribute dev_attr_middleclick_workaround_cptkbd = __ATTR(middleclick_workaround, S_IWUSR | S_IRUGO, attr_middleclick_workaround_show_cptkbd, attr_middleclick_workaround_store_cptkbd); static struct attribute *lenovo_attributes_cptkbd[] = { &dev_attr_fn_lock.attr, &dev_attr_sensitivity_cptkbd.attr, &dev_attr_middleclick_workaround_cptkbd.attr, NULL }; static const struct attribute_group lenovo_attr_group_cptkbd = { .attrs = lenovo_attributes_cptkbd, }; /* Function to handle Lenovo Thinkpad TAB X12's HID raw inputs for fn keys*/ static int lenovo_raw_event_TP_X12_tab(struct hid_device *hdev, u32 raw_data) { struct hid_input *hidinput; struct input_dev *input = NULL; /* Iterate through all associated input devices */ list_for_each_entry(hidinput, &hdev->inputs, list) { input = hidinput->input; if (!input) continue; switch (raw_data) { /* fn-F20 being used here for MIC mute*/ case TP_X12_RAW_HOTKEY_FN_F4: report_key_event(input, LENOVO_KEY_MICMUTE); return 1; /* Power-mode or Airplane mode will be called based on the device*/ case TP_X12_RAW_HOTKEY_FN_F8: /* * TP X12 TAB uses Fn-F8 calls Airplanemode * Whereas TP X12 TAB2 uses Fn-F8 for toggling * Power modes */ if (hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB) { report_key_event(input, KEY_RFKILL); return 1; } #if IS_ENABLED(CONFIG_ACPI_PLATFORM_PROFILE) else { platform_profile_cycle(); return 1; } #endif /* CONFIG_ACPI_PLATFORM_PROFILE */ return 0; case TP_X12_RAW_HOTKEY_FN_F10: /* TAB1 has PICKUP Phone and TAB2 use Snipping tool*/ (hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB) ? report_key_event(input, KEY_PICKUP_PHONE) : report_key_event(input, KEY_SELECTIVE_SCREENSHOT); return 1; case TP_X12_RAW_HOTKEY_FN_F12: /* BookMarks/STAR key*/ report_key_event(input, KEY_BOOKMARKS); return 1; case TP_X12_RAW_HOTKEY_FN_SPACE: /* Keyboard LED backlight toggle*/ report_key_event(input, KEY_KBDILLUMTOGGLE); return 1; default: break; } } return 0; } static int lenovo_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { /* * Compact USB keyboard's Fn-F12 report holds down many other keys, and * its own key is outside the usage page range. Remove extra * keypresses and remap to inside usage page. 
*/ if (unlikely(hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD && size == 3 && data[0] == 0x15 && data[1] == 0x94 && data[2] == 0x01)) { data[1] = 0x00; data[2] = 0x01; } /* * Lenovo TP X12 Tab KBD's Fn+XX is HID raw data defined. Report ID is 0x03 * e.g.: Raw data received for MIC mute is 0x00020003. */ if (unlikely((hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB || hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB2) && size >= 3 && report->id == 0x03)) return lenovo_raw_event_TP_X12_tab(hdev, le32_to_cpu(*(u32 *)data)); return 0; } static int lenovo_event_tp10ubkbd(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { struct lenovo_drvdata *data = hid_get_drvdata(hdev); if (usage->type == EV_KEY && usage->code == KEY_FN_ESC && value == 1) { /* * The user has toggled the Fn-lock state. Toggle our own * cached value of it and sync our value to the keyboard to * ensure things are in sync (the sycning should be a no-op). */ data->fn_lock = !data->fn_lock; schedule_work(&data->fn_lock_sync_work); } return 0; } static int lenovo_event_cptkbd(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev); if (cptkbd_data->middleclick_workaround_cptkbd) { /* "wheel" scroll events */ if (usage->type == EV_REL && (usage->code == REL_WHEEL || usage->code == REL_HWHEEL)) { /* Scroll events disable middle-click event */ cptkbd_data->middlebutton_state = 2; return 0; } /* Middle click events */ if (usage->type == EV_KEY && usage->code == BTN_MIDDLE) { if (value == 1) { cptkbd_data->middlebutton_state = 1; } else if (value == 0) { if (cptkbd_data->middlebutton_state == 1) { /* No scrolling inbetween, send middle-click */ input_event(field->hidinput->input, EV_KEY, BTN_MIDDLE, 1); input_sync(field->hidinput->input); input_event(field->hidinput->input, EV_KEY, BTN_MIDDLE, 0); input_sync(field->hidinput->input); } cptkbd_data->middlebutton_state = 0; } return 1; } } if (usage->type == EV_KEY && usage->code == KEY_FN_ESC && value == 1) { /* * The user has toggled the Fn-lock state. Toggle our own * cached value of it and sync our value to the keyboard to * ensure things are in sync (the syncing should be a no-op). */ cptkbd_data->fn_lock = !cptkbd_data->fn_lock; } return 0; } static int lenovo_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { if (!hid_get_drvdata(hdev)) return 0; switch (hdev->product) { case USB_DEVICE_ID_LENOVO_CUSBKBD: case USB_DEVICE_ID_LENOVO_CBTKBD: case USB_DEVICE_ID_LENOVO_TPIIUSBKBD: case USB_DEVICE_ID_LENOVO_TPIIBTKBD: return lenovo_event_cptkbd(hdev, field, usage, value); case USB_DEVICE_ID_LENOVO_X12_TAB: case USB_DEVICE_ID_LENOVO_X12_TAB2: case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: case USB_DEVICE_ID_LENOVO_X1_TAB3: return lenovo_event_tp10ubkbd(hdev, field, usage, value); default: return 0; } } static int lenovo_features_set_tpkbd(struct hid_device *hdev) { struct hid_report *report; struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[4]; report->field[0]->value[0] = data_pointer->press_to_select ? 0x01 : 0x02; report->field[0]->value[0] |= data_pointer->dragging ? 0x04 : 0x08; report->field[0]->value[0] |= data_pointer->release_to_select ? 0x10 : 0x20; report->field[0]->value[0] |= data_pointer->select_right ? 
0x80 : 0x40; report->field[1]->value[0] = 0x03; // unknown setting, imitate windows driver report->field[2]->value[0] = data_pointer->sensitivity; report->field[3]->value[0] = data_pointer->press_speed; hid_hw_request(hdev, report, HID_REQ_SET_REPORT); return 0; } static ssize_t attr_press_to_select_show_tpkbd(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); return sysfs_emit(buf, "%u\n", data_pointer->press_to_select); } static ssize_t attr_press_to_select_store_tpkbd(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = to_hid_device(dev); struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); int value; if (kstrtoint(buf, 10, &value)) return -EINVAL; if (value < 0 || value > 1) return -EINVAL; data_pointer->press_to_select = value; lenovo_features_set_tpkbd(hdev); return count; } static ssize_t attr_dragging_show_tpkbd(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); return sysfs_emit(buf, "%u\n", data_pointer->dragging); } static ssize_t attr_dragging_store_tpkbd(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = to_hid_device(dev); struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); int value; if (kstrtoint(buf, 10, &value)) return -EINVAL; if (value < 0 || value > 1) return -EINVAL; data_pointer->dragging = value; lenovo_features_set_tpkbd(hdev); return count; } static ssize_t attr_release_to_select_show_tpkbd(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); return sysfs_emit(buf, "%u\n", data_pointer->release_to_select); } static ssize_t attr_release_to_select_store_tpkbd(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = to_hid_device(dev); struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); int value; if (kstrtoint(buf, 10, &value)) return -EINVAL; if (value < 0 || value > 1) return -EINVAL; data_pointer->release_to_select = value; lenovo_features_set_tpkbd(hdev); return count; } static ssize_t attr_select_right_show_tpkbd(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); return sysfs_emit(buf, "%u\n", data_pointer->select_right); } static ssize_t attr_select_right_store_tpkbd(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = to_hid_device(dev); struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); int value; if (kstrtoint(buf, 10, &value)) return -EINVAL; if (value < 0 || value > 1) return -EINVAL; data_pointer->select_right = value; lenovo_features_set_tpkbd(hdev); return count; } static ssize_t attr_sensitivity_show_tpkbd(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); return sysfs_emit(buf, "%u\n", data_pointer->sensitivity); } static ssize_t attr_sensitivity_store_tpkbd(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = to_hid_device(dev); struct 
lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); int value; if (kstrtoint(buf, 10, &value) || value < 1 || value > 255) return -EINVAL; data_pointer->sensitivity = value; lenovo_features_set_tpkbd(hdev); return count; } static ssize_t attr_press_speed_show_tpkbd(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); return sysfs_emit(buf, "%u\n", data_pointer->press_speed); } static ssize_t attr_press_speed_store_tpkbd(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = to_hid_device(dev); struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); int value; if (kstrtoint(buf, 10, &value) || value < 1 || value > 255) return -EINVAL; data_pointer->press_speed = value; lenovo_features_set_tpkbd(hdev); return count; } static struct device_attribute dev_attr_press_to_select_tpkbd = __ATTR(press_to_select, S_IWUSR | S_IRUGO, attr_press_to_select_show_tpkbd, attr_press_to_select_store_tpkbd); static struct device_attribute dev_attr_dragging_tpkbd = __ATTR(dragging, S_IWUSR | S_IRUGO, attr_dragging_show_tpkbd, attr_dragging_store_tpkbd); static struct device_attribute dev_attr_release_to_select_tpkbd = __ATTR(release_to_select, S_IWUSR | S_IRUGO, attr_release_to_select_show_tpkbd, attr_release_to_select_store_tpkbd); static struct device_attribute dev_attr_select_right_tpkbd = __ATTR(select_right, S_IWUSR | S_IRUGO, attr_select_right_show_tpkbd, attr_select_right_store_tpkbd); static struct device_attribute dev_attr_sensitivity_tpkbd = __ATTR(sensitivity, S_IWUSR | S_IRUGO, attr_sensitivity_show_tpkbd, attr_sensitivity_store_tpkbd); static struct device_attribute dev_attr_press_speed_tpkbd = __ATTR(press_speed, S_IWUSR | S_IRUGO, attr_press_speed_show_tpkbd, attr_press_speed_store_tpkbd); static struct attribute *lenovo_attributes_tpkbd[] = { &dev_attr_press_to_select_tpkbd.attr, &dev_attr_dragging_tpkbd.attr, &dev_attr_release_to_select_tpkbd.attr, &dev_attr_select_right_tpkbd.attr, &dev_attr_sensitivity_tpkbd.attr, &dev_attr_press_speed_tpkbd.attr, NULL }; static const struct attribute_group lenovo_attr_group_tpkbd = { .attrs = lenovo_attributes_tpkbd, }; static void lenovo_led_set_tpkbd(struct hid_device *hdev) { struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); struct hid_report *report; report = hdev->report_enum[HID_OUTPUT_REPORT].report_id_hash[3]; report->field[0]->value[0] = (data_pointer->led_state >> 0) & 1; report->field[0]->value[1] = (data_pointer->led_state >> 1) & 1; hid_hw_request(hdev, report, HID_REQ_SET_REPORT); } static int lenovo_led_brightness_set(struct led_classdev *led_cdev, enum led_brightness value) { struct device *dev = led_cdev->dev->parent; struct hid_device *hdev = to_hid_device(dev); struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); static const u8 tp10ubkbd_led[] = { TP10UBKBD_MUTE_LED, TP10UBKBD_MICMUTE_LED }; int led_nr = 0; int ret = 0; if (led_cdev == &data_pointer->led_micmute) led_nr = 1; if (value == LED_OFF) data_pointer->led_state &= ~(1 << led_nr); else data_pointer->led_state |= 1 << led_nr; switch (hdev->product) { case USB_DEVICE_ID_LENOVO_TPKBD: lenovo_led_set_tpkbd(hdev); break; case USB_DEVICE_ID_LENOVO_X12_TAB: case USB_DEVICE_ID_LENOVO_X12_TAB2: case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: case USB_DEVICE_ID_LENOVO_X1_TAB3: ret = lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value); break; } return ret; } static 
int lenovo_register_leds(struct hid_device *hdev) { struct lenovo_drvdata *data = hid_get_drvdata(hdev); size_t name_sz = strlen(dev_name(&hdev->dev)) + 16; char *name_mute, *name_micm; int ret; name_mute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL); name_micm = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL); if (name_mute == NULL || name_micm == NULL) { hid_err(hdev, "Could not allocate memory for led data\n"); return -ENOMEM; } snprintf(name_mute, name_sz, "%s:amber:mute", dev_name(&hdev->dev)); snprintf(name_micm, name_sz, "%s:amber:micmute", dev_name(&hdev->dev)); data->led_mute.name = name_mute; data->led_mute.default_trigger = "audio-mute"; data->led_mute.brightness_set_blocking = lenovo_led_brightness_set; data->led_mute.max_brightness = 1; data->led_mute.flags = LED_HW_PLUGGABLE; data->led_mute.dev = &hdev->dev; ret = led_classdev_register(&hdev->dev, &data->led_mute); if (ret < 0) return ret; data->led_micmute.name = name_micm; data->led_micmute.default_trigger = "audio-micmute"; data->led_micmute.brightness_set_blocking = lenovo_led_brightness_set; data->led_micmute.max_brightness = 1; data->led_micmute.flags = LED_HW_PLUGGABLE; data->led_micmute.dev = &hdev->dev; ret = led_classdev_register(&hdev->dev, &data->led_micmute); if (ret < 0) { led_classdev_unregister(&data->led_mute); return ret; } return 0; } static int lenovo_probe_tpkbd(struct hid_device *hdev) { struct lenovo_drvdata *data_pointer; int i, ret; /* * Only register extra settings against subdevice where input_mapping * set drvdata to 1, i.e. the trackpoint. */ if (!hid_get_drvdata(hdev)) return 0; hid_set_drvdata(hdev, NULL); /* Validate required reports. */ for (i = 0; i < 4; i++) { if (!hid_validate_values(hdev, HID_FEATURE_REPORT, 4, i, 1)) return -ENODEV; } if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 3, 0, 2)) return -ENODEV; ret = sysfs_create_group(&hdev->dev.kobj, &lenovo_attr_group_tpkbd); if (ret) hid_warn(hdev, "Could not create sysfs group: %d\n", ret); data_pointer = devm_kzalloc(&hdev->dev, sizeof(struct lenovo_drvdata), GFP_KERNEL); if (data_pointer == NULL) { hid_err(hdev, "Could not allocate memory for driver data\n"); ret = -ENOMEM; goto err; } // set same default values as windows driver data_pointer->sensitivity = 0xa0; data_pointer->press_speed = 0x38; hid_set_drvdata(hdev, data_pointer); ret = lenovo_register_leds(hdev); if (ret) goto err; lenovo_features_set_tpkbd(hdev); return 0; err: sysfs_remove_group(&hdev->dev.kobj, &lenovo_attr_group_tpkbd); return ret; } static int lenovo_probe_cptkbd(struct hid_device *hdev) { int ret; struct lenovo_drvdata *cptkbd_data; /* All the custom action happens on the USBMOUSE device for USB */ if (((hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD) || (hdev->product == USB_DEVICE_ID_LENOVO_TPIIUSBKBD)) && hdev->type != HID_TYPE_USBMOUSE) { hid_dbg(hdev, "Ignoring keyboard half of device\n"); return 0; } cptkbd_data = devm_kzalloc(&hdev->dev, sizeof(*cptkbd_data), GFP_KERNEL); if (cptkbd_data == NULL) { hid_err(hdev, "can't alloc keyboard descriptor\n"); return -ENOMEM; } hid_set_drvdata(hdev, cptkbd_data); /* Set keyboard settings to known state */ cptkbd_data->middlebutton_state = 0; cptkbd_data->fn_lock = true; cptkbd_data->sensitivity = 0x05; cptkbd_data->middleclick_workaround_cptkbd = true; lenovo_features_set_cptkbd(hdev); ret = sysfs_create_group(&hdev->dev.kobj, &lenovo_attr_group_cptkbd); if (ret) hid_warn(hdev, "Could not create sysfs group: %d\n", ret); return 0; } static struct attribute *lenovo_attributes_tp10ubkbd[] = { 
&dev_attr_fn_lock.attr, NULL }; static const struct attribute_group lenovo_attr_group_tp10ubkbd = { .attrs = lenovo_attributes_tp10ubkbd, }; static int lenovo_probe_tp10ubkbd(struct hid_device *hdev) { struct hid_report_enum *rep_enum; struct lenovo_drvdata *data; struct hid_report *rep; bool found; int ret; /* * The LEDs and the Fn-lock functionality use output report 9, * with an application of 0xffa0001, add the LEDs on the interface * with this output report. */ found = false; rep_enum = &hdev->report_enum[HID_OUTPUT_REPORT]; list_for_each_entry(rep, &rep_enum->report_list, list) { if (rep->application == 0xffa00001) found = true; } if (!found) return 0; data = devm_kzalloc(&hdev->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; mutex_init(&data->led_report_mutex); INIT_WORK(&data->fn_lock_sync_work, lenovo_tp10ubkbd_sync_fn_lock); data->hdev = hdev; hid_set_drvdata(hdev, data); /* * The Thinkpad 10 ultrabook USB kbd dock's Fn-lock defaults to on. * We cannot read the state, only set it, so we force it to on here * (which should be a no-op) to make sure that our state matches the * keyboard's FN-lock state. This is the same as what Windows does. * * For X12 TAB and TAB2, the default windows behaviour Fn-lock Off. * Adding additional check to ensure the behaviour in case of * Thinkpad X12 Tabs. */ data->fn_lock = !(hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB || hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB2); lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, data->fn_lock); ret = sysfs_create_group(&hdev->dev.kobj, &lenovo_attr_group_tp10ubkbd); if (ret) return ret; ret = lenovo_register_leds(hdev); if (ret) goto err; return 0; err: sysfs_remove_group(&hdev->dev.kobj, &lenovo_attr_group_tp10ubkbd); return ret; } static int lenovo_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "hid_parse failed\n"); goto err; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) { hid_err(hdev, "hid_hw_start failed\n"); goto err; } switch (hdev->product) { case USB_DEVICE_ID_LENOVO_TPKBD: ret = lenovo_probe_tpkbd(hdev); break; case USB_DEVICE_ID_LENOVO_CUSBKBD: case USB_DEVICE_ID_LENOVO_CBTKBD: case USB_DEVICE_ID_LENOVO_TPIIUSBKBD: case USB_DEVICE_ID_LENOVO_TPIIBTKBD: ret = lenovo_probe_cptkbd(hdev); break; case USB_DEVICE_ID_LENOVO_X12_TAB: case USB_DEVICE_ID_LENOVO_X12_TAB2: case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: case USB_DEVICE_ID_LENOVO_X1_TAB3: ret = lenovo_probe_tp10ubkbd(hdev); break; default: ret = 0; break; } if (ret) goto err_hid; return 0; err_hid: hid_hw_stop(hdev); err: return ret; } #ifdef CONFIG_PM static int lenovo_reset_resume(struct hid_device *hdev) { switch (hdev->product) { case USB_DEVICE_ID_LENOVO_CUSBKBD: case USB_DEVICE_ID_LENOVO_TPIIUSBKBD: if (hdev->type == HID_TYPE_USBMOUSE) lenovo_features_set_cptkbd(hdev); break; default: break; } return 0; } #endif static void lenovo_remove_tpkbd(struct hid_device *hdev) { struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); /* * Only the trackpoint half of the keyboard has drvdata and stuff that * needs unregistering. 
*/ if (data_pointer == NULL) return; sysfs_remove_group(&hdev->dev.kobj, &lenovo_attr_group_tpkbd); led_classdev_unregister(&data_pointer->led_micmute); led_classdev_unregister(&data_pointer->led_mute); } static void lenovo_remove_cptkbd(struct hid_device *hdev) { sysfs_remove_group(&hdev->dev.kobj, &lenovo_attr_group_cptkbd); } static void lenovo_remove_tp10ubkbd(struct hid_device *hdev) { struct lenovo_drvdata *data = hid_get_drvdata(hdev); if (data == NULL) return; led_classdev_unregister(&data->led_micmute); led_classdev_unregister(&data->led_mute); sysfs_remove_group(&hdev->dev.kobj, &lenovo_attr_group_tp10ubkbd); cancel_work_sync(&data->fn_lock_sync_work); } static void lenovo_remove(struct hid_device *hdev) { switch (hdev->product) { case USB_DEVICE_ID_LENOVO_TPKBD: lenovo_remove_tpkbd(hdev); break; case USB_DEVICE_ID_LENOVO_CUSBKBD: case USB_DEVICE_ID_LENOVO_CBTKBD: case USB_DEVICE_ID_LENOVO_TPIIUSBKBD: case USB_DEVICE_ID_LENOVO_TPIIBTKBD: lenovo_remove_cptkbd(hdev); break; case USB_DEVICE_ID_LENOVO_X12_TAB: case USB_DEVICE_ID_LENOVO_X12_TAB2: case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: case USB_DEVICE_ID_LENOVO_X1_TAB3: lenovo_remove_tp10ubkbd(hdev); break; } hid_hw_stop(hdev); } static int lenovo_input_configured(struct hid_device *hdev, struct hid_input *hi) { switch (hdev->product) { case USB_DEVICE_ID_LENOVO_TPKBD: case USB_DEVICE_ID_LENOVO_CUSBKBD: case USB_DEVICE_ID_LENOVO_CBTKBD: case USB_DEVICE_ID_LENOVO_TPIIUSBKBD: case USB_DEVICE_ID_LENOVO_TPIIBTKBD: if (test_bit(EV_REL, hi->input->evbit)) { /* set only for trackpoint device */ __set_bit(INPUT_PROP_POINTER, hi->input->propbit); __set_bit(INPUT_PROP_POINTING_STICK, hi->input->propbit); } break; } return 0; } static const struct hid_device_id lenovo_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) }, { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPIIUSBKBD) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPIIBTKBD) }, { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) }, { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_III) }, { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_PRO) }, { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_OPTICAL) }, { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL) }, { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO) }, { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL) }, { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TP10UBKBD) }, /* * Note bind to the HID_GROUP_GENERIC group, so that we only bind to the keyboard * part, while letting hid-multitouch.c handle the touchpad and trackpoint. 
 */
	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
		     USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB) },
	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
		     USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB3) },
	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
		     USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X12_TAB) },
	{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
		     USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X12_TAB2) },
	{ }
};

MODULE_DEVICE_TABLE(hid, lenovo_devices);

static struct hid_driver lenovo_driver = {
	.name = "lenovo",
	.id_table = lenovo_devices,
	.input_configured = lenovo_input_configured,
	.input_mapping = lenovo_input_mapping,
	.probe = lenovo_probe,
	.remove = lenovo_remove,
	.raw_event = lenovo_raw_event,
	.event = lenovo_event,
	.report_fixup = lenovo_report_fixup,
#ifdef CONFIG_PM
	.reset_resume = lenovo_reset_resume,
#endif
};
module_hid_driver(lenovo_driver);

MODULE_DESCRIPTION("HID driver for IBM/Lenovo");
MODULE_LICENSE("GPL");
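lenovo_raw_event() above notes that the X12 tablet keyboards deliver Fn hotkeys as raw reports with report ID 0x03, which the driver folds into little-endian constants such as TP_X12_RAW_HOTKEY_FN_F4 (0x00020003) via le32_to_cpu(). The standalone sketch below only illustrates that byte-order folding; the helper raw_hotkey() and the example report bytes are invented here and are not part of the driver.

/*
 * Illustration (assumption, not driver code) of how a 4-byte raw report
 * starting with report ID 0x03 maps onto the little-endian hotkey
 * constants that lenovo_raw_event_TP_X12_tab() switches on.
 */
#include <stdint.h>
#include <stdio.h>

#define TP_X12_RAW_HOTKEY_FN_F4	0x00020003u	/* mic mute, from the driver */

/* Interpret the report bytes as a little-endian 32-bit value. */
static uint32_t raw_hotkey(const uint8_t data[4])
{
	return (uint32_t)data[0] |
	       ((uint32_t)data[1] << 8) |
	       ((uint32_t)data[2] << 16) |
	       ((uint32_t)data[3] << 24);
}

int main(void)
{
	/* Report ID 0x03 followed by an example Fn-F4 payload. */
	const uint8_t report[4] = { 0x03, 0x00, 0x02, 0x00 };

	if (raw_hotkey(report) == TP_X12_RAW_HOTKEY_FN_F4)
		printf("Fn-F4 (mic mute) hotkey decoded\n");
	return 0;
}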
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/include/linux/sunrpc/addr.h
 *
 * Various routines for copying and comparing sockaddrs and for
 * converting them to and from presentation format.
 */
#ifndef _LINUX_SUNRPC_ADDR_H
#define _LINUX_SUNRPC_ADDR_H

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <net/ipv6.h>

size_t		rpc_ntop(const struct sockaddr *, char *, const size_t);
size_t		rpc_pton(struct net *, const char *, const size_t,
			 struct sockaddr *, const size_t);
char *		rpc_sockaddr2uaddr(const struct sockaddr *, gfp_t);
size_t		rpc_uaddr2sockaddr(struct net *, const char *, const size_t,
				   struct sockaddr *, const size_t);

static inline unsigned short rpc_get_port(const struct sockaddr *sap)
{
	switch (sap->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sap)->sin_port);
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
	}
	return 0;
}

static inline void rpc_set_port(struct sockaddr *sap,
				const unsigned short port)
{
	switch (sap->sa_family) {
	case AF_INET:
		((struct sockaddr_in *)sap)->sin_port = htons(port);
		break;
	case AF_INET6:
		((struct sockaddr_in6 *)sap)->sin6_port = htons(port);
		break;
	}
}

#define IPV6_SCOPE_DELIMITER		'%'
#define IPV6_SCOPE_ID_LEN		sizeof("%nnnnnnnnnn")

static inline bool rpc_cmp_addr4(const struct sockaddr *sap1,
				 const struct sockaddr *sap2)
{
	const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sap1;
	const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sap2;

	return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr;
}

static inline bool __rpc_copy_addr4(struct sockaddr *dst,
				    const struct sockaddr *src)
{
	const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
	struct sockaddr_in *dsin = (struct sockaddr_in *) dst;

	dsin->sin_family = ssin->sin_family;
	dsin->sin_addr.s_addr = ssin->sin_addr.s_addr;
	return true;
}

#if IS_ENABLED(CONFIG_IPV6)
static inline bool rpc_cmp_addr6(const struct sockaddr *sap1,
				 const struct sockaddr *sap2)
{
	const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sap1;
	const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sap2;

	if (!ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr))
		return false;
	else if (ipv6_addr_type(&sin1->sin6_addr) & IPV6_ADDR_LINKLOCAL)
		return sin1->sin6_scope_id == sin2->sin6_scope_id;

	return true;
}

static inline bool __rpc_copy_addr6(struct sockaddr *dst,
				    const struct sockaddr *src)
{
	const struct sockaddr_in6 *ssin6 = (const struct sockaddr_in6 *) src;
	struct sockaddr_in6 *dsin6 = (struct sockaddr_in6 *) dst;

	dsin6->sin6_family = ssin6->sin6_family;
	dsin6->sin6_addr = ssin6->sin6_addr;
	dsin6->sin6_scope_id = ssin6->sin6_scope_id;
	return true;
}
#else	/* !IS_ENABLED(CONFIG_IPV6) */
static inline bool rpc_cmp_addr6(const struct sockaddr *sap1,
				 const struct sockaddr *sap2)
{
	return false;
}

static inline bool __rpc_copy_addr6(struct sockaddr *dst,
				    const struct sockaddr *src)
{
	return false;
}
#endif	/* !IS_ENABLED(CONFIG_IPV6) */

/**
 * rpc_cmp_addr - compare the address portion of two sockaddrs.
 * @sap1: first sockaddr
 * @sap2: second sockaddr
 *
 * Just compares the family and address portion. Ignores port, but
 * compares the scope if it's a link-local address.
 *
 * Returns true if the addrs are equal, false if they aren't.
 */
static inline bool rpc_cmp_addr(const struct sockaddr *sap1,
				const struct sockaddr *sap2)
{
	if (sap1->sa_family == sap2->sa_family) {
		switch (sap1->sa_family) {
		case AF_INET:
			return rpc_cmp_addr4(sap1, sap2);
		case AF_INET6:
			return rpc_cmp_addr6(sap1, sap2);
		}
	}
	return false;
}

/**
 * rpc_cmp_addr_port - compare the address and port number of two sockaddrs.
 * @sap1: first sockaddr
 * @sap2: second sockaddr
 */
static inline bool rpc_cmp_addr_port(const struct sockaddr *sap1,
				     const struct sockaddr *sap2)
{
	if (!rpc_cmp_addr(sap1, sap2))
		return false;
	return rpc_get_port(sap1) == rpc_get_port(sap2);
}

/**
 * rpc_copy_addr - copy the address portion of one sockaddr to another
 * @dst: destination sockaddr
 * @src: source sockaddr
 *
 * Just copies the address portion and family. Ignores port, scope, etc.
 * Caller is responsible for making certain that dst is large enough to hold
 * the address in src. Returns true if address family is supported. Returns
 * false otherwise.
 */
static inline bool rpc_copy_addr(struct sockaddr *dst,
				 const struct sockaddr *src)
{
	switch (src->sa_family) {
	case AF_INET:
		return __rpc_copy_addr4(dst, src);
	case AF_INET6:
		return __rpc_copy_addr6(dst, src);
	}
	return false;
}

/**
 * rpc_get_scope_id - return scopeid for a given sockaddr
 * @sa: sockaddr to get scopeid from
 *
 * Returns the value of the sin6_scope_id for AF_INET6 addrs, or 0 if
 * not an AF_INET6 address.
 */
static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
{
	if (sa->sa_family != AF_INET6)
		return 0;

	return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
}

#endif /* _LINUX_SUNRPC_ADDR_H */
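/*
 * Illustrative usage sketch (not part of addr.h): one way a caller might
 * combine the helpers above. The function name and the "adopt the known
 * port" policy are hypothetical, chosen only to exercise the API.
 */
static inline bool example_same_rpc_server(const struct sockaddr *known,
					   struct sockaddr *candidate)
{
	/* Address (and link-local scope) must match; ports may differ. */
	if (!rpc_cmp_addr(known, candidate))
		return false;

	/* If only the port differs, adopt the known port on the candidate. */
	if (!rpc_cmp_addr_port(known, candidate))
		rpc_set_port(candidate, rpc_get_port(known));

	return true;
}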
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2010 Red Hat, Inc. * Copyright (C) 2016-2023 Christoph Hellwig. 
*/ #include <linux/module.h> #include <linux/compiler.h> #include <linux/fs.h> #include <linux/iomap.h> #include <linux/pagemap.h> #include <linux/uio.h> #include <linux/buffer_head.h> #include <linux/dax.h> #include <linux/writeback.h> #include <linux/list_sort.h> #include <linux/swap.h> #include <linux/bio.h> #include <linux/sched/signal.h> #include <linux/migrate.h> #include "trace.h" #include "../internal.h" #define IOEND_BATCH_SIZE 4096 /* * Structure allocated for each folio to track per-block uptodate, dirty state * and I/O completions. */ struct iomap_folio_state { spinlock_t state_lock; unsigned int read_bytes_pending; atomic_t write_bytes_pending; /* * Each block has two bits in this bitmap: * Bits [0..blocks_per_folio) has the uptodate status. * Bits [b_p_f...(2*b_p_f)) has the dirty status. */ unsigned long state[]; }; static struct bio_set iomap_ioend_bioset; static inline bool ifs_is_fully_uptodate(struct folio *folio, struct iomap_folio_state *ifs) { struct inode *inode = folio->mapping->host; return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio)); } static inline bool ifs_block_is_uptodate(struct iomap_folio_state *ifs, unsigned int block) { return test_bit(block, ifs->state); } static bool ifs_set_range_uptodate(struct folio *folio, struct iomap_folio_state *ifs, size_t off, size_t len) { struct inode *inode = folio->mapping->host; unsigned int first_blk = off >> inode->i_blkbits; unsigned int last_blk = (off + len - 1) >> inode->i_blkbits; unsigned int nr_blks = last_blk - first_blk + 1; bitmap_set(ifs->state, first_blk, nr_blks); return ifs_is_fully_uptodate(folio, ifs); } static void iomap_set_range_uptodate(struct folio *folio, size_t off, size_t len) { struct iomap_folio_state *ifs = folio->private; unsigned long flags; bool uptodate = true; if (ifs) { spin_lock_irqsave(&ifs->state_lock, flags); uptodate = ifs_set_range_uptodate(folio, ifs, off, len); spin_unlock_irqrestore(&ifs->state_lock, flags); } if (uptodate) folio_mark_uptodate(folio); } static inline bool ifs_block_is_dirty(struct folio *folio, struct iomap_folio_state *ifs, int block) { struct inode *inode = folio->mapping->host; unsigned int blks_per_folio = i_blocks_per_folio(inode, folio); return test_bit(block + blks_per_folio, ifs->state); } static unsigned ifs_find_dirty_range(struct folio *folio, struct iomap_folio_state *ifs, u64 *range_start, u64 range_end) { struct inode *inode = folio->mapping->host; unsigned start_blk = offset_in_folio(folio, *range_start) >> inode->i_blkbits; unsigned end_blk = min_not_zero( offset_in_folio(folio, range_end) >> inode->i_blkbits, i_blocks_per_folio(inode, folio)); unsigned nblks = 1; while (!ifs_block_is_dirty(folio, ifs, start_blk)) if (++start_blk == end_blk) return 0; while (start_blk + nblks < end_blk) { if (!ifs_block_is_dirty(folio, ifs, start_blk + nblks)) break; nblks++; } *range_start = folio_pos(folio) + (start_blk << inode->i_blkbits); return nblks << inode->i_blkbits; } static unsigned iomap_find_dirty_range(struct folio *folio, u64 *range_start, u64 range_end) { struct iomap_folio_state *ifs = folio->private; if (*range_start >= range_end) return 0; if (ifs) return ifs_find_dirty_range(folio, ifs, range_start, range_end); return range_end - *range_start; } static void ifs_clear_range_dirty(struct folio *folio, struct iomap_folio_state *ifs, size_t off, size_t len) { struct inode *inode = folio->mapping->host; unsigned int blks_per_folio = i_blocks_per_folio(inode, folio); unsigned int first_blk = (off >> inode->i_blkbits); unsigned int 
last_blk = (off + len - 1) >> inode->i_blkbits; unsigned int nr_blks = last_blk - first_blk + 1; unsigned long flags; spin_lock_irqsave(&ifs->state_lock, flags); bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks); spin_unlock_irqrestore(&ifs->state_lock, flags); } static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len) { struct iomap_folio_state *ifs = folio->private; if (ifs) ifs_clear_range_dirty(folio, ifs, off, len); } static void ifs_set_range_dirty(struct folio *folio, struct iomap_folio_state *ifs, size_t off, size_t len) { struct inode *inode = folio->mapping->host; unsigned int blks_per_folio = i_blocks_per_folio(inode, folio); unsigned int first_blk = (off >> inode->i_blkbits); unsigned int last_blk = (off + len - 1) >> inode->i_blkbits; unsigned int nr_blks = last_blk - first_blk + 1; unsigned long flags; spin_lock_irqsave(&ifs->state_lock, flags); bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks); spin_unlock_irqrestore(&ifs->state_lock, flags); } static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len) { struct iomap_folio_state *ifs = folio->private; if (ifs) ifs_set_range_dirty(folio, ifs, off, len); } static struct iomap_folio_state *ifs_alloc(struct inode *inode, struct folio *folio, unsigned int flags) { struct iomap_folio_state *ifs = folio->private; unsigned int nr_blocks = i_blocks_per_folio(inode, folio); gfp_t gfp; if (ifs || nr_blocks <= 1) return ifs; if (flags & IOMAP_NOWAIT) gfp = GFP_NOWAIT; else gfp = GFP_NOFS | __GFP_NOFAIL; /* * ifs->state tracks two sets of state flags when the * filesystem block size is smaller than the folio size. * The first state tracks per-block uptodate and the * second tracks per-block dirty state. */ ifs = kzalloc(struct_size(ifs, state, BITS_TO_LONGS(2 * nr_blocks)), gfp); if (!ifs) return ifs; spin_lock_init(&ifs->state_lock); if (folio_test_uptodate(folio)) bitmap_set(ifs->state, 0, nr_blocks); if (folio_test_dirty(folio)) bitmap_set(ifs->state, nr_blocks, nr_blocks); folio_attach_private(folio, ifs); return ifs; } static void ifs_free(struct folio *folio) { struct iomap_folio_state *ifs = folio_detach_private(folio); if (!ifs) return; WARN_ON_ONCE(ifs->read_bytes_pending != 0); WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending)); WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) != folio_test_uptodate(folio)); kfree(ifs); } /* * Calculate the range inside the folio that we actually need to read. */ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio, loff_t *pos, loff_t length, size_t *offp, size_t *lenp) { struct iomap_folio_state *ifs = folio->private; loff_t orig_pos = *pos; loff_t isize = i_size_read(inode); unsigned block_bits = inode->i_blkbits; unsigned block_size = (1 << block_bits); size_t poff = offset_in_folio(folio, *pos); size_t plen = min_t(loff_t, folio_size(folio) - poff, length); size_t orig_plen = plen; unsigned first = poff >> block_bits; unsigned last = (poff + plen - 1) >> block_bits; /* * If the block size is smaller than the page size, we need to check the * per-block uptodate status and adjust the offset and length if needed * to avoid reading in already uptodate ranges. 
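 * For example (an illustrative case, assuming 1k blocks in a 4k folio
 * where only block 0 is already uptodate): a read of the whole folio is
 * trimmed so that *pos and poff advance by 1024 and plen shrinks from
 * 4096 to 3072.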
*/ if (ifs) { unsigned int i; /* move forward for each leading block marked uptodate */ for (i = first; i <= last; i++) { if (!ifs_block_is_uptodate(ifs, i)) break; *pos += block_size; poff += block_size; plen -= block_size; first++; } /* truncate len if we find any trailing uptodate block(s) */ for ( ; i <= last; i++) { if (ifs_block_is_uptodate(ifs, i)) { plen -= (last - i + 1) * block_size; last = i - 1; break; } } } /* * If the extent spans the block that contains the i_size, we need to * handle both halves separately so that we properly zero data in the * page cache for blocks that are entirely outside of i_size. */ if (orig_pos <= isize && orig_pos + orig_plen > isize) { unsigned end = offset_in_folio(folio, isize - 1) >> block_bits; if (first <= end && last > end) plen -= (last - end) * block_size; } *offp = poff; *lenp = plen; } static void iomap_finish_folio_read(struct folio *folio, size_t off, size_t len, int error) { struct iomap_folio_state *ifs = folio->private; bool uptodate = !error; bool finished = true; if (ifs) { unsigned long flags; spin_lock_irqsave(&ifs->state_lock, flags); if (!error) uptodate = ifs_set_range_uptodate(folio, ifs, off, len); ifs->read_bytes_pending -= len; finished = !ifs->read_bytes_pending; spin_unlock_irqrestore(&ifs->state_lock, flags); } if (finished) folio_end_read(folio, uptodate); } static void iomap_read_end_io(struct bio *bio) { int error = blk_status_to_errno(bio->bi_status); struct folio_iter fi; bio_for_each_folio_all(fi, bio) iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error); bio_put(bio); } struct iomap_readpage_ctx { struct folio *cur_folio; bool cur_folio_in_bio; struct bio *bio; struct readahead_control *rac; }; /** * iomap_read_inline_data - copy inline data into the page cache * @iter: iteration structure * @folio: folio to copy to * * Copy the inline data in @iter into @folio and zero out the rest of the folio. * Only a single IOMAP_INLINE extent is allowed at the end of each file. * Returns zero for success to complete the read, or the usual negative errno. 
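 * For example (illustrative): a file whose data is a 200-byte inline
 * extent has those 200 bytes copied into the folio, the remainder of the
 * folio zeroed, and that whole range marked uptodate.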
*/ static int iomap_read_inline_data(const struct iomap_iter *iter, struct folio *folio) { const struct iomap *iomap = iomap_iter_srcmap(iter); size_t size = i_size_read(iter->inode) - iomap->offset; size_t offset = offset_in_folio(folio, iomap->offset); if (folio_test_uptodate(folio)) return 0; if (WARN_ON_ONCE(size > iomap->length)) return -EIO; if (offset > 0) ifs_alloc(iter->inode, folio, iter->flags); folio_fill_tail(folio, offset, iomap->inline_data, size); iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset); return 0; } static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter, loff_t pos) { const struct iomap *srcmap = iomap_iter_srcmap(iter); return srcmap->type != IOMAP_MAPPED || (srcmap->flags & IOMAP_F_NEW) || pos >= i_size_read(iter->inode); } static loff_t iomap_readpage_iter(const struct iomap_iter *iter, struct iomap_readpage_ctx *ctx, loff_t offset) { const struct iomap *iomap = &iter->iomap; loff_t pos = iter->pos + offset; loff_t length = iomap_length(iter) - offset; struct folio *folio = ctx->cur_folio; struct iomap_folio_state *ifs; loff_t orig_pos = pos; size_t poff, plen; sector_t sector; if (iomap->type == IOMAP_INLINE) return iomap_read_inline_data(iter, folio); /* zero post-eof blocks as the page may be mapped */ ifs = ifs_alloc(iter->inode, folio, iter->flags); iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen); if (plen == 0) goto done; if (iomap_block_needs_zeroing(iter, pos)) { folio_zero_range(folio, poff, plen); iomap_set_range_uptodate(folio, poff, plen); goto done; } ctx->cur_folio_in_bio = true; if (ifs) { spin_lock_irq(&ifs->state_lock); ifs->read_bytes_pending += plen; spin_unlock_irq(&ifs->state_lock); } sector = iomap_sector(iomap, pos); if (!ctx->bio || bio_end_sector(ctx->bio) != sector || !bio_add_folio(ctx->bio, folio, plen, poff)) { gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL); gfp_t orig_gfp = gfp; unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE); if (ctx->bio) submit_bio(ctx->bio); if (ctx->rac) /* same as readahead_gfp_mask */ gfp |= __GFP_NORETRY | __GFP_NOWARN; ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ, gfp); /* * If the bio_alloc fails, try it again for a single page to * avoid having to deal with partial page reads. This emulates * what do_mpage_read_folio does. */ if (!ctx->bio) { ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp); } if (ctx->rac) ctx->bio->bi_opf |= REQ_RAHEAD; ctx->bio->bi_iter.bi_sector = sector; ctx->bio->bi_end_io = iomap_read_end_io; bio_add_folio_nofail(ctx->bio, folio, plen, poff); } done: /* * Move the caller beyond our range so that it keeps making progress. * For that, we have to include any leading non-uptodate ranges, but * we can skip trailing ones as they will be handled in the next * iteration. 
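 * For example (illustrative, 1k blocks): if two leading blocks were
 * already uptodate, pos has advanced 2048 bytes past orig_pos and plen
 * covers the 2048 bytes actually handled here, so 4096 is returned and
 * the caller moves past the entire range.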
*/ return pos - orig_pos + plen; } static loff_t iomap_read_folio_iter(const struct iomap_iter *iter, struct iomap_readpage_ctx *ctx) { struct folio *folio = ctx->cur_folio; size_t offset = offset_in_folio(folio, iter->pos); loff_t length = min_t(loff_t, folio_size(folio) - offset, iomap_length(iter)); loff_t done, ret; for (done = 0; done < length; done += ret) { ret = iomap_readpage_iter(iter, ctx, done); if (ret <= 0) return ret; } return done; } int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = folio->mapping->host, .pos = folio_pos(folio), .len = folio_size(folio), }; struct iomap_readpage_ctx ctx = { .cur_folio = folio, }; int ret; trace_iomap_readpage(iter.inode, 1); while ((ret = iomap_iter(&iter, ops)) > 0) iter.processed = iomap_read_folio_iter(&iter, &ctx); if (ctx.bio) { submit_bio(ctx.bio); WARN_ON_ONCE(!ctx.cur_folio_in_bio); } else { WARN_ON_ONCE(ctx.cur_folio_in_bio); folio_unlock(folio); } /* * Just like mpage_readahead and block_read_full_folio, we always * return 0 and just set the folio error flag on errors. This * should be cleaned up throughout the stack eventually. */ return 0; } EXPORT_SYMBOL_GPL(iomap_read_folio); static loff_t iomap_readahead_iter(const struct iomap_iter *iter, struct iomap_readpage_ctx *ctx) { loff_t length = iomap_length(iter); loff_t done, ret; for (done = 0; done < length; done += ret) { if (ctx->cur_folio && offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) { if (!ctx->cur_folio_in_bio) folio_unlock(ctx->cur_folio); ctx->cur_folio = NULL; } if (!ctx->cur_folio) { ctx->cur_folio = readahead_folio(ctx->rac); ctx->cur_folio_in_bio = false; } ret = iomap_readpage_iter(iter, ctx, done); if (ret <= 0) return ret; } return done; } /** * iomap_readahead - Attempt to read pages from a file. * @rac: Describes the pages to be read. * @ops: The operations vector for the filesystem. * * This function is for filesystems to call to implement their readahead * address_space operation. * * Context: The @ops callbacks may submit I/O (eg to read the addresses of * blocks from disc), and may wait for it. The caller may be trying to * access a different page, and so sleeping excessively should be avoided. * It may allocate memory, but should avoid costly allocations. This * function is called with memalloc_nofs set, so allocations will not cause * the filesystem to be reentered. */ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = rac->mapping->host, .pos = readahead_pos(rac), .len = readahead_length(rac), }; struct iomap_readpage_ctx ctx = { .rac = rac, }; trace_iomap_readahead(rac->mapping->host, readahead_count(rac)); while (iomap_iter(&iter, ops) > 0) iter.processed = iomap_readahead_iter(&iter, &ctx); if (ctx.bio) submit_bio(ctx.bio); if (ctx.cur_folio) { if (!ctx.cur_folio_in_bio) folio_unlock(ctx.cur_folio); } } EXPORT_SYMBOL_GPL(iomap_readahead); /* * iomap_is_partially_uptodate checks whether blocks within a folio are * uptodate or not. * * Returns true if all blocks which correspond to the specified part * of the folio are uptodate. 
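 * For example (illustrative, 1k blocks): a query covering only bytes
 * 0-2047 of a folio whose first two blocks are uptodate returns true,
 * even though the rest of the folio has not been read yet.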
*/ bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count) { struct iomap_folio_state *ifs = folio->private; struct inode *inode = folio->mapping->host; unsigned first, last, i; if (!ifs) return false; /* Caller's range may extend past the end of this folio */ count = min(folio_size(folio) - from, count); /* First and last blocks in range within folio */ first = from >> inode->i_blkbits; last = (from + count - 1) >> inode->i_blkbits; for (i = first; i <= last; i++) if (!ifs_block_is_uptodate(ifs, i)) return false; return true; } EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate); /** * iomap_get_folio - get a folio reference for writing * @iter: iteration structure * @pos: start offset of write * @len: Suggested size of folio to create. * * Returns a locked reference to the folio at @pos, or an error pointer if the * folio could not be obtained. */ struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len) { fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS; if (iter->flags & IOMAP_NOWAIT) fgp |= FGP_NOWAIT; fgp |= fgf_set_order(len); return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT, fgp, mapping_gfp_mask(iter->inode->i_mapping)); } EXPORT_SYMBOL_GPL(iomap_get_folio); bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags) { trace_iomap_release_folio(folio->mapping->host, folio_pos(folio), folio_size(folio)); /* * If the folio is dirty, we refuse to release our metadata because * it may be partially dirty. Once we track per-block dirty state, * we can release the metadata if every block is dirty. */ if (folio_test_dirty(folio)) return false; ifs_free(folio); return true; } EXPORT_SYMBOL_GPL(iomap_release_folio); void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len) { trace_iomap_invalidate_folio(folio->mapping->host, folio_pos(folio) + offset, len); /* * If we're invalidating the entire folio, clear the dirty state * from it and release it to avoid unnecessary buildup of the LRU. */ if (offset == 0 && len == folio_size(folio)) { WARN_ON_ONCE(folio_test_writeback(folio)); folio_cancel_dirty(folio); ifs_free(folio); } } EXPORT_SYMBOL_GPL(iomap_invalidate_folio); bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio) { struct inode *inode = mapping->host; size_t len = folio_size(folio); ifs_alloc(inode, folio, 0); iomap_set_range_dirty(folio, 0, len); return filemap_dirty_folio(mapping, folio); } EXPORT_SYMBOL_GPL(iomap_dirty_folio); static void iomap_write_failed(struct inode *inode, loff_t pos, unsigned len) { loff_t i_size = i_size_read(inode); /* * Only truncate newly allocated pages beyond EOF, even if the * write started inside the existing inode size. 
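 * For example (illustrative): a failed 4096-byte write that started 512
 * bytes below EOF truncates the page cache from the old EOF to the end
 * of the failed range, leaving the existing data below EOF untouched.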
*/ if (pos + len > i_size) truncate_pagecache_range(inode, max(pos, i_size), pos + len - 1); } static int iomap_read_folio_sync(loff_t block_start, struct folio *folio, size_t poff, size_t plen, const struct iomap *iomap) { struct bio_vec bvec; struct bio bio; bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ); bio.bi_iter.bi_sector = iomap_sector(iomap, block_start); bio_add_folio_nofail(&bio, folio, plen, poff); return submit_bio_wait(&bio); } static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos, size_t len, struct folio *folio) { const struct iomap *srcmap = iomap_iter_srcmap(iter); struct iomap_folio_state *ifs; loff_t block_size = i_blocksize(iter->inode); loff_t block_start = round_down(pos, block_size); loff_t block_end = round_up(pos + len, block_size); unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio); size_t from = offset_in_folio(folio, pos), to = from + len; size_t poff, plen; /* * If the write or zeroing completely overlaps the current folio, then * entire folio will be dirtied so there is no need for * per-block state tracking structures to be attached to this folio. * For the unshare case, we must read in the ondisk contents because we * are not changing pagecache contents. */ if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) && pos + len >= folio_pos(folio) + folio_size(folio)) return 0; ifs = ifs_alloc(iter->inode, folio, iter->flags); if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1) return -EAGAIN; if (folio_test_uptodate(folio)) return 0; do { iomap_adjust_read_range(iter->inode, folio, &block_start, block_end - block_start, &poff, &plen); if (plen == 0) break; if (!(iter->flags & IOMAP_UNSHARE) && (from <= poff || from >= poff + plen) && (to <= poff || to >= poff + plen)) continue; if (iomap_block_needs_zeroing(iter, block_start)) { if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE)) return -EIO; folio_zero_segments(folio, poff, from, to, poff + plen); } else { int status; if (iter->flags & IOMAP_NOWAIT) return -EAGAIN; status = iomap_read_folio_sync(block_start, folio, poff, plen, srcmap); if (status) return status; } iomap_set_range_uptodate(folio, poff, plen); } while ((block_start += plen) < block_end); return 0; } static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len) { const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops; if (folio_ops && folio_ops->get_folio) return folio_ops->get_folio(iter, pos, len); else return iomap_get_folio(iter, pos, len); } static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret, struct folio *folio) { const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops; if (folio_ops && folio_ops->put_folio) { folio_ops->put_folio(iter->inode, pos, ret, folio); } else { folio_unlock(folio); folio_put(folio); } } static int iomap_write_begin_inline(const struct iomap_iter *iter, struct folio *folio) { /* needs more work for the tailpacking case; disable for now */ if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0)) return -EIO; return iomap_read_inline_data(iter, folio); } static int iomap_write_begin(struct iomap_iter *iter, loff_t pos, size_t len, struct folio **foliop) { const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops; const struct iomap *srcmap = iomap_iter_srcmap(iter); struct folio *folio; int status = 0; BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length); if (srcmap != &iter->iomap) BUG_ON(pos + len > srcmap->offset + srcmap->length); if (fatal_signal_pending(current)) return -EINTR; if 
(!mapping_large_folio_support(iter->inode->i_mapping)) len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos)); folio = __iomap_get_folio(iter, pos, len); if (IS_ERR(folio)) return PTR_ERR(folio); /* * Now we have a locked folio, before we do anything with it we need to * check that the iomap we have cached is not stale. The inode extent * mapping can change due to concurrent IO in flight (e.g. * IOMAP_UNWRITTEN state can change and memory reclaim could have * reclaimed a previously partially written page at this index after IO * completion before this write reaches this file offset) and hence we * could do the wrong thing here (zero a page range incorrectly or fail * to zero) and corrupt data. */ if (folio_ops && folio_ops->iomap_valid) { bool iomap_valid = folio_ops->iomap_valid(iter->inode, &iter->iomap); if (!iomap_valid) { iter->iomap.flags |= IOMAP_F_STALE; status = 0; goto out_unlock; } } if (pos + len > folio_pos(folio) + folio_size(folio)) len = folio_pos(folio) + folio_size(folio) - pos; if (srcmap->type == IOMAP_INLINE) status = iomap_write_begin_inline(iter, folio); else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) status = __block_write_begin_int(folio, pos, len, NULL, srcmap); else status = __iomap_write_begin(iter, pos, len, folio); if (unlikely(status)) goto out_unlock; *foliop = folio; return 0; out_unlock: __iomap_put_folio(iter, pos, 0, folio); return status; } static bool __iomap_write_end(struct inode *inode, loff_t pos, size_t len, size_t copied, struct folio *folio) { flush_dcache_folio(folio); /* * The blocks that were entirely written will now be uptodate, so we * don't have to worry about a read_folio reading them and overwriting a * partial write. However, if we've encountered a short write and only * partially written into a block, it will not be marked uptodate, so a * read_folio might come in and destroy our partial write. * * Do the simplest thing and just treat any short write to a * non-uptodate page as a zero-length write, and force the caller to * redo the whole thing. */ if (unlikely(copied < len && !folio_test_uptodate(folio))) return false; iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len); iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied); filemap_dirty_folio(inode->i_mapping, folio); return true; } static void iomap_write_end_inline(const struct iomap_iter *iter, struct folio *folio, loff_t pos, size_t copied) { const struct iomap *iomap = &iter->iomap; void *addr; WARN_ON_ONCE(!folio_test_uptodate(folio)); BUG_ON(!iomap_inline_data_valid(iomap)); flush_dcache_folio(folio); addr = kmap_local_folio(folio, pos); memcpy(iomap_inline_data(iomap, pos), addr, copied); kunmap_local(addr); mark_inode_dirty(iter->inode); } /* * Returns true if all copied bytes have been written to the pagecache, * otherwise return false. 
*/ static bool iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len, size_t copied, struct folio *folio) { const struct iomap *srcmap = iomap_iter_srcmap(iter); if (srcmap->type == IOMAP_INLINE) { iomap_write_end_inline(iter, folio, pos, copied); return true; } if (srcmap->flags & IOMAP_F_BUFFER_HEAD) { size_t bh_written; bh_written = block_write_end(NULL, iter->inode->i_mapping, pos, len, copied, folio, NULL); WARN_ON_ONCE(bh_written != copied && bh_written != 0); return bh_written == copied; } return __iomap_write_end(iter->inode, pos, len, copied, folio); } static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i) { loff_t length = iomap_length(iter); loff_t pos = iter->pos; ssize_t total_written = 0; long status = 0; struct address_space *mapping = iter->inode->i_mapping; size_t chunk = mapping_max_folio_size(mapping); unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0; do { struct folio *folio; loff_t old_size; size_t offset; /* Offset into folio */ size_t bytes; /* Bytes to write to folio */ size_t copied; /* Bytes copied from user */ size_t written; /* Bytes have been written */ bytes = iov_iter_count(i); retry: offset = pos & (chunk - 1); bytes = min(chunk - offset, bytes); status = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags); if (unlikely(status)) break; if (bytes > length) bytes = length; /* * Bring in the user page that we'll copy from _first_. * Otherwise there's a nasty deadlock on copying from the * same page as we're writing to, without it being marked * up-to-date. * * For async buffered writes the assumption is that the user * page has already been faulted in. This can be optimized by * faulting the user page. */ if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) { status = -EFAULT; break; } status = iomap_write_begin(iter, pos, bytes, &folio); if (unlikely(status)) { iomap_write_failed(iter->inode, pos, bytes); break; } if (iter->iomap.flags & IOMAP_F_STALE) break; offset = offset_in_folio(folio, pos); if (bytes > folio_size(folio) - offset) bytes = folio_size(folio) - offset; if (mapping_writably_mapped(mapping)) flush_dcache_folio(folio); copied = copy_folio_from_iter_atomic(folio, offset, bytes, i); written = iomap_write_end(iter, pos, bytes, copied, folio) ? copied : 0; /* * Update the in-memory inode size after copying the data into * the page cache. It's up to the file system to write the * updated size to disk, preferably after I/O completion so that * no stale data is exposed. Only once that's done can we * unlock and release the folio. */ old_size = iter->inode->i_size; if (pos + written > old_size) { i_size_write(iter->inode, pos + written); iter->iomap.flags |= IOMAP_F_SIZE_CHANGED; } __iomap_put_folio(iter, pos, written, folio); if (old_size < pos) pagecache_isize_extended(iter->inode, old_size, pos); cond_resched(); if (unlikely(written == 0)) { /* * A short copy made iomap_write_end() reject the * thing entirely. Might be memory poisoning * halfway through, might be a race with munmap, * might be severe memory pressure. */ iomap_write_failed(iter->inode, pos, bytes); iov_iter_revert(i, copied); if (chunk > PAGE_SIZE) chunk /= 2; if (copied) { bytes = copied; goto retry; } } else { pos += written; total_written += written; length -= written; } } while (iov_iter_count(i) && length); if (status == -EAGAIN) { iov_iter_revert(i, total_written); return -EAGAIN; } return total_written ? 
total_written : status; } ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i, const struct iomap_ops *ops, void *private) { struct iomap_iter iter = { .inode = iocb->ki_filp->f_mapping->host, .pos = iocb->ki_pos, .len = iov_iter_count(i), .flags = IOMAP_WRITE, .private = private, }; ssize_t ret; if (iocb->ki_flags & IOCB_NOWAIT) iter.flags |= IOMAP_NOWAIT; while ((ret = iomap_iter(&iter, ops)) > 0) iter.processed = iomap_write_iter(&iter, i); if (unlikely(iter.pos == iocb->ki_pos)) return ret; ret = iter.pos - iocb->ki_pos; iocb->ki_pos = iter.pos; return ret; } EXPORT_SYMBOL_GPL(iomap_file_buffered_write); static void iomap_write_delalloc_ifs_punch(struct inode *inode, struct folio *folio, loff_t start_byte, loff_t end_byte, struct iomap *iomap, iomap_punch_t punch) { unsigned int first_blk, last_blk, i; loff_t last_byte; u8 blkbits = inode->i_blkbits; struct iomap_folio_state *ifs; /* * When we have per-block dirty tracking, there can be * blocks within a folio which are marked uptodate * but not dirty. In that case it is necessary to punch * out such blocks to avoid leaking any delalloc blocks. */ ifs = folio->private; if (!ifs) return; last_byte = min_t(loff_t, end_byte - 1, folio_pos(folio) + folio_size(folio) - 1); first_blk = offset_in_folio(folio, start_byte) >> blkbits; last_blk = offset_in_folio(folio, last_byte) >> blkbits; for (i = first_blk; i <= last_blk; i++) { if (!ifs_block_is_dirty(folio, ifs, i)) punch(inode, folio_pos(folio) + (i << blkbits), 1 << blkbits, iomap); } } static void iomap_write_delalloc_punch(struct inode *inode, struct folio *folio, loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte, struct iomap *iomap, iomap_punch_t punch) { if (!folio_test_dirty(folio)) return; /* if dirty, punch up to offset */ if (start_byte > *punch_start_byte) { punch(inode, *punch_start_byte, start_byte - *punch_start_byte, iomap); } /* Punch non-dirty blocks within folio */ iomap_write_delalloc_ifs_punch(inode, folio, start_byte, end_byte, iomap, punch); /* * Make sure the next punch start is correctly bound to * the end of this data range, not the end of the folio. */ *punch_start_byte = min_t(loff_t, end_byte, folio_pos(folio) + folio_size(folio)); } /* * Scan the data range passed to us for dirty page cache folios. If we find a * dirty folio, punch out the preceding range and update the offset from which * the next punch will start from. * * We can punch out storage reservations under clean pages because they either * contain data that has been written back - in which case the delalloc punch * over that range is a no-op - or they have been read faults in which case they * contain zeroes and we can remove the delalloc backing range and any new * writes to those pages will do the normal hole filling operation... * * This makes the logic simple: we only need to keep the delalloc extents only * over the dirty ranges of the page cache. * * This function uses [start_byte, end_byte) intervals (i.e. open ended) to * simplify range iterations. 
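 * For example (illustrative, 4k folios): scanning [0, 16384) where only
 * the folio at [4096, 8192) is dirty punches out [0, 4096) when that
 * folio is found and moves *punch_start_byte to 8192; the clean tail of
 * the range is then punched by the caller after the scan finishes.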
*/ static void iomap_write_delalloc_scan(struct inode *inode, loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte, struct iomap *iomap, iomap_punch_t punch) { while (start_byte < end_byte) { struct folio *folio; /* grab locked page */ folio = filemap_lock_folio(inode->i_mapping, start_byte >> PAGE_SHIFT); if (IS_ERR(folio)) { start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) + PAGE_SIZE; continue; } iomap_write_delalloc_punch(inode, folio, punch_start_byte, start_byte, end_byte, iomap, punch); /* move offset to start of next folio in range */ start_byte = folio_pos(folio) + folio_size(folio); folio_unlock(folio); folio_put(folio); } } /* * When a short write occurs, the filesystem might need to use ->iomap_end * to remove space reservations created in ->iomap_begin. * * For filesystems that use delayed allocation, there can be dirty pages over * the delalloc extent outside the range of a short write but still within the * delalloc extent allocated for this iomap if the write raced with page * faults. * * Punch out all the delalloc blocks in the range given except for those that * have dirty data still pending in the page cache - those are going to be * written and so must still retain the delalloc backing for writeback. * * The punch() callback *must* only punch delalloc extents in the range passed * to it. It must skip over all other types of extents in the range and leave * them completely unchanged. It must do this punch atomically with respect to * other extent modifications. * * The punch() callback may be called with a folio locked to prevent writeback * extent allocation racing at the edge of the range we are currently punching. * The locked folio may or may not cover the range being punched, so it is not * safe for the punch() callback to lock folios itself. * * Lock order is: * * inode->i_rwsem (shared or exclusive) * inode->i_mapping->invalidate_lock (exclusive) * folio_lock() * ->punch * internal filesystem allocation lock * * As we are scanning the page cache for data, we don't need to reimplement the * wheel - mapping_seek_hole_data() does exactly what we need to identify the * start and end of data ranges correctly even for sub-folio block sizes. This * byte range based iteration is especially convenient because it means we * don't have to care about variable size folios, nor where the start or end of * the data range lies within a folio, if they lie within the same folio or even * if there are multiple discontiguous data ranges within the folio. * * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so * can return data ranges that exist in the cache beyond EOF. e.g. a page fault * spanning EOF will initialise the post-EOF data to zeroes and mark it up to * date. A write page fault can then mark it dirty. If we then fail a write() * beyond EOF into that up to date cached range, we allocate a delalloc block * beyond EOF and then have to punch it out. Because the range is up to date, * mapping_seek_hole_data() will return it, and we will skip the punch because * the folio is dirty. This is incorrect - we always need to punch out delalloc * beyond EOF in this case as writeback will never write back and convert that * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF, * resulting in always punching out the range from the EOF to the end of the * range the iomap spans. * * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it * matches the intervals returned by mapping_seek_hole_data(). i.e. 
SEEK_DATA * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte) * returns the end of the data range (data_end). Using closed intervals would * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose * the code to subtle off-by-one bugs.... */ void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte, loff_t end_byte, unsigned flags, struct iomap *iomap, iomap_punch_t punch) { loff_t punch_start_byte = start_byte; loff_t scan_end_byte = min(i_size_read(inode), end_byte); /* * The caller must hold invalidate_lock to avoid races with page faults * re-instantiating folios and dirtying them via ->page_mkwrite whilst * we walk the cache and perform delalloc extent removal. Failing to do * this can leave dirty pages with no space reservation in the cache. */ lockdep_assert_held_write(&inode->i_mapping->invalidate_lock); while (start_byte < scan_end_byte) { loff_t data_end; start_byte = mapping_seek_hole_data(inode->i_mapping, start_byte, scan_end_byte, SEEK_DATA); /* * If there is no more data to scan, all that is left is to * punch out the remaining range. * * Note that mapping_seek_hole_data is only supposed to return * either an offset or -ENXIO, so WARN on any other error as * that would be an API change without updating the callers. */ if (start_byte == -ENXIO || start_byte == scan_end_byte) break; if (WARN_ON_ONCE(start_byte < 0)) return; WARN_ON_ONCE(start_byte < punch_start_byte); WARN_ON_ONCE(start_byte > scan_end_byte); /* * We find the end of this contiguous cached data range by * seeking from start_byte to the beginning of the next hole. */ data_end = mapping_seek_hole_data(inode->i_mapping, start_byte, scan_end_byte, SEEK_HOLE); if (WARN_ON_ONCE(data_end < 0)) return; /* * If we race with post-direct I/O invalidation of the page cache, * there might be no data left at start_byte. */ if (data_end == start_byte) continue; WARN_ON_ONCE(data_end < start_byte); WARN_ON_ONCE(data_end > scan_end_byte); iomap_write_delalloc_scan(inode, &punch_start_byte, start_byte, data_end, iomap, punch); /* The next data search starts at the end of this one. 
*/ start_byte = data_end; } if (punch_start_byte < end_byte) punch(inode, punch_start_byte, end_byte - punch_start_byte, iomap); } EXPORT_SYMBOL_GPL(iomap_write_delalloc_release); static loff_t iomap_unshare_iter(struct iomap_iter *iter) { struct iomap *iomap = &iter->iomap; loff_t pos = iter->pos; loff_t length = iomap_length(iter); loff_t written = 0; if (!iomap_want_unshare_iter(iter)) return length; do { struct folio *folio; int status; size_t offset; size_t bytes = min_t(u64, SIZE_MAX, length); bool ret; status = iomap_write_begin(iter, pos, bytes, &folio); if (unlikely(status)) return status; if (iomap->flags & IOMAP_F_STALE) break; offset = offset_in_folio(folio, pos); if (bytes > folio_size(folio) - offset) bytes = folio_size(folio) - offset; ret = iomap_write_end(iter, pos, bytes, bytes, folio); __iomap_put_folio(iter, pos, bytes, folio); if (WARN_ON_ONCE(!ret)) return -EIO; cond_resched(); pos += bytes; written += bytes; length -= bytes; balance_dirty_pages_ratelimited(iter->inode->i_mapping); } while (length > 0); return written; } int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = inode, .pos = pos, .flags = IOMAP_WRITE | IOMAP_UNSHARE, }; loff_t size = i_size_read(inode); int ret; if (pos < 0 || pos >= size) return 0; iter.len = min(len, size - pos); while ((ret = iomap_iter(&iter, ops)) > 0) iter.processed = iomap_unshare_iter(&iter); return ret; } EXPORT_SYMBOL_GPL(iomap_file_unshare); /* * Flush the remaining range of the iter and mark the current mapping stale. * This is used when zero range sees an unwritten mapping that may have had * dirty pagecache over it. */ static inline int iomap_zero_iter_flush_and_stale(struct iomap_iter *i) { struct address_space *mapping = i->inode->i_mapping; loff_t end = i->pos + i->len - 1; i->iomap.flags |= IOMAP_F_STALE; return filemap_write_and_wait_range(mapping, i->pos, end); } static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero) { loff_t pos = iter->pos; loff_t length = iomap_length(iter); loff_t written = 0; do { struct folio *folio; int status; size_t offset; size_t bytes = min_t(u64, SIZE_MAX, length); bool ret; status = iomap_write_begin(iter, pos, bytes, &folio); if (status) return status; if (iter->iomap.flags & IOMAP_F_STALE) break; /* warn about zeroing folios beyond eof that won't write back */ WARN_ON_ONCE(folio_pos(folio) > iter->inode->i_size); offset = offset_in_folio(folio, pos); if (bytes > folio_size(folio) - offset) bytes = folio_size(folio) - offset; folio_zero_range(folio, offset, bytes); folio_mark_accessed(folio); ret = iomap_write_end(iter, pos, bytes, bytes, folio); __iomap_put_folio(iter, pos, bytes, folio); if (WARN_ON_ONCE(!ret)) return -EIO; pos += bytes; length -= bytes; written += bytes; } while (length > 0); if (did_zero) *did_zero = true; return written; } int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = inode, .pos = pos, .len = len, .flags = IOMAP_ZERO, }; struct address_space *mapping = inode->i_mapping; unsigned int blocksize = i_blocksize(inode); unsigned int off = pos & (blocksize - 1); loff_t plen = min_t(loff_t, len, blocksize - off); int ret; bool range_dirty; /* * Zero range can skip mappings that are zero on disk so long as * pagecache is clean. If pagecache was dirty prior to zero range, the * mapping converts on writeback completion and so must be zeroed. 
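 * (Illustrative case: an unwritten extent that currently reads as zeroes
 * from disk may have dirty data in the page cache above it; writeback
 * will convert the extent to written, so the range must still be zeroed
 * rather than skipped.)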
* * The simplest way to deal with this across a range is to flush * pagecache and process the updated mappings. To avoid excessive * flushing on partial eof zeroing, special case it to zero the * unaligned start portion if already dirty in pagecache. */ if (off && filemap_range_needs_writeback(mapping, pos, pos + plen - 1)) { iter.len = plen; while ((ret = iomap_iter(&iter, ops)) > 0) iter.processed = iomap_zero_iter(&iter, did_zero); iter.len = len - (iter.pos - pos); if (ret || !iter.len) return ret; } /* * To avoid an unconditional flush, check pagecache state and only flush * if dirty and the fs returns a mapping that might convert on * writeback. */ range_dirty = filemap_range_needs_writeback(inode->i_mapping, iter.pos, iter.pos + iter.len - 1); while ((ret = iomap_iter(&iter, ops)) > 0) { const struct iomap *srcmap = iomap_iter_srcmap(&iter); if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN) { loff_t proc = iomap_length(&iter); if (range_dirty) { range_dirty = false; proc = iomap_zero_iter_flush_and_stale(&iter); } iter.processed = proc; continue; } iter.processed = iomap_zero_iter(&iter, did_zero); } return ret; } EXPORT_SYMBOL_GPL(iomap_zero_range); int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, const struct iomap_ops *ops) { unsigned int blocksize = i_blocksize(inode); unsigned int off = pos & (blocksize - 1); /* Block boundary? Nothing to do */ if (!off) return 0; return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops); } EXPORT_SYMBOL_GPL(iomap_truncate_page); static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter, struct folio *folio) { loff_t length = iomap_length(iter); int ret; if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) { ret = __block_write_begin_int(folio, iter->pos, length, NULL, &iter->iomap); if (ret) return ret; block_commit_write(&folio->page, 0, length); } else { WARN_ON_ONCE(!folio_test_uptodate(folio)); folio_mark_dirty(folio); } return length; } vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops) { struct iomap_iter iter = { .inode = file_inode(vmf->vma->vm_file), .flags = IOMAP_WRITE | IOMAP_FAULT, }; struct folio *folio = page_folio(vmf->page); ssize_t ret; folio_lock(folio); ret = folio_mkwrite_check_truncate(folio, iter.inode); if (ret < 0) goto out_unlock; iter.pos = folio_pos(folio); iter.len = ret; while ((ret = iomap_iter(&iter, ops)) > 0) iter.processed = iomap_folio_mkwrite_iter(&iter, folio); if (ret < 0) goto out_unlock; folio_wait_stable(folio); return VM_FAULT_LOCKED; out_unlock: folio_unlock(folio); return vmf_fs_error(ret); } EXPORT_SYMBOL_GPL(iomap_page_mkwrite); static void iomap_finish_folio_write(struct inode *inode, struct folio *folio, size_t len) { struct iomap_folio_state *ifs = folio->private; WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs); WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0); if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending)) folio_end_writeback(folio); } /* * We're now finished for good with this ioend structure. Update the page * state, release holds on bios, and finally free up memory. Do not use the * ioend after this. 
*/ static u32 iomap_finish_ioend(struct iomap_ioend *ioend, int error) { struct inode *inode = ioend->io_inode; struct bio *bio = &ioend->io_bio; struct folio_iter fi; u32 folio_count = 0; if (error) { mapping_set_error(inode->i_mapping, error); if (!bio_flagged(bio, BIO_QUIET)) { pr_err_ratelimited( "%s: writeback error on inode %lu, offset %lld, sector %llu", inode->i_sb->s_id, inode->i_ino, ioend->io_offset, ioend->io_sector); } } /* walk all folios in bio, ending page IO on them */ bio_for_each_folio_all(fi, bio) { iomap_finish_folio_write(inode, fi.folio, fi.length); folio_count++; } bio_put(bio); /* frees the ioend */ return folio_count; } /* * Ioend completion routine for merged bios. This can only be called from task * contexts as merged ioends can be of unbound length. Hence we have to break up * the writeback completions into manageable chunks to avoid long scheduler * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get * good batch processing throughput without creating adverse scheduler latency * conditions. */ void iomap_finish_ioends(struct iomap_ioend *ioend, int error) { struct list_head tmp; u32 completions; might_sleep(); list_replace_init(&ioend->io_list, &tmp); completions = iomap_finish_ioend(ioend, error); while (!list_empty(&tmp)) { if (completions > IOEND_BATCH_SIZE * 8) { cond_resched(); completions = 0; } ioend = list_first_entry(&tmp, struct iomap_ioend, io_list); list_del_init(&ioend->io_list); completions += iomap_finish_ioend(ioend, error); } } EXPORT_SYMBOL_GPL(iomap_finish_ioends); /* * We can merge two adjacent ioends if they have the same set of work to do. */ static bool iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next) { if (ioend->io_bio.bi_status != next->io_bio.bi_status) return false; if (next->io_flags & IOMAP_F_BOUNDARY) return false; if ((ioend->io_flags & IOMAP_F_SHARED) ^ (next->io_flags & IOMAP_F_SHARED)) return false; if ((ioend->io_type == IOMAP_UNWRITTEN) ^ (next->io_type == IOMAP_UNWRITTEN)) return false; if (ioend->io_offset + ioend->io_size != next->io_offset) return false; /* * Do not merge physically discontiguous ioends. The filesystem * completion functions will have to iterate the physical * discontiguities even if we merge the ioends at a logical level, so * we don't gain anything by merging physical discontiguities here. * * We cannot use bio->bi_iter.bi_sector here as it is modified during * submission so does not point to the start sector of the bio at * completion. 
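 * For example (illustrative): an ioend at io_sector 1000 with io_size
 * 8192 ends at sector 1016 (8192 >> 9 == 16), so only a next ioend whose
 * io_sector is exactly 1016 can be merged with it.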
*/ if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector) return false; return true; } void iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends) { struct iomap_ioend *next; INIT_LIST_HEAD(&ioend->io_list); while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend, io_list))) { if (!iomap_ioend_can_merge(ioend, next)) break; list_move_tail(&next->io_list, &ioend->io_list); ioend->io_size += next->io_size; } } EXPORT_SYMBOL_GPL(iomap_ioend_try_merge); static int iomap_ioend_compare(void *priv, const struct list_head *a, const struct list_head *b) { struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list); struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list); if (ia->io_offset < ib->io_offset) return -1; if (ia->io_offset > ib->io_offset) return 1; return 0; } void iomap_sort_ioends(struct list_head *ioend_list) { list_sort(NULL, ioend_list, iomap_ioend_compare); } EXPORT_SYMBOL_GPL(iomap_sort_ioends); static void iomap_writepage_end_bio(struct bio *bio) { iomap_finish_ioend(iomap_ioend_from_bio(bio), blk_status_to_errno(bio->bi_status)); } /* * Submit the final bio for an ioend. * * If @error is non-zero, it means that we have a situation where some part of * the submission process has failed after we've marked pages for writeback. * We cannot cancel ioend directly in that case, so call the bio end I/O handler * with the error status here to run the normal I/O completion handler to clear * the writeback bit and let the file system proess the errors. */ static int iomap_submit_ioend(struct iomap_writepage_ctx *wpc, int error) { if (!wpc->ioend) return error; /* * Let the file systems prepare the I/O submission and hook in an I/O * comletion handler. This also needs to happen in case after a * failure happened so that the file system end I/O handler gets called * to clean up. */ if (wpc->ops->prepare_ioend) error = wpc->ops->prepare_ioend(wpc->ioend, error); if (error) { wpc->ioend->io_bio.bi_status = errno_to_blk_status(error); bio_endio(&wpc->ioend->io_bio); } else { submit_bio(&wpc->ioend->io_bio); } wpc->ioend = NULL; return error; } static struct iomap_ioend *iomap_alloc_ioend(struct iomap_writepage_ctx *wpc, struct writeback_control *wbc, struct inode *inode, loff_t pos) { struct iomap_ioend *ioend; struct bio *bio; bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS, REQ_OP_WRITE | wbc_to_write_flags(wbc), GFP_NOFS, &iomap_ioend_bioset); bio->bi_iter.bi_sector = iomap_sector(&wpc->iomap, pos); bio->bi_end_io = iomap_writepage_end_bio; wbc_init_bio(wbc, bio); bio->bi_write_hint = inode->i_write_hint; ioend = iomap_ioend_from_bio(bio); INIT_LIST_HEAD(&ioend->io_list); ioend->io_type = wpc->iomap.type; ioend->io_flags = wpc->iomap.flags; if (pos > wpc->iomap.offset) wpc->iomap.flags &= ~IOMAP_F_BOUNDARY; ioend->io_inode = inode; ioend->io_size = 0; ioend->io_offset = pos; ioend->io_sector = bio->bi_iter.bi_sector; wpc->nr_folios = 0; return ioend; } static bool iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t pos) { if (wpc->iomap.offset == pos && (wpc->iomap.flags & IOMAP_F_BOUNDARY)) return false; if ((wpc->iomap.flags & IOMAP_F_SHARED) != (wpc->ioend->io_flags & IOMAP_F_SHARED)) return false; if (wpc->iomap.type != wpc->ioend->io_type) return false; if (pos != wpc->ioend->io_offset + wpc->ioend->io_size) return false; if (iomap_sector(&wpc->iomap, pos) != bio_end_sector(&wpc->ioend->io_bio)) return false; /* * Limit ioend bio chain lengths to minimise IO completion latency. 
This * also prevents long tight loops ending page writeback on all the * folios in the ioend. */ if (wpc->nr_folios >= IOEND_BATCH_SIZE) return false; return true; } /* * Test to see if we have an existing ioend structure that we could append to * first; otherwise finish off the current ioend and start another. * * If a new ioend is created and cached, the old ioend is submitted to the block * layer instantly. Batching optimisations are provided by higher level block * plugging. * * At the end of a writeback pass, there will be a cached ioend remaining on the * writepage context that the caller will need to submit. */ static int iomap_add_to_ioend(struct iomap_writepage_ctx *wpc, struct writeback_control *wbc, struct folio *folio, struct inode *inode, loff_t pos, loff_t end_pos, unsigned len) { struct iomap_folio_state *ifs = folio->private; size_t poff = offset_in_folio(folio, pos); int error; if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos)) { new_ioend: error = iomap_submit_ioend(wpc, 0); if (error) return error; wpc->ioend = iomap_alloc_ioend(wpc, wbc, inode, pos); } if (!bio_add_folio(&wpc->ioend->io_bio, folio, len, poff)) goto new_ioend; if (ifs) atomic_add(len, &ifs->write_bytes_pending); /* * Clamp io_offset and io_size to the incore EOF so that ondisk * file size updates in the ioend completion are byte-accurate. * This avoids recovering files with zeroed tail regions when * writeback races with appending writes: * * Thread 1: Thread 2: * ------------ ----------- * write [A, A+B] * update inode size to A+B * submit I/O [A, A+BS] * write [A+B, A+B+C] * update inode size to A+B+C * <I/O completes, updates disk size to min(A+B+C, A+BS)> * <power failure> * * After reboot: * 1) with A+B+C < A+BS, the file has zero padding in range * [A+B, A+B+C] * * |< Block Size (BS) >| * |DDDDDDDDDDDD0000000000000| * ^ ^ ^ * A A+B A+B+C * (EOF) * * 2) with A+B+C > A+BS, the file has zero padding in range * [A+B, A+BS] * * |< Block Size (BS) >|< Block Size (BS) >| * |DDDDDDDDDDDD0000000000000|00000000000000000000000000| * ^ ^ ^ ^ * A A+B A+BS A+B+C * (EOF) * * D = Valid Data * 0 = Zero Padding * * Note that this defeats the ability to chain the ioends of * appending writes. */ wpc->ioend->io_size += len; if (wpc->ioend->io_offset + wpc->ioend->io_size > end_pos) wpc->ioend->io_size = end_pos - wpc->ioend->io_offset; wbc_account_cgroup_owner(wbc, folio, len); return 0; } static int iomap_writepage_map_blocks(struct iomap_writepage_ctx *wpc, struct writeback_control *wbc, struct folio *folio, struct inode *inode, u64 pos, u64 end_pos, unsigned dirty_len, unsigned *count) { int error; do { unsigned map_len; error = wpc->ops->map_blocks(wpc, inode, pos, dirty_len); if (error) break; trace_iomap_writepage_map(inode, pos, dirty_len, &wpc->iomap); map_len = min_t(u64, dirty_len, wpc->iomap.offset + wpc->iomap.length - pos); WARN_ON_ONCE(!folio->private && map_len < dirty_len); switch (wpc->iomap.type) { case IOMAP_INLINE: WARN_ON_ONCE(1); error = -EIO; break; case IOMAP_HOLE: break; default: error = iomap_add_to_ioend(wpc, wbc, folio, inode, pos, end_pos, map_len); if (!error) (*count)++; break; } dirty_len -= map_len; pos += map_len; } while (dirty_len && !error); /* * We cannot cancel the ioend directly here on error. We may have * already set other pages under writeback and hence we have to run I/O * completion to mark the error state of the pages under writeback * appropriately. * * Just let the file system know what portion of the folio failed to * map. 
*/ if (error && wpc->ops->discard_folio) wpc->ops->discard_folio(folio, pos); return error; } /* * Check interaction of the folio with the file end. * * If the folio is entirely beyond i_size, return false. If it straddles * i_size, adjust end_pos and zero all data beyond i_size. */ static bool iomap_writepage_handle_eof(struct folio *folio, struct inode *inode, u64 *end_pos) { u64 isize = i_size_read(inode); if (*end_pos > isize) { size_t poff = offset_in_folio(folio, isize); pgoff_t end_index = isize >> PAGE_SHIFT; /* * If the folio is entirely ouside of i_size, skip it. * * This can happen due to a truncate operation that is in * progress and in that case truncate will finish it off once * we've dropped the folio lock. * * Note that the pgoff_t used for end_index is an unsigned long. * If the given offset is greater than 16TB on a 32-bit system, * then if we checked if the folio is fully outside i_size with * "if (folio->index >= end_index + 1)", "end_index + 1" would * overflow and evaluate to 0. Hence this folio would be * redirtied and written out repeatedly, which would result in * an infinite loop; the user program performing this operation * would hang. Instead, we can detect this situation by * checking if the folio is totally beyond i_size or if its * offset is just equal to the EOF. */ if (folio->index > end_index || (folio->index == end_index && poff == 0)) return false; /* * The folio straddles i_size. * * It must be zeroed out on each and every writepage invocation * because it may be mmapped: * * A file is mapped in multiples of the page size. For a * file that is not a multiple of the page size, the * remaining memory is zeroed when mapped, and writes to that * region are not written out to the file. * * Also adjust the end_pos to the end of file and skip writeback * for all blocks entirely beyond i_size. */ folio_zero_segment(folio, poff, folio_size(folio)); *end_pos = isize; } return true; } static int iomap_writepage_map(struct iomap_writepage_ctx *wpc, struct writeback_control *wbc, struct folio *folio) { struct iomap_folio_state *ifs = folio->private; struct inode *inode = folio->mapping->host; u64 pos = folio_pos(folio); u64 end_pos = pos + folio_size(folio); u64 end_aligned = 0; unsigned count = 0; int error = 0; u32 rlen; WARN_ON_ONCE(!folio_test_locked(folio)); WARN_ON_ONCE(folio_test_dirty(folio)); WARN_ON_ONCE(folio_test_writeback(folio)); trace_iomap_writepage(inode, pos, folio_size(folio)); if (!iomap_writepage_handle_eof(folio, inode, &end_pos)) { folio_unlock(folio); return 0; } WARN_ON_ONCE(end_pos <= pos); if (i_blocks_per_folio(inode, folio) > 1) { if (!ifs) { ifs = ifs_alloc(inode, folio, 0); iomap_set_range_dirty(folio, 0, end_pos - pos); } /* * Keep the I/O completion handler from clearing the writeback * bit until we have submitted all blocks by adding a bias to * ifs->write_bytes_pending, which is dropped after submitting * all blocks. */ WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending) != 0); atomic_inc(&ifs->write_bytes_pending); } /* * Set the writeback bit ASAP, as the I/O completion for the single * block per folio case happen hit as soon as we're submitting the bio. */ folio_start_writeback(folio); /* * Walk through the folio to find dirty areas to write back. 
*/ end_aligned = round_up(end_pos, i_blocksize(inode)); while ((rlen = iomap_find_dirty_range(folio, &pos, end_aligned))) { error = iomap_writepage_map_blocks(wpc, wbc, folio, inode, pos, end_pos, rlen, &count); if (error) break; pos += rlen; } if (count) wpc->nr_folios++; /* * We can have dirty bits set past end of file in page_mkwrite path * while mapping the last partial folio. Hence it's better to clear * all the dirty bits in the folio here. */ iomap_clear_range_dirty(folio, 0, folio_size(folio)); /* * Usually the writeback bit is cleared by the I/O completion handler. * But we may end up either not actually writing any blocks, or (when * there are multiple blocks in a folio) all I/O might have finished * already at this point. In that case we need to clear the writeback * bit ourselves right after unlocking the page. */ folio_unlock(folio); if (ifs) { if (atomic_dec_and_test(&ifs->write_bytes_pending)) folio_end_writeback(folio); } else { if (!count) folio_end_writeback(folio); } mapping_set_error(inode->i_mapping, error); return error; } int iomap_writepages(struct address_space *mapping, struct writeback_control *wbc, struct iomap_writepage_ctx *wpc, const struct iomap_writeback_ops *ops) { struct folio *folio = NULL; int error; /* * Writeback from reclaim context should never happen except in the case * of a VM regression so warn about it and refuse to write the data. */ if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC | PF_KSWAPD)) == PF_MEMALLOC)) return -EIO; wpc->ops = ops; while ((folio = writeback_iter(mapping, wbc, folio, &error))) error = iomap_writepage_map(wpc, wbc, folio); return iomap_submit_ioend(wpc, error); } EXPORT_SYMBOL_GPL(iomap_writepages); static int __init iomap_buffered_init(void) { return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE), offsetof(struct iomap_ioend, io_bio), BIOSET_NEED_BVECS); } fs_initcall(iomap_buffered_init);
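/*
 * Illustrative sketch, not taken from the kernel tree: how a filesystem
 * might hook its ->writepages() into iomap_writepages() above.  The
 * "myfs_*" names are hypothetical; only the iomap_writepage_ctx /
 * iomap_writeback_ops plumbing mirrors the calls made by this file, and
 * the trivial 1:1 block mapping exists purely for illustration.
 */
struct myfs_writepage_ctx {
	struct iomap_writepage_ctx	ctx;
	/* filesystems typically cache per-writeback lookup state here */
};

static int myfs_map_blocks(struct iomap_writepage_ctx *wpc, struct inode *inode,
			   loff_t offset, unsigned int len)
{
	/*
	 * Hand iomap a mapping that covers @offset.  A toy 1:1 mapping is
	 * shown (logical offset == disk address); real filesystems look up
	 * the extent containing @offset, usually revalidating a cached
	 * mapping first, and describe it in wpc->iomap.
	 */
	wpc->iomap.type = IOMAP_MAPPED;
	wpc->iomap.offset = offset & ~(loff_t)(i_blocksize(inode) - 1);
	wpc->iomap.length = i_blocksize(inode);
	wpc->iomap.addr = wpc->iomap.offset;
	wpc->iomap.bdev = inode->i_sb->s_bdev;
	return 0;
}

static const struct iomap_writeback_ops myfs_writeback_ops = {
	.map_blocks	= myfs_map_blocks,
	/* .prepare_ioend and .discard_folio are optional, see above */
};

static int myfs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct myfs_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc.ctx, &myfs_writeback_ops);
}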
// SPDX-License-Identifier: GPL-2.0
/*
 * procfs-based user access to knfsd statistics
 *
 * /proc/net/rpc/nfsd
 *
 * Format:
 *	rc <hits> <misses> <nocache>
 *			Statistics for the reply cache
 *	fh <stale> <deprecated filehandle cache stats>
 *			statistics for filehandle lookup
 *	io <bytes-read> <bytes-written>
 *			statistics for IO throughput
 *	th <threads> <deprecated thread usage histogram stats>
 *			number of threads
 *	ra <deprecated ra-cache stats>
 *
 *	plus generic RPC stats (see net/sunrpc/stats.c)
 *
 * Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/sunrpc/stats.h>
#include <net/net_namespace.h>

#include "nfsd.h"

static int nfsd_show(struct seq_file *seq, void *v)
{
	struct net *net = pde_data(file_inode(seq->file));
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int i;

	seq_printf(seq, "rc %lld %lld %lld\nfh %lld 0 0 0 0\nio %lld %lld\n",
		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_HITS]),
		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_MISSES]),
		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_RC_NOCACHE]),
		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_FH_STALE]),
		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_IO_READ]),
		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_IO_WRITE]));

	/* thread usage: */
	seq_printf(seq, "th %u 0", atomic_read(&nfsd_th_cnt));

	/* deprecated thread usage histogram stats */
	for (i = 0; i < 10; i++)
		seq_puts(seq, " 0.000");

	/* deprecated ra-cache stats */
	seq_puts(seq, "\nra 0 0 0 0 0 0 0 0 0 0 0 0\n");

	/* show my rpc info */
	svc_seq_show(seq, &nn->nfsd_svcstats);

#ifdef CONFIG_NFSD_V4
	/* Show count for individual nfsv4 operations */
	/* Writing operation numbers 0 1 2 also for maintaining uniformity */
	seq_printf(seq, "proc4ops %u", LAST_NFS4_OP + 1);
	for (i = 0; i <= LAST_NFS4_OP; i++) {
		seq_printf(seq, " %lld",
			   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_NFS4_OP(i)]));
	}
	seq_printf(seq, "\nwdeleg_getattr %lld",
		   percpu_counter_sum_positive(&nn->counter[NFSD_STATS_WDELEG_GETATTR]));

	seq_putc(seq, '\n');
#endif

	return 0;
}

DEFINE_PROC_SHOW_ATTRIBUTE(nfsd);

void nfsd_proc_stat_init(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	svc_proc_register(net, &nn->nfsd_svcstats, &nfsd_proc_ops);
}

void nfsd_proc_stat_shutdown(struct net *net)
{
	svc_proc_unregister(net, "nfsd");
}
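/*
 * Illustrative userspace consumer (not kernel code): parse the reply-cache
 * line of /proc/net/rpc/nfsd exactly as nfsd_show() formats it above.
 */
#include <stdio.h>

int main(void)
{
	long long hits, misses, nocache;
	FILE *f = fopen("/proc/net/rpc/nfsd", "r");
	char line[512];

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		/* "rc <hits> <misses> <nocache>" as printed by nfsd_show() */
		if (sscanf(line, "rc %lld %lld %lld", &hits, &misses, &nocache) == 3) {
			printf("reply cache: %lld hits, %lld misses, %lld nocache\n",
			       hits, misses, nocache);
			break;
		}
	}
	fclose(f);
	return 0;
}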
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. */
#ifndef __LINUX_IOMMU_PRIV_H
#define __LINUX_IOMMU_PRIV_H

#include <linux/iommu.h>

static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
{
	/*
	 * Assume that valid ops must be installed if iommu_probe_device()
	 * has succeeded. The device ops are essentially for internal use
	 * within the IOMMU subsystem itself, so we should be able to trust
	 * ourselves not to misuse the helper.
	 */
	return dev->iommu->iommu_dev->ops;
}

const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode);

static inline const struct iommu_ops *iommu_fwspec_ops(struct iommu_fwspec *fwspec)
{
	return iommu_ops_from_fwnode(fwspec ? fwspec->iommu_fwnode : NULL);
}

int iommu_group_replace_domain(struct iommu_group *group,
			       struct iommu_domain *new_domain);

int iommu_device_register_bus(struct iommu_device *iommu,
			      const struct iommu_ops *ops,
			      const struct bus_type *bus,
			      struct notifier_block *nb);
void iommu_device_unregister_bus(struct iommu_device *iommu,
				 const struct bus_type *bus,
				 struct notifier_block *nb);

struct iommu_attach_handle *iommu_attach_handle_get(struct iommu_group *group,
						    ioasid_t pasid,
						    unsigned int type);
int iommu_attach_group_handle(struct iommu_domain *domain,
			      struct iommu_group *group,
			      struct iommu_attach_handle *handle);
void iommu_detach_group_handle(struct iommu_domain *domain,
			       struct iommu_group *group);
int iommu_replace_group_handle(struct iommu_group *group,
			       struct iommu_domain *new_domain,
			       struct iommu_attach_handle *handle);
#endif /* __LINUX_IOMMU_PRIV_H */
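/*
 * Usage sketch under stated assumptions: iommu_dev_ops_match_fwnode() is a
 * hypothetical helper, shown only to illustrate how IOMMU-core internals
 * could combine the private accessors above.  dev_iommu_fwspec_get() is the
 * existing helper from <linux/iommu.h>.
 */
static bool __maybe_unused iommu_dev_ops_match_fwnode(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	/* true when the fwnode-resolved driver matches the installed ops */
	return iommu_fwspec_ops(fwspec) == dev_iommu_ops(dev);
}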
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_CONNTRACK_SEQADJ_H
#define _NF_CONNTRACK_SEQADJ_H

#include <net/netfilter/nf_conntrack_extend.h>

/**
 * struct nf_ct_seqadj - sequence number adjustment information
 *
 * @correction_pos: position of the last TCP sequence number modification
 * @offset_before: sequence number offset before last modification
 * @offset_after: sequence number offset after last modification
 */
struct nf_ct_seqadj {
	u32	correction_pos;
	s32	offset_before;
	s32	offset_after;
};

struct nf_conn_seqadj {
	struct nf_ct_seqadj	seq[IP_CT_DIR_MAX];
};

static inline struct nf_conn_seqadj *nfct_seqadj(const struct nf_conn *ct)
{
	return nf_ct_ext_find(ct, NF_CT_EXT_SEQADJ);
}

static inline struct nf_conn_seqadj *nfct_seqadj_ext_add(struct nf_conn *ct)
{
	return nf_ct_ext_add(ct, NF_CT_EXT_SEQADJ, GFP_ATOMIC);
}

int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
		      s32 off);
int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
		     __be32 seq, s32 off);
void nf_ct_tcp_seqadj_set(struct sk_buff *skb, struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo, s32 off);
int nf_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
		     enum ip_conntrack_info ctinfo, unsigned int protoff);
s32 nf_ct_seq_offset(const struct nf_conn *ct, enum ip_conntrack_dir, u32 seq);

#endif /* _NF_CONNTRACK_SEQADJ_H */
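/*
 * Usage sketch (hypothetical function, for illustration only): a module that
 * shifts a connection's initial sequence number by @delta bytes adds the
 * seqadj extension and records the offset so nf_ct_seq_adjust() can fix up
 * sequence/ack numbers on later packets.  Conntrack extensions can only be
 * added while the entry is still unconfirmed.
 */
static int example_record_isn_offset(struct nf_conn *ct,
				     enum ip_conntrack_info ctinfo, s32 delta)
{
	if (!nfct_seqadj_ext_add(ct))
		return -ENOMEM;
	return nf_ct_seqadj_init(ct, ctinfo, delta);
}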
50 50 51 67 67 67 51 67 8 9 7 67 65 67 67 66 67 67 59 67 33 65 23 23 22 66 67 66 66 67 67 67 55 55 19 52 52 60 11 60 59 53 51 51 60 60 60 60 59 64 60 59 67 66 67 66 66 66 66 66 11 66 66 65 65 67 67 67 67 67 67 67 67 67 67 67 67 67 67 67 67 67 67 67 67 67 67 67 67 67 67 67 67 67 67 67 67 66 67 66 66 66 67 67 66 64 64 11 65 65 65 59 65 60 60 65 65 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 // SPDX-License-Identifier: GPL-2.0-or-later /* * 842 Software Compression * * Copyright (C) 2015 Dan Streetman, IBM Corp * * See 842.h for details of the 842 compressed format. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "842_compress" #include <linux/hashtable.h> #include "842.h" #include "842_debugfs.h" #define SW842_HASHTABLE8_BITS (10) #define SW842_HASHTABLE4_BITS (11) #define SW842_HASHTABLE2_BITS (10) /* By default, we allow compressing input buffers of any length, but we must * use the non-standard "short data" template so the decompressor can correctly * reproduce the uncompressed data buffer at the right length. 
However the * hardware 842 compressor will not recognize the "short data" template, and * will fail to decompress any compressed buffer containing it (I have no idea * why anyone would want to use software to compress and hardware to decompress * but that's beside the point). This parameter forces the compression * function to simply reject any input buffer that isn't a multiple of 8 bytes * long, instead of using the "short data" template, so that all compressed * buffers produced by this function will be decompressable by the 842 hardware * decompressor. Unless you have a specific need for that, leave this disabled * so that any length buffer can be compressed. */ static bool sw842_strict; module_param_named(strict, sw842_strict, bool, 0644); static u8 comp_ops[OPS_MAX][5] = { /* params size in bits */ { I8, N0, N0, N0, 0x19 }, /* 8 */ { I4, I4, N0, N0, 0x18 }, /* 18 */ { I4, I2, I2, N0, 0x17 }, /* 25 */ { I2, I2, I4, N0, 0x13 }, /* 25 */ { I2, I2, I2, I2, 0x12 }, /* 32 */ { I4, I2, D2, N0, 0x16 }, /* 33 */ { I4, D2, I2, N0, 0x15 }, /* 33 */ { I2, D2, I4, N0, 0x0e }, /* 33 */ { D2, I2, I4, N0, 0x09 }, /* 33 */ { I2, I2, I2, D2, 0x11 }, /* 40 */ { I2, I2, D2, I2, 0x10 }, /* 40 */ { I2, D2, I2, I2, 0x0d }, /* 40 */ { D2, I2, I2, I2, 0x08 }, /* 40 */ { I4, D4, N0, N0, 0x14 }, /* 41 */ { D4, I4, N0, N0, 0x04 }, /* 41 */ { I2, I2, D4, N0, 0x0f }, /* 48 */ { I2, D2, I2, D2, 0x0c }, /* 48 */ { I2, D4, I2, N0, 0x0b }, /* 48 */ { D2, I2, I2, D2, 0x07 }, /* 48 */ { D2, I2, D2, I2, 0x06 }, /* 48 */ { D4, I2, I2, N0, 0x03 }, /* 48 */ { I2, D2, D4, N0, 0x0a }, /* 56 */ { D2, I2, D4, N0, 0x05 }, /* 56 */ { D4, I2, D2, N0, 0x02 }, /* 56 */ { D4, D2, I2, N0, 0x01 }, /* 56 */ { D8, N0, N0, N0, 0x00 }, /* 64 */ }; struct sw842_hlist_node8 { struct hlist_node node; u64 data; u8 index; }; struct sw842_hlist_node4 { struct hlist_node node; u32 data; u16 index; }; struct sw842_hlist_node2 { struct hlist_node node; u16 data; u8 index; }; #define INDEX_NOT_FOUND (-1) #define INDEX_NOT_CHECKED (-2) struct sw842_param { u8 *in; u8 *instart; u64 ilen; u8 *out; u64 olen; u8 bit; u64 data8[1]; u32 data4[2]; u16 data2[4]; int index8[1]; int index4[2]; int index2[4]; DECLARE_HASHTABLE(htable8, SW842_HASHTABLE8_BITS); DECLARE_HASHTABLE(htable4, SW842_HASHTABLE4_BITS); DECLARE_HASHTABLE(htable2, SW842_HASHTABLE2_BITS); struct sw842_hlist_node8 node8[1 << I8_BITS]; struct sw842_hlist_node4 node4[1 << I4_BITS]; struct sw842_hlist_node2 node2[1 << I2_BITS]; }; #define get_input_data(p, o, b) \ be##b##_to_cpu(get_unaligned((__be##b *)((p)->in + (o)))) #define init_hashtable_nodes(p, b) do { \ int _i; \ hash_init((p)->htable##b); \ for (_i = 0; _i < ARRAY_SIZE((p)->node##b); _i++) { \ (p)->node##b[_i].index = _i; \ (p)->node##b[_i].data = 0; \ INIT_HLIST_NODE(&(p)->node##b[_i].node); \ } \ } while (0) #define find_index(p, b, n) ({ \ struct sw842_hlist_node##b *_n; \ p->index##b[n] = INDEX_NOT_FOUND; \ hash_for_each_possible(p->htable##b, _n, node, p->data##b[n]) { \ if (p->data##b[n] == _n->data) { \ p->index##b[n] = _n->index; \ break; \ } \ } \ p->index##b[n] >= 0; \ }) #define check_index(p, b, n) \ ((p)->index##b[n] == INDEX_NOT_CHECKED \ ? 
find_index(p, b, n) \ : (p)->index##b[n] >= 0) #define replace_hash(p, b, i, d) do { \ struct sw842_hlist_node##b *_n = &(p)->node##b[(i)+(d)]; \ hash_del(&_n->node); \ _n->data = (p)->data##b[d]; \ pr_debug("add hash index%x %x pos %x data %lx\n", b, \ (unsigned int)_n->index, \ (unsigned int)((p)->in - (p)->instart), \ (unsigned long)_n->data); \ hash_add((p)->htable##b, &_n->node, _n->data); \ } while (0) static u8 bmask[8] = { 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe }; static int add_bits(struct sw842_param *p, u64 d, u8 n); static int __split_add_bits(struct sw842_param *p, u64 d, u8 n, u8 s) { int ret; if (n <= s) return -EINVAL; ret = add_bits(p, d >> s, n - s); if (ret) return ret; return add_bits(p, d & GENMASK_ULL(s - 1, 0), s); } static int add_bits(struct sw842_param *p, u64 d, u8 n) { int b = p->bit, bits = b + n, s = round_up(bits, 8) - bits; u64 o; u8 *out = p->out; pr_debug("add %u bits %lx\n", (unsigned char)n, (unsigned long)d); if (n > 64) return -EINVAL; /* split this up if writing to > 8 bytes (i.e. n == 64 && p->bit > 0), * or if we're at the end of the output buffer and would write past end */ if (bits > 64) return __split_add_bits(p, d, n, 32); else if (p->olen < 8 && bits > 32 && bits <= 56) return __split_add_bits(p, d, n, 16); else if (p->olen < 4 && bits > 16 && bits <= 24) return __split_add_bits(p, d, n, 8); if (DIV_ROUND_UP(bits, 8) > p->olen) return -ENOSPC; o = *out & bmask[b]; d <<= s; if (bits <= 8) *out = o | d; else if (bits <= 16) put_unaligned(cpu_to_be16(o << 8 | d), (__be16 *)out); else if (bits <= 24) put_unaligned(cpu_to_be32(o << 24 | d << 8), (__be32 *)out); else if (bits <= 32) put_unaligned(cpu_to_be32(o << 24 | d), (__be32 *)out); else if (bits <= 40) put_unaligned(cpu_to_be64(o << 56 | d << 24), (__be64 *)out); else if (bits <= 48) put_unaligned(cpu_to_be64(o << 56 | d << 16), (__be64 *)out); else if (bits <= 56) put_unaligned(cpu_to_be64(o << 56 | d << 8), (__be64 *)out); else put_unaligned(cpu_to_be64(o << 56 | d), (__be64 *)out); p->bit += n; if (p->bit > 7) { p->out += p->bit / 8; p->olen -= p->bit / 8; p->bit %= 8; } return 0; } static int add_template(struct sw842_param *p, u8 c) { int ret, i, b = 0; u8 *t = comp_ops[c]; bool inv = false; if (c >= OPS_MAX) return -EINVAL; pr_debug("template %x\n", t[4]); ret = add_bits(p, t[4], OP_BITS); if (ret) return ret; for (i = 0; i < 4; i++) { pr_debug("op %x\n", t[i]); switch (t[i] & OP_AMOUNT) { case OP_AMOUNT_8: if (b) inv = true; else if (t[i] & OP_ACTION_INDEX) ret = add_bits(p, p->index8[0], I8_BITS); else if (t[i] & OP_ACTION_DATA) ret = add_bits(p, p->data8[0], 64); else inv = true; break; case OP_AMOUNT_4: if (b == 2 && t[i] & OP_ACTION_DATA) ret = add_bits(p, get_input_data(p, 2, 32), 32); else if (b != 0 && b != 4) inv = true; else if (t[i] & OP_ACTION_INDEX) ret = add_bits(p, p->index4[b >> 2], I4_BITS); else if (t[i] & OP_ACTION_DATA) ret = add_bits(p, p->data4[b >> 2], 32); else inv = true; break; case OP_AMOUNT_2: if (b != 0 && b != 2 && b != 4 && b != 6) inv = true; if (t[i] & OP_ACTION_INDEX) ret = add_bits(p, p->index2[b >> 1], I2_BITS); else if (t[i] & OP_ACTION_DATA) ret = add_bits(p, p->data2[b >> 1], 16); else inv = true; break; case OP_AMOUNT_0: inv = (b != 8) || !(t[i] & OP_ACTION_NOOP); break; default: inv = true; break; } if (ret) return ret; if (inv) { pr_err("Invalid templ %x op %d : %x %x %x %x\n", c, i, t[0], t[1], t[2], t[3]); return -EINVAL; } b += t[i] & OP_AMOUNT; } if (b != 8) { pr_err("Invalid template %x len %x : %x %x %x %x\n", c, b, t[0], t[1], t[2], 
t[3]); return -EINVAL; } if (sw842_template_counts) atomic_inc(&template_count[t[4]]); return 0; } static int add_repeat_template(struct sw842_param *p, u8 r) { int ret; /* repeat param is 0-based */ if (!r || --r > REPEAT_BITS_MAX) return -EINVAL; ret = add_bits(p, OP_REPEAT, OP_BITS); if (ret) return ret; ret = add_bits(p, r, REPEAT_BITS); if (ret) return ret; if (sw842_template_counts) atomic_inc(&template_repeat_count); return 0; } static int add_short_data_template(struct sw842_param *p, u8 b) { int ret, i; if (!b || b > SHORT_DATA_BITS_MAX) return -EINVAL; ret = add_bits(p, OP_SHORT_DATA, OP_BITS); if (ret) return ret; ret = add_bits(p, b, SHORT_DATA_BITS); if (ret) return ret; for (i = 0; i < b; i++) { ret = add_bits(p, p->in[i], 8); if (ret) return ret; } if (sw842_template_counts) atomic_inc(&template_short_data_count); return 0; } static int add_zeros_template(struct sw842_param *p) { int ret = add_bits(p, OP_ZEROS, OP_BITS); if (ret) return ret; if (sw842_template_counts) atomic_inc(&template_zeros_count); return 0; } static int add_end_template(struct sw842_param *p) { int ret = add_bits(p, OP_END, OP_BITS); if (ret) return ret; if (sw842_template_counts) atomic_inc(&template_end_count); return 0; } static bool check_template(struct sw842_param *p, u8 c) { u8 *t = comp_ops[c]; int i, match, b = 0; if (c >= OPS_MAX) return false; for (i = 0; i < 4; i++) { if (t[i] & OP_ACTION_INDEX) { if (t[i] & OP_AMOUNT_2) match = check_index(p, 2, b >> 1); else if (t[i] & OP_AMOUNT_4) match = check_index(p, 4, b >> 2); else if (t[i] & OP_AMOUNT_8) match = check_index(p, 8, 0); else return false; if (!match) return false; } b += t[i] & OP_AMOUNT; } return true; } static void get_next_data(struct sw842_param *p) { p->data8[0] = get_input_data(p, 0, 64); p->data4[0] = get_input_data(p, 0, 32); p->data4[1] = get_input_data(p, 4, 32); p->data2[0] = get_input_data(p, 0, 16); p->data2[1] = get_input_data(p, 2, 16); p->data2[2] = get_input_data(p, 4, 16); p->data2[3] = get_input_data(p, 6, 16); } /* update the hashtable entries. * only call this after finding/adding the current template * the dataN fields for the current 8 byte block must be already updated */ static void update_hashtables(struct sw842_param *p) { u64 pos = p->in - p->instart; u64 n8 = (pos >> 3) % (1 << I8_BITS); u64 n4 = (pos >> 2) % (1 << I4_BITS); u64 n2 = (pos >> 1) % (1 << I2_BITS); replace_hash(p, 8, n8, 0); replace_hash(p, 4, n4, 0); replace_hash(p, 4, n4, 1); replace_hash(p, 2, n2, 0); replace_hash(p, 2, n2, 1); replace_hash(p, 2, n2, 2); replace_hash(p, 2, n2, 3); } /* find the next template to use, and add it * the p->dataN fields must already be set for the current 8 byte block */ static int process_next(struct sw842_param *p) { int ret, i; p->index8[0] = INDEX_NOT_CHECKED; p->index4[0] = INDEX_NOT_CHECKED; p->index4[1] = INDEX_NOT_CHECKED; p->index2[0] = INDEX_NOT_CHECKED; p->index2[1] = INDEX_NOT_CHECKED; p->index2[2] = INDEX_NOT_CHECKED; p->index2[3] = INDEX_NOT_CHECKED; /* check up to OPS_MAX - 1; last op is our fallback */ for (i = 0; i < OPS_MAX - 1; i++) { if (check_template(p, i)) break; } ret = add_template(p, i); if (ret) return ret; return 0; } /** * sw842_compress * * Compress the uncompressed buffer of length @ilen at @in to the output buffer * @out, using no more than @olen bytes, using the 842 compression format. * * Returns: 0 on success, error on failure. The @olen parameter * will contain the number of output bytes written on success, or * 0 on error. 
*/ int sw842_compress(const u8 *in, unsigned int ilen, u8 *out, unsigned int *olen, void *wmem) { struct sw842_param *p = (struct sw842_param *)wmem; int ret; u64 last, next, pad, total; u8 repeat_count = 0; u32 crc; BUILD_BUG_ON(sizeof(*p) > SW842_MEM_COMPRESS); init_hashtable_nodes(p, 8); init_hashtable_nodes(p, 4); init_hashtable_nodes(p, 2); p->in = (u8 *)in; p->instart = p->in; p->ilen = ilen; p->out = out; p->olen = *olen; p->bit = 0; total = p->olen; *olen = 0; /* if using strict mode, we can only compress a multiple of 8 */ if (sw842_strict && (ilen % 8)) { pr_err("Using strict mode, can't compress len %d\n", ilen); return -EINVAL; } /* let's compress at least 8 bytes, mkay? */ if (unlikely(ilen < 8)) goto skip_comp; /* make initial 'last' different so we don't match the first time */ last = ~get_unaligned((u64 *)p->in); while (p->ilen > 7) { next = get_unaligned((u64 *)p->in); /* must get the next data, as we need to update the hashtable * entries with the new data every time */ get_next_data(p); /* we don't care about endianness in last or next; * we're just comparing 8 bytes to another 8 bytes, * they're both the same endianness */ if (next == last) { /* repeat count bits are 0-based, so we stop at +1 */ if (++repeat_count <= REPEAT_BITS_MAX) goto repeat; } if (repeat_count) { ret = add_repeat_template(p, repeat_count); repeat_count = 0; if (next == last) /* reached max repeat bits */ goto repeat; } if (next == 0) ret = add_zeros_template(p); else ret = process_next(p); if (ret) return ret; repeat: last = next; update_hashtables(p); p->in += 8; p->ilen -= 8; } if (repeat_count) { ret = add_repeat_template(p, repeat_count); if (ret) return ret; } skip_comp: if (p->ilen > 0) { ret = add_short_data_template(p, p->ilen); if (ret) return ret; p->in += p->ilen; p->ilen = 0; } ret = add_end_template(p); if (ret) return ret; /* * crc(0:31) is appended to target data starting with the next * bit after End of stream template. * nx842 calculates CRC for data in big-endian format. So doing * same here so that sw842 decompression can be used for both * compressed data. */ crc = crc32_be(0, in, ilen); ret = add_bits(p, crc, CRC_BITS); if (ret) return ret; if (p->bit) { p->out++; p->olen--; p->bit = 0; } /* pad compressed length to multiple of 8 */ pad = (8 - ((total - p->olen) % 8)) % 8; if (pad) { if (pad > p->olen) /* we were so close! */ return -ENOSPC; memset(p->out, 0, pad); p->out += pad; p->olen -= pad; } if (unlikely((total - p->olen) > UINT_MAX)) return -ENOSPC; *olen = total - p->olen; return 0; } EXPORT_SYMBOL_GPL(sw842_compress); static int __init sw842_init(void) { if (sw842_template_counts) sw842_debugfs_create(); return 0; } module_init(sw842_init); static void __exit sw842_exit(void) { if (sw842_template_counts) sw842_debugfs_remove(); } module_exit(sw842_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Software 842 Compressor"); MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
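/*
 * Caller sketch (hypothetical wrapper, not part of this file): sw842_compress()
 * needs a scratch buffer of SW842_MEM_COMPRESS bytes (see the BUILD_BUG_ON
 * above), and *dlen is in/out -- the available output space on entry, the
 * number of bytes written on success.  This mirrors how kernel users of the
 * software 842 compressor allocate their workspace.
 */
static int example_sw842_compress(const u8 *src, unsigned int slen,
				  u8 *dst, unsigned int *dlen)
{
	void *wmem = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL);
	int ret;

	if (!wmem)
		return -ENOMEM;
	ret = sw842_compress(src, slen, dst, dlen, wmem);
	kfree(wmem);
	return ret;
}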
// SPDX-License-Identifier: GPL-2.0
/*
 * udc.c - Core UDC Framework
 *
 * Copyright (C) 2010 Texas Instruments
 * Author: Felipe Balbi <balbi@ti.com>
 */

#define pr_fmt(fmt)	"UDC core: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/sched/task_stack.h>
#include <linux/workqueue.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb.h>

#include "trace.h"

static DEFINE_IDA(gadget_id_numbers);

static const struct bus_type gadget_bus_type;

/**
 * struct usb_udc - describes one usb device controller
 * @driver: the gadget driver pointer. For use by the class code
 * @dev: the child device to the actual controller
 * @gadget: the gadget. For use by the class code
 * @list: for use by the udc class driver
 * @vbus: for udcs who care about vbus status, this value is real vbus status;
 *	for udcs who do not care about vbus status, this value is always true
 * @started: the UDC's started state. True if the UDC had started.
 * @allow_connect: Indicates whether UDC is allowed to be pulled up.
 *	Set/cleared by gadget_(un)bind_driver() after gadget driver is bound or
 *	unbound.
 * @vbus_work: work routine to handle VBUS status change notifications.
 * @connect_lock: protects udc->started, gadget->connect,
 *	gadget->allow_connect and gadget->deactivate. The routines
 *	usb_gadget_connect_locked(), usb_gadget_disconnect_locked(),
 *	usb_udc_connect_control_locked(), usb_gadget_udc_start_locked() and
 *	usb_gadget_udc_stop_locked() are called with this lock held.
* * This represents the internal data structure which is used by the UDC-class * to hold information about udc driver and gadget together. */ struct usb_udc { struct usb_gadget_driver *driver; struct usb_gadget *gadget; struct device dev; struct list_head list; bool vbus; bool started; bool allow_connect; struct work_struct vbus_work; struct mutex connect_lock; }; static const struct class udc_class; static LIST_HEAD(udc_list); /* Protects udc_list, udc->driver, driver->is_bound, and related calls */ static DEFINE_MUTEX(udc_lock); /* ------------------------------------------------------------------------- */ /** * usb_ep_set_maxpacket_limit - set maximum packet size limit for endpoint * @ep:the endpoint being configured * @maxpacket_limit:value of maximum packet size limit * * This function should be used only in UDC drivers to initialize endpoint * (usually in probe function). */ void usb_ep_set_maxpacket_limit(struct usb_ep *ep, unsigned maxpacket_limit) { ep->maxpacket_limit = maxpacket_limit; ep->maxpacket = maxpacket_limit; trace_usb_ep_set_maxpacket_limit(ep, 0); } EXPORT_SYMBOL_GPL(usb_ep_set_maxpacket_limit); /** * usb_ep_enable - configure endpoint, making it usable * @ep:the endpoint being configured. may not be the endpoint named "ep0". * drivers discover endpoints through the ep_list of a usb_gadget. * * When configurations are set, or when interface settings change, the driver * will enable or disable the relevant endpoints. while it is enabled, an * endpoint may be used for i/o until the driver receives a disconnect() from * the host or until the endpoint is disabled. * * the ep0 implementation (which calls this routine) must ensure that the * hardware capabilities of each endpoint match the descriptor provided * for it. for example, an endpoint named "ep2in-bulk" would be usable * for interrupt transfers as well as bulk, but it likely couldn't be used * for iso transfers or for endpoint 14. some endpoints are fully * configurable, with more generic names like "ep-a". (remember that for * USB, "in" means "towards the USB host".) * * This routine may be called in an atomic (interrupt) context. * * returns zero, or a negative error code. */ int usb_ep_enable(struct usb_ep *ep) { int ret = 0; if (ep->enabled) goto out; /* UDC drivers can't handle endpoints with maxpacket size 0 */ if (!ep->desc || usb_endpoint_maxp(ep->desc) == 0) { WARN_ONCE(1, "%s: ep%d (%s) has %s\n", __func__, ep->address, ep->name, (!ep->desc) ? "NULL descriptor" : "maxpacket 0"); ret = -EINVAL; goto out; } ret = ep->ops->enable(ep, ep->desc); if (ret) goto out; ep->enabled = true; out: trace_usb_ep_enable(ep, ret); return ret; } EXPORT_SYMBOL_GPL(usb_ep_enable); /** * usb_ep_disable - endpoint is no longer usable * @ep:the endpoint being unconfigured. may not be the endpoint named "ep0". * * no other task may be using this endpoint when this is called. * any pending and uncompleted requests will complete with status * indicating disconnect (-ESHUTDOWN) before this call returns. * gadget drivers must call usb_ep_enable() again before queueing * requests to the endpoint. * * This routine may be called in an atomic (interrupt) context. * * returns zero, or a negative error code. 
*/ int usb_ep_disable(struct usb_ep *ep) { int ret = 0; if (!ep->enabled) goto out; ret = ep->ops->disable(ep); if (ret) goto out; ep->enabled = false; out: trace_usb_ep_disable(ep, ret); return ret; } EXPORT_SYMBOL_GPL(usb_ep_disable); /** * usb_ep_alloc_request - allocate a request object to use with this endpoint * @ep:the endpoint to be used with with the request * @gfp_flags:GFP_* flags to use * * Request objects must be allocated with this call, since they normally * need controller-specific setup and may even need endpoint-specific * resources such as allocation of DMA descriptors. * Requests may be submitted with usb_ep_queue(), and receive a single * completion callback. Free requests with usb_ep_free_request(), when * they are no longer needed. * * Returns the request, or null if one could not be allocated. */ struct usb_request *usb_ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) { struct usb_request *req = NULL; req = ep->ops->alloc_request(ep, gfp_flags); trace_usb_ep_alloc_request(ep, req, req ? 0 : -ENOMEM); return req; } EXPORT_SYMBOL_GPL(usb_ep_alloc_request); /** * usb_ep_free_request - frees a request object * @ep:the endpoint associated with the request * @req:the request being freed * * Reverses the effect of usb_ep_alloc_request(). * Caller guarantees the request is not queued, and that it will * no longer be requeued (or otherwise used). */ void usb_ep_free_request(struct usb_ep *ep, struct usb_request *req) { trace_usb_ep_free_request(ep, req, 0); ep->ops->free_request(ep, req); } EXPORT_SYMBOL_GPL(usb_ep_free_request); /** * usb_ep_queue - queues (submits) an I/O request to an endpoint. * @ep:the endpoint associated with the request * @req:the request being submitted * @gfp_flags: GFP_* flags to use in case the lower level driver couldn't * pre-allocate all necessary memory with the request. * * This tells the device controller to perform the specified request through * that endpoint (reading or writing a buffer). When the request completes, * including being canceled by usb_ep_dequeue(), the request's completion * routine is called to return the request to the driver. Any endpoint * (except control endpoints like ep0) may have more than one transfer * request queued; they complete in FIFO order. Once a gadget driver * submits a request, that request may not be examined or modified until it * is given back to that driver through the completion callback. * * Each request is turned into one or more packets. The controller driver * never merges adjacent requests into the same packet. OUT transfers * will sometimes use data that's already buffered in the hardware. * Drivers can rely on the fact that the first byte of the request's buffer * always corresponds to the first byte of some USB packet, for both * IN and OUT transfers. * * Bulk endpoints can queue any amount of data; the transfer is packetized * automatically. The last packet will be short if the request doesn't fill it * out completely. Zero length packets (ZLPs) should be avoided in portable * protocols since not all usb hardware can successfully handle zero length * packets. (ZLPs may be explicitly written, and may be implicitly written if * the request 'zero' flag is set.) Bulk endpoints may also be used * for interrupt transfers; but the reverse is not true, and some endpoints * won't support every interrupt transfer. (Such as 768 byte packets.) 
* * Interrupt-only endpoints are less functional than bulk endpoints, for * example by not supporting queueing or not handling buffers that are * larger than the endpoint's maxpacket size. They may also treat data * toggle differently. * * Control endpoints ... after getting a setup() callback, the driver queues * one response (even if it would be zero length). That enables the * status ack, after transferring data as specified in the response. Setup * functions may return negative error codes to generate protocol stalls. * (Note that some USB device controllers disallow protocol stall responses * in some cases.) When control responses are deferred (the response is * written after the setup callback returns), then usb_ep_set_halt() may be * used on ep0 to trigger protocol stalls. Depending on the controller, * it may not be possible to trigger a status-stage protocol stall when the * data stage is over, that is, from within the response's completion * routine. * * For periodic endpoints, like interrupt or isochronous ones, the usb host * arranges to poll once per interval, and the gadget driver usually will * have queued some data to transfer at that time. * * Note that @req's ->complete() callback must never be called from * within usb_ep_queue() as that can create deadlock situations. * * This routine may be called in interrupt context. * * Returns zero, or a negative error code. Endpoints that are not enabled * report errors; errors will also be * reported when the usb peripheral is disconnected. * * If and only if @req is successfully queued (the return value is zero), * @req->complete() will be called exactly once, when the Gadget core and * UDC are finished with the request. When the completion function is called, * control of the request is returned to the device driver which submitted it. * The completion handler may then immediately free or reuse @req. */ int usb_ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags) { int ret = 0; if (!ep->enabled && ep->address) { pr_debug("USB gadget: queue request to disabled ep 0x%x (%s)\n", ep->address, ep->name); ret = -ESHUTDOWN; goto out; } ret = ep->ops->queue(ep, req, gfp_flags); out: trace_usb_ep_queue(ep, req, ret); return ret; } EXPORT_SYMBOL_GPL(usb_ep_queue); /** * usb_ep_dequeue - dequeues (cancels, unlinks) an I/O request from an endpoint * @ep:the endpoint associated with the request * @req:the request being canceled * * If the request is still active on the endpoint, it is dequeued and * eventually its completion routine is called (with status -ECONNRESET); * else a negative error code is returned. This routine is asynchronous, * that is, it may return before the completion routine runs. * * Note that some hardware can't clear out write fifos (to unlink the request * at the head of the queue) except as part of disconnecting from usb. Such * restrictions prevent drivers from supporting configuration changes, * even to configuration zero (a "chapter 9" requirement). * * This routine may be called in interrupt context. */ int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req) { int ret; ret = ep->ops->dequeue(ep, req); trace_usb_ep_dequeue(ep, req, ret); return ret; } EXPORT_SYMBOL_GPL(usb_ep_dequeue); /** * usb_ep_set_halt - sets the endpoint halt feature. * @ep: the non-isochronous endpoint being stalled * * Use this to stall an endpoint, perhaps as an error report. 
* Except for control endpoints, * the endpoint stays halted (will not stream any data) until the host * clears this feature; drivers may need to empty the endpoint's request * queue first, to make sure no inappropriate transfers happen. * * Note that while an endpoint CLEAR_FEATURE will be invisible to the * gadget driver, a SET_INTERFACE will not be. To reset endpoints for the * current altsetting, see usb_ep_clear_halt(). When switching altsettings, * it's simplest to use usb_ep_enable() or usb_ep_disable() for the endpoints. * * This routine may be called in interrupt context. * * Returns zero, or a negative error code. On success, this call sets * underlying hardware state that blocks data transfers. * Attempts to halt IN endpoints will fail (returning -EAGAIN) if any * transfer requests are still queued, or if the controller hardware * (usually a FIFO) still holds bytes that the host hasn't collected. */ int usb_ep_set_halt(struct usb_ep *ep) { int ret; ret = ep->ops->set_halt(ep, 1); trace_usb_ep_set_halt(ep, ret); return ret; } EXPORT_SYMBOL_GPL(usb_ep_set_halt); /** * usb_ep_clear_halt - clears endpoint halt, and resets toggle * @ep:the bulk or interrupt endpoint being reset * * Use this when responding to the standard usb "set interface" request, * for endpoints that aren't reconfigured, after clearing any other state * in the endpoint's i/o queue. * * This routine may be called in interrupt context. * * Returns zero, or a negative error code. On success, this call clears * the underlying hardware state reflecting endpoint halt and data toggle. * Note that some hardware can't support this request (like pxa2xx_udc), * and accordingly can't correctly implement interface altsettings. */ int usb_ep_clear_halt(struct usb_ep *ep) { int ret; ret = ep->ops->set_halt(ep, 0); trace_usb_ep_clear_halt(ep, ret); return ret; } EXPORT_SYMBOL_GPL(usb_ep_clear_halt); /** * usb_ep_set_wedge - sets the halt feature and ignores clear requests * @ep: the endpoint being wedged * * Use this to stall an endpoint and ignore CLEAR_FEATURE(HALT_ENDPOINT) * requests. If the gadget driver clears the halt status, it will * automatically unwedge the endpoint. * * This routine may be called in interrupt context. * * Returns zero on success, else negative errno. */ int usb_ep_set_wedge(struct usb_ep *ep) { int ret; if (ep->ops->set_wedge) ret = ep->ops->set_wedge(ep); else ret = ep->ops->set_halt(ep, 1); trace_usb_ep_set_wedge(ep, ret); return ret; } EXPORT_SYMBOL_GPL(usb_ep_set_wedge); /** * usb_ep_fifo_status - returns number of bytes in fifo, or error * @ep: the endpoint whose fifo status is being checked. * * FIFO endpoints may have "unclaimed data" in them in certain cases, * such as after aborted transfers. Hosts may not have collected all * the IN data written by the gadget driver (and reported by a request * completion). The gadget driver may not have collected all the data * written OUT to it by the host. Drivers that need precise handling for * fault reporting or recovery may need to use this call. * * This routine may be called in interrupt context. * * This returns the number of such bytes in the fifo, or a negative * errno if the endpoint doesn't use a FIFO or doesn't support such * precise handling. 
*/ int usb_ep_fifo_status(struct usb_ep *ep) { int ret; if (ep->ops->fifo_status) ret = ep->ops->fifo_status(ep); else ret = -EOPNOTSUPP; trace_usb_ep_fifo_status(ep, ret); return ret; } EXPORT_SYMBOL_GPL(usb_ep_fifo_status); /** * usb_ep_fifo_flush - flushes contents of a fifo * @ep: the endpoint whose fifo is being flushed. * * This call may be used to flush the "unclaimed data" that may exist in * an endpoint fifo after abnormal transaction terminations. The call * must never be used except when endpoint is not being used for any * protocol translation. * * This routine may be called in interrupt context. */ void usb_ep_fifo_flush(struct usb_ep *ep) { if (ep->ops->fifo_flush) ep->ops->fifo_flush(ep); trace_usb_ep_fifo_flush(ep, 0); } EXPORT_SYMBOL_GPL(usb_ep_fifo_flush); /* ------------------------------------------------------------------------- */ /** * usb_gadget_frame_number - returns the current frame number * @gadget: controller that reports the frame number * * Returns the usb frame number, normally eleven bits from a SOF packet, * or negative errno if this device doesn't support this capability. */ int usb_gadget_frame_number(struct usb_gadget *gadget) { int ret; ret = gadget->ops->get_frame(gadget); trace_usb_gadget_frame_number(gadget, ret); return ret; } EXPORT_SYMBOL_GPL(usb_gadget_frame_number); /** * usb_gadget_wakeup - tries to wake up the host connected to this gadget * @gadget: controller used to wake up the host * * Returns zero on success, else negative error code if the hardware * doesn't support such attempts, or its support has not been enabled * by the usb host. Drivers must return device descriptors that report * their ability to support this, or hosts won't enable it. * * This may also try to use SRP to wake the host and start enumeration, * even if OTG isn't otherwise in use. OTG devices may also start * remote wakeup even when hosts don't explicitly enable it. */ int usb_gadget_wakeup(struct usb_gadget *gadget) { int ret = 0; if (!gadget->ops->wakeup) { ret = -EOPNOTSUPP; goto out; } ret = gadget->ops->wakeup(gadget); out: trace_usb_gadget_wakeup(gadget, ret); return ret; } EXPORT_SYMBOL_GPL(usb_gadget_wakeup); /** * usb_gadget_set_remote_wakeup - configures the device remote wakeup feature. * @gadget:the device being configured for remote wakeup * @set:value to be configured. * * set to one to enable remote wakeup feature and zero to disable it. * * returns zero on success, else negative errno. */ int usb_gadget_set_remote_wakeup(struct usb_gadget *gadget, int set) { int ret = 0; if (!gadget->ops->set_remote_wakeup) { ret = -EOPNOTSUPP; goto out; } ret = gadget->ops->set_remote_wakeup(gadget, set); out: trace_usb_gadget_set_remote_wakeup(gadget, ret); return ret; } EXPORT_SYMBOL_GPL(usb_gadget_set_remote_wakeup); /** * usb_gadget_set_selfpowered - sets the device selfpowered feature. * @gadget:the device being declared as self-powered * * this affects the device status reported by the hardware driver * to reflect that it now has a local power supply. * * returns zero on success, else negative errno. */ int usb_gadget_set_selfpowered(struct usb_gadget *gadget) { int ret = 0; if (!gadget->ops->set_selfpowered) { ret = -EOPNOTSUPP; goto out; } ret = gadget->ops->set_selfpowered(gadget, 1); out: trace_usb_gadget_set_selfpowered(gadget, ret); return ret; } EXPORT_SYMBOL_GPL(usb_gadget_set_selfpowered); /** * usb_gadget_clear_selfpowered - clear the device selfpowered feature. 
* @gadget:the device being declared as bus-powered * * this affects the device status reported by the hardware driver. * some hardware may not support bus-powered operation, in which * case this feature's value can never change. * * returns zero on success, else negative errno. */ int usb_gadget_clear_selfpowered(struct usb_gadget *gadget) { int ret = 0; if (!gadget->ops->set_selfpowered) { ret = -EOPNOTSUPP; goto out; } ret = gadget->ops->set_selfpowered(gadget, 0); out: trace_usb_gadget_clear_selfpowered(gadget, ret); return ret; } EXPORT_SYMBOL_GPL(usb_gadget_clear_selfpowered); /** * usb_gadget_vbus_connect - Notify controller that VBUS is powered * @gadget:The device which now has VBUS power. * Context: can sleep * * This call is used by a driver for an external transceiver (or GPIO) * that detects a VBUS power session starting. Common responses include * resuming the controller, activating the D+ (or D-) pullup to let the * host detect that a USB device is attached, and starting to draw power * (8mA or possibly more, especially after SET_CONFIGURATION). * * Returns zero on success, else negative errno. */ int usb_gadget_vbus_connect(struct usb_gadget *gadget) { int ret = 0; if (!gadget->ops->vbus_session) { ret = -EOPNOTSUPP; goto out; } ret = gadget->ops->vbus_session(gadget, 1); out: trace_usb_gadget_vbus_connect(gadget, ret); return ret; } EXPORT_SYMBOL_GPL(usb_gadget_vbus_connect); /** * usb_gadget_vbus_draw - constrain controller's VBUS power usage * @gadget:The device whose VBUS usage is being described * @mA:How much current to draw, in milliAmperes. This should be twice * the value listed in the configuration descriptor bMaxPower field. * * This call is used by gadget drivers during SET_CONFIGURATION calls, * reporting how much power the device may consume. For example, this * could affect how quickly batteries are recharged. * * Returns zero on success, else negative errno. */ int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) { int ret = 0; if (!gadget->ops->vbus_draw) { ret = -EOPNOTSUPP; goto out; } ret = gadget->ops->vbus_draw(gadget, mA); if (!ret) gadget->mA = mA; out: trace_usb_gadget_vbus_draw(gadget, ret); return ret; } EXPORT_SYMBOL_GPL(usb_gadget_vbus_draw); /** * usb_gadget_vbus_disconnect - notify controller about VBUS session end * @gadget:the device whose VBUS supply is being described * Context: can sleep * * This call is used by a driver for an external transceiver (or GPIO) * that detects a VBUS power session ending. Common responses include * reversing everything done in usb_gadget_vbus_connect(). * * Returns zero on success, else negative errno. */ int usb_gadget_vbus_disconnect(struct usb_gadget *gadget) { int ret = 0; if (!gadget->ops->vbus_session) { ret = -EOPNOTSUPP; goto out; } ret = gadget->ops->vbus_session(gadget, 0); out: trace_usb_gadget_vbus_disconnect(gadget, ret); return ret; } EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect); static int usb_gadget_connect_locked(struct usb_gadget *gadget) __must_hold(&gadget->udc->connect_lock) { int ret = 0; if (!gadget->ops->pullup) { ret = -EOPNOTSUPP; goto out; } if (gadget->deactivated || !gadget->udc->allow_connect || !gadget->udc->started) { /* * If the gadget isn't usable (because it is deactivated, * unbound, or not yet started), we only save the new state. * The gadget will be connected automatically when it is * activated/bound/started. 
*/ gadget->connected = true; goto out; } ret = gadget->ops->pullup(gadget, 1); if (!ret) gadget->connected = 1; out: trace_usb_gadget_connect(gadget, ret); return ret; } /** * usb_gadget_connect - software-controlled connect to USB host * @gadget:the peripheral being connected * * Enables the D+ (or potentially D-) pullup. The host will start * enumerating this gadget when the pullup is active and a VBUS session * is active (the link is powered). * * Returns zero on success, else negative errno. */ int usb_gadget_connect(struct usb_gadget *gadget) { int ret; mutex_lock(&gadget->udc->connect_lock); ret = usb_gadget_connect_locked(gadget); mutex_unlock(&gadget->udc->connect_lock); return ret; } EXPORT_SYMBOL_GPL(usb_gadget_connect); static int usb_gadget_disconnect_locked(struct usb_gadget *gadget) __must_hold(&gadget->udc->connect_lock) { int ret = 0; if (!gadget->ops->pullup) { ret = -EOPNOTSUPP; goto out; } if (!gadget->connected) goto out; if (gadget->deactivated || !gadget->udc->started) { /* * If gadget is deactivated we only save new state. * Gadget will stay disconnected after activation. */ gadget->connected = false; goto out; } ret = gadget->ops->pullup(gadget, 0); if (!ret) gadget->connected = 0; mutex_lock(&udc_lock); if (gadget->udc->driver) gadget->udc->driver->disconnect(gadget); mutex_unlock(&udc_lock); out: trace_usb_gadget_disconnect(gadget, ret); return ret; } /** * usb_gadget_disconnect - software-controlled disconnect from USB host * @gadget:the peripheral being disconnected * * Disables the D+ (or potentially D-) pullup, which the host may see * as a disconnect (when a VBUS session is active). Not all systems * support software pullup controls. * * Following a successful disconnect, invoke the ->disconnect() callback * for the current gadget driver so that UDC drivers don't need to. * * Returns zero on success, else negative errno. */ int usb_gadget_disconnect(struct usb_gadget *gadget) { int ret; mutex_lock(&gadget->udc->connect_lock); ret = usb_gadget_disconnect_locked(gadget); mutex_unlock(&gadget->udc->connect_lock); return ret; } EXPORT_SYMBOL_GPL(usb_gadget_disconnect); /** * usb_gadget_deactivate - deactivate function which is not ready to work * @gadget: the peripheral being deactivated * * This routine may be used during the gadget driver bind() call to prevent * the peripheral from ever being visible to the USB host, unless later * usb_gadget_activate() is called. For example, user mode components may * need to be activated before the system can talk to hosts. * * This routine may sleep; it must not be called in interrupt context * (such as from within a gadget driver's disconnect() callback). * * Returns zero on success, else negative errno. */ int usb_gadget_deactivate(struct usb_gadget *gadget) { int ret = 0; mutex_lock(&gadget->udc->connect_lock); if (gadget->deactivated) goto unlock; if (gadget->connected) { ret = usb_gadget_disconnect_locked(gadget); if (ret) goto unlock; /* * If gadget was being connected before deactivation, we want * to reconnect it in usb_gadget_activate(). */ gadget->connected = true; } gadget->deactivated = true; unlock: mutex_unlock(&gadget->udc->connect_lock); trace_usb_gadget_deactivate(gadget, ret); return ret; } EXPORT_SYMBOL_GPL(usb_gadget_deactivate); /** * usb_gadget_activate - activate function which is not ready to work * @gadget: the peripheral being activated * * This routine activates gadget which was previously deactivated with * usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed. 
* * This routine may sleep; it must not be called in interrupt context. * * Returns zero on success, else negative errno. */ int usb_gadget_activate(struct usb_gadget *gadget) { int ret = 0; mutex_lock(&gadget->udc->connect_lock); if (!gadget->deactivated) goto unlock; gadget->deactivated = false; /* * If gadget has been connected before deactivation, or became connected * while it was being deactivated, we call usb_gadget_connect(). */ if (gadget->connected) ret = usb_gadget_connect_locked(gadget); unlock: mutex_unlock(&gadget->udc->connect_lock); trace_usb_gadget_activate(gadget, ret); return ret; } EXPORT_SYMBOL_GPL(usb_gadget_activate); /* ------------------------------------------------------------------------- */ #ifdef CONFIG_HAS_DMA int usb_gadget_map_request_by_dev(struct device *dev, struct usb_request *req, int is_in) { if (req->length == 0) return 0; if (req->sg_was_mapped) { req->num_mapped_sgs = req->num_sgs; return 0; } if (req->num_sgs) { int mapped; mapped = dma_map_sg(dev, req->sg, req->num_sgs, is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); if (mapped == 0) { dev_err(dev, "failed to map SGs\n"); return -EFAULT; } req->num_mapped_sgs = mapped; } else { if (is_vmalloc_addr(req->buf)) { dev_err(dev, "buffer is not dma capable\n"); return -EFAULT; } else if (object_is_on_stack(req->buf)) { dev_err(dev, "buffer is on stack\n"); return -EFAULT; } req->dma = dma_map_single(dev, req->buf, req->length, is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); if (dma_mapping_error(dev, req->dma)) { dev_err(dev, "failed to map buffer\n"); return -EFAULT; } req->dma_mapped = 1; } return 0; } EXPORT_SYMBOL_GPL(usb_gadget_map_request_by_dev); int usb_gadget_map_request(struct usb_gadget *gadget, struct usb_request *req, int is_in) { return usb_gadget_map_request_by_dev(gadget->dev.parent, req, is_in); } EXPORT_SYMBOL_GPL(usb_gadget_map_request); void usb_gadget_unmap_request_by_dev(struct device *dev, struct usb_request *req, int is_in) { if (req->length == 0 || req->sg_was_mapped) return; if (req->num_mapped_sgs) { dma_unmap_sg(dev, req->sg, req->num_sgs, is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); req->num_mapped_sgs = 0; } else if (req->dma_mapped) { dma_unmap_single(dev, req->dma, req->length, is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); req->dma_mapped = 0; } } EXPORT_SYMBOL_GPL(usb_gadget_unmap_request_by_dev); void usb_gadget_unmap_request(struct usb_gadget *gadget, struct usb_request *req, int is_in) { usb_gadget_unmap_request_by_dev(gadget->dev.parent, req, is_in); } EXPORT_SYMBOL_GPL(usb_gadget_unmap_request); #endif /* CONFIG_HAS_DMA */ /* ------------------------------------------------------------------------- */ /** * usb_gadget_giveback_request - give the request back to the gadget layer * @ep: the endpoint to be used with with the request * @req: the request being given back * * This is called by device controller drivers in order to return the * completed request back to the gadget layer. 
*/ void usb_gadget_giveback_request(struct usb_ep *ep, struct usb_request *req) { if (likely(req->status == 0)) usb_led_activity(USB_LED_EVENT_GADGET); trace_usb_gadget_giveback_request(ep, req, 0); req->complete(ep, req); } EXPORT_SYMBOL_GPL(usb_gadget_giveback_request); /* ------------------------------------------------------------------------- */ /** * gadget_find_ep_by_name - returns ep whose name is the same as sting passed * in second parameter or NULL if searched endpoint not found * @g: controller to check for quirk * @name: name of searched endpoint */ struct usb_ep *gadget_find_ep_by_name(struct usb_gadget *g, const char *name) { struct usb_ep *ep; gadget_for_each_ep(ep, g) { if (!strcmp(ep->name, name)) return ep; } return NULL; } EXPORT_SYMBOL_GPL(gadget_find_ep_by_name); /* ------------------------------------------------------------------------- */ int usb_gadget_ep_match_desc(struct usb_gadget *gadget, struct usb_ep *ep, struct usb_endpoint_descriptor *desc, struct usb_ss_ep_comp_descriptor *ep_comp) { u8 type; u16 max; int num_req_streams = 0; /* endpoint already claimed? */ if (ep->claimed) return 0; type = usb_endpoint_type(desc); max = usb_endpoint_maxp(desc); if (usb_endpoint_dir_in(desc) && !ep->caps.dir_in) return 0; if (usb_endpoint_dir_out(desc) && !ep->caps.dir_out) return 0; if (max > ep->maxpacket_limit) return 0; /* "high bandwidth" works only at high speed */ if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp_mult(desc) > 1) return 0; switch (type) { case USB_ENDPOINT_XFER_CONTROL: /* only support ep0 for portable CONTROL traffic */ return 0; case USB_ENDPOINT_XFER_ISOC: if (!ep->caps.type_iso) return 0; /* ISO: limit 1023 bytes full speed, 1024 high/super speed */ if (!gadget_is_dualspeed(gadget) && max > 1023) return 0; break; case USB_ENDPOINT_XFER_BULK: if (!ep->caps.type_bulk) return 0; if (ep_comp && gadget_is_superspeed(gadget)) { /* Get the number of required streams from the * EP companion descriptor and see if the EP * matches it */ num_req_streams = ep_comp->bmAttributes & 0x1f; if (num_req_streams > ep->max_streams) return 0; } break; case USB_ENDPOINT_XFER_INT: /* Bulk endpoints handle interrupt transfers, * except the toggle-quirky iso-synch kind */ if (!ep->caps.type_int && !ep->caps.type_bulk) return 0; /* INT: limit 64 bytes full speed, 1024 high/super speed */ if (!gadget_is_dualspeed(gadget) && max > 64) return 0; break; } return 1; } EXPORT_SYMBOL_GPL(usb_gadget_ep_match_desc); /** * usb_gadget_check_config - checks if the UDC can support the binded * configuration * @gadget: controller to check the USB configuration * * Ensure that a UDC is able to support the requested resources by a * configuration, and that there are no resource limitations, such as * internal memory allocated to all requested endpoints. * * Returns zero on success, else a negative errno. 
*/ int usb_gadget_check_config(struct usb_gadget *gadget) { if (gadget->ops->check_config) return gadget->ops->check_config(gadget); return 0; } EXPORT_SYMBOL_GPL(usb_gadget_check_config); /* ------------------------------------------------------------------------- */ static void usb_gadget_state_work(struct work_struct *work) { struct usb_gadget *gadget = work_to_gadget(work); struct usb_udc *udc = gadget->udc; if (udc) sysfs_notify(&udc->dev.kobj, NULL, "state"); } void usb_gadget_set_state(struct usb_gadget *gadget, enum usb_device_state state) { gadget->state = state; schedule_work(&gadget->work); } EXPORT_SYMBOL_GPL(usb_gadget_set_state); /* ------------------------------------------------------------------------- */ /* Acquire connect_lock before calling this function. */ static int usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock) { if (udc->vbus) return usb_gadget_connect_locked(udc->gadget); else return usb_gadget_disconnect_locked(udc->gadget); } static void vbus_event_work(struct work_struct *work) { struct usb_udc *udc = container_of(work, struct usb_udc, vbus_work); mutex_lock(&udc->connect_lock); usb_udc_connect_control_locked(udc); mutex_unlock(&udc->connect_lock); } /** * usb_udc_vbus_handler - updates the udc core vbus status, and try to * connect or disconnect gadget * @gadget: The gadget which vbus change occurs * @status: The vbus status * * The udc driver calls it when it wants to connect or disconnect gadget * according to vbus status. * * This function can be invoked from interrupt context by irq handlers of * the gadget drivers, however, usb_udc_connect_control() has to run in * non-atomic context due to the following: * a. Some of the gadget driver implementations expect the ->pullup * callback to be invoked in non-atomic context. * b. usb_gadget_disconnect() acquires udc_lock which is a mutex. * Hence offload invocation of usb_udc_connect_control() to workqueue. */ void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status) { struct usb_udc *udc = gadget->udc; if (udc) { udc->vbus = status; schedule_work(&udc->vbus_work); } } EXPORT_SYMBOL_GPL(usb_udc_vbus_handler); /** * usb_gadget_udc_reset - notifies the udc core that bus reset occurs * @gadget: The gadget which bus reset occurs * @driver: The gadget driver we want to notify * * If the udc driver has bus reset handler, it needs to call this when the bus * reset occurs, it notifies the gadget driver that the bus reset occurs as * well as updates gadget state. */ void usb_gadget_udc_reset(struct usb_gadget *gadget, struct usb_gadget_driver *driver) { driver->reset(gadget); usb_gadget_set_state(gadget, USB_STATE_DEFAULT); } EXPORT_SYMBOL_GPL(usb_gadget_udc_reset); /** * usb_gadget_udc_start_locked - tells usb device controller to start up * @udc: The UDC to be started * * This call is issued by the UDC Class driver when it's about * to register a gadget driver to the device controller, before * calling gadget driver's bind() method. * * It allows the controller to be powered off until strictly * necessary to have it powered on. * * Returns zero on success, else negative errno. * * Caller should acquire connect_lock before invoking this function. 
*/ static inline int usb_gadget_udc_start_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock) { int ret; if (udc->started) { dev_err(&udc->dev, "UDC had already started\n"); return -EBUSY; } ret = udc->gadget->ops->udc_start(udc->gadget, udc->driver); if (!ret) udc->started = true; return ret; } /** * usb_gadget_udc_stop_locked - tells usb device controller we don't need it anymore * @udc: The UDC to be stopped * * This call is issued by the UDC Class driver after calling * gadget driver's unbind() method. * * The details are implementation specific, but it can go as * far as powering off UDC completely and disable its data * line pullups. * * Caller should acquire connect lock before invoking this function. */ static inline void usb_gadget_udc_stop_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock) { if (!udc->started) { dev_err(&udc->dev, "UDC had already stopped\n"); return; } udc->gadget->ops->udc_stop(udc->gadget); udc->started = false; } /** * usb_gadget_udc_set_speed - tells usb device controller speed supported by * current driver * @udc: The device we want to set maximum speed * @speed: The maximum speed to allowed to run * * This call is issued by the UDC Class driver before calling * usb_gadget_udc_start() in order to make sure that we don't try to * connect on speeds the gadget driver doesn't support. */ static inline void usb_gadget_udc_set_speed(struct usb_udc *udc, enum usb_device_speed speed) { struct usb_gadget *gadget = udc->gadget; enum usb_device_speed s; if (speed == USB_SPEED_UNKNOWN) s = gadget->max_speed; else s = min(speed, gadget->max_speed); if (s == USB_SPEED_SUPER_PLUS && gadget->ops->udc_set_ssp_rate) gadget->ops->udc_set_ssp_rate(gadget, gadget->max_ssp_rate); else if (gadget->ops->udc_set_speed) gadget->ops->udc_set_speed(gadget, s); } /** * usb_gadget_enable_async_callbacks - tell usb device controller to enable asynchronous callbacks * @udc: The UDC which should enable async callbacks * * This routine is used when binding gadget drivers. It undoes the effect * of usb_gadget_disable_async_callbacks(); the UDC driver should enable IRQs * (if necessary) and resume issuing callbacks. * * This routine will always be called in process context. */ static inline void usb_gadget_enable_async_callbacks(struct usb_udc *udc) { struct usb_gadget *gadget = udc->gadget; if (gadget->ops->udc_async_callbacks) gadget->ops->udc_async_callbacks(gadget, true); } /** * usb_gadget_disable_async_callbacks - tell usb device controller to disable asynchronous callbacks * @udc: The UDC which should disable async callbacks * * This routine is used when unbinding gadget drivers. It prevents a race: * The UDC driver doesn't know when the gadget driver's ->unbind callback * runs, so unless it is told to disable asynchronous callbacks, it might * issue a callback (such as ->disconnect) after the unbind has completed. * * After this function runs, the UDC driver must suppress all ->suspend, * ->resume, ->disconnect, ->reset, and ->setup callbacks to the gadget driver * until async callbacks are again enabled. A simple-minded but effective * way to accomplish this is to tell the UDC hardware not to generate any * more IRQs. * * Request completion callbacks must still be issued. However, it's okay * to defer them until the request is cancelled, since the pull-up will be * turned off during the time period when async callbacks are disabled. * * This routine will always be called in process context. 
*/ static inline void usb_gadget_disable_async_callbacks(struct usb_udc *udc) { struct usb_gadget *gadget = udc->gadget; if (gadget->ops->udc_async_callbacks) gadget->ops->udc_async_callbacks(gadget, false); } /** * usb_udc_release - release the usb_udc struct * @dev: the dev member within usb_udc * * This is called by driver's core in order to free memory once the last * reference is released. */ static void usb_udc_release(struct device *dev) { struct usb_udc *udc; udc = container_of(dev, struct usb_udc, dev); dev_dbg(dev, "releasing '%s'\n", dev_name(dev)); kfree(udc); } static const struct attribute_group *usb_udc_attr_groups[]; static void usb_udc_nop_release(struct device *dev) { dev_vdbg(dev, "%s\n", __func__); } /** * usb_initialize_gadget - initialize a gadget and its embedded struct device * @parent: the parent device to this udc. Usually the controller driver's * device. * @gadget: the gadget to be initialized. * @release: a gadget release function. */ void usb_initialize_gadget(struct device *parent, struct usb_gadget *gadget, void (*release)(struct device *dev)) { INIT_WORK(&gadget->work, usb_gadget_state_work); gadget->dev.parent = parent; if (release) gadget->dev.release = release; else gadget->dev.release = usb_udc_nop_release; device_initialize(&gadget->dev); gadget->dev.bus = &gadget_bus_type; } EXPORT_SYMBOL_GPL(usb_initialize_gadget); /** * usb_add_gadget - adds a new gadget to the udc class driver list * @gadget: the gadget to be added to the list. * * Returns zero on success, negative errno otherwise. * Does not do a final usb_put_gadget() if an error occurs. */ int usb_add_gadget(struct usb_gadget *gadget) { struct usb_udc *udc; int ret = -ENOMEM; udc = kzalloc(sizeof(*udc), GFP_KERNEL); if (!udc) goto error; device_initialize(&udc->dev); udc->dev.release = usb_udc_release; udc->dev.class = &udc_class; udc->dev.groups = usb_udc_attr_groups; udc->dev.parent = gadget->dev.parent; ret = dev_set_name(&udc->dev, "%s", kobject_name(&gadget->dev.parent->kobj)); if (ret) goto err_put_udc; udc->gadget = gadget; gadget->udc = udc; mutex_init(&udc->connect_lock); udc->started = false; mutex_lock(&udc_lock); list_add_tail(&udc->list, &udc_list); mutex_unlock(&udc_lock); INIT_WORK(&udc->vbus_work, vbus_event_work); ret = device_add(&udc->dev); if (ret) goto err_unlist_udc; usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED); udc->vbus = true; ret = ida_alloc(&gadget_id_numbers, GFP_KERNEL); if (ret < 0) goto err_del_udc; gadget->id_number = ret; dev_set_name(&gadget->dev, "gadget.%d", ret); ret = device_add(&gadget->dev); if (ret) goto err_free_id; ret = sysfs_create_link(&udc->dev.kobj, &gadget->dev.kobj, "gadget"); if (ret) goto err_del_gadget; return 0; err_del_gadget: device_del(&gadget->dev); err_free_id: ida_free(&gadget_id_numbers, gadget->id_number); err_del_udc: flush_work(&gadget->work); device_del(&udc->dev); err_unlist_udc: mutex_lock(&udc_lock); list_del(&udc->list); mutex_unlock(&udc_lock); err_put_udc: put_device(&udc->dev); error: return ret; } EXPORT_SYMBOL_GPL(usb_add_gadget); /** * usb_add_gadget_udc_release - adds a new gadget to the udc class driver list * @parent: the parent device to this udc. Usually the controller driver's * device. * @gadget: the gadget to be added to the list. * @release: a gadget release function. * * Returns zero on success, negative errno otherwise. * Calls the gadget release function in the latter case. 
*/ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, void (*release)(struct device *dev)) { int ret; usb_initialize_gadget(parent, gadget, release); ret = usb_add_gadget(gadget); if (ret) usb_put_gadget(gadget); return ret; } EXPORT_SYMBOL_GPL(usb_add_gadget_udc_release); /** * usb_get_gadget_udc_name - get the name of the first UDC controller * This functions returns the name of the first UDC controller in the system. * Please note that this interface is usefull only for legacy drivers which * assume that there is only one UDC controller in the system and they need to * get its name before initialization. There is no guarantee that the UDC * of the returned name will be still available, when gadget driver registers * itself. * * Returns pointer to string with UDC controller name on success, NULL * otherwise. Caller should kfree() returned string. */ char *usb_get_gadget_udc_name(void) { struct usb_udc *udc; char *name = NULL; /* For now we take the first available UDC */ mutex_lock(&udc_lock); list_for_each_entry(udc, &udc_list, list) { if (!udc->driver) { name = kstrdup(udc->gadget->name, GFP_KERNEL); break; } } mutex_unlock(&udc_lock); return name; } EXPORT_SYMBOL_GPL(usb_get_gadget_udc_name); /** * usb_add_gadget_udc - adds a new gadget to the udc class driver list * @parent: the parent device to this udc. Usually the controller * driver's device. * @gadget: the gadget to be added to the list * * Returns zero on success, negative errno otherwise. */ int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget) { return usb_add_gadget_udc_release(parent, gadget, NULL); } EXPORT_SYMBOL_GPL(usb_add_gadget_udc); /** * usb_del_gadget - deletes a gadget and unregisters its udc * @gadget: the gadget to be deleted. * * This will unbind @gadget, if it is bound. * It will not do a final usb_put_gadget(). */ void usb_del_gadget(struct usb_gadget *gadget) { struct usb_udc *udc = gadget->udc; if (!udc) return; dev_vdbg(gadget->dev.parent, "unregistering gadget\n"); mutex_lock(&udc_lock); list_del(&udc->list); mutex_unlock(&udc_lock); kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE); sysfs_remove_link(&udc->dev.kobj, "gadget"); flush_work(&gadget->work); device_del(&gadget->dev); ida_free(&gadget_id_numbers, gadget->id_number); cancel_work_sync(&udc->vbus_work); device_unregister(&udc->dev); } EXPORT_SYMBOL_GPL(usb_del_gadget); /** * usb_del_gadget_udc - unregisters a gadget * @gadget: the gadget to be unregistered. * * Calls usb_del_gadget() and does a final usb_put_gadget(). 
*/ void usb_del_gadget_udc(struct usb_gadget *gadget) { usb_del_gadget(gadget); usb_put_gadget(gadget); } EXPORT_SYMBOL_GPL(usb_del_gadget_udc); /* ------------------------------------------------------------------------- */ static int gadget_match_driver(struct device *dev, const struct device_driver *drv) { struct usb_gadget *gadget = dev_to_usb_gadget(dev); struct usb_udc *udc = gadget->udc; struct usb_gadget_driver *driver = container_of(drv, struct usb_gadget_driver, driver); /* If the driver specifies a udc_name, it must match the UDC's name */ if (driver->udc_name && strcmp(driver->udc_name, dev_name(&udc->dev)) != 0) return 0; /* If the driver is already bound to a gadget, it doesn't match */ if (driver->is_bound) return 0; /* Otherwise any gadget driver matches any UDC */ return 1; } static int gadget_bind_driver(struct device *dev) { struct usb_gadget *gadget = dev_to_usb_gadget(dev); struct usb_udc *udc = gadget->udc; struct usb_gadget_driver *driver = container_of(dev->driver, struct usb_gadget_driver, driver); int ret = 0; mutex_lock(&udc_lock); if (driver->is_bound) { mutex_unlock(&udc_lock); return -ENXIO; /* Driver binds to only one gadget */ } driver->is_bound = true; udc->driver = driver; mutex_unlock(&udc_lock); dev_dbg(&udc->dev, "binding gadget driver [%s]\n", driver->function); usb_gadget_udc_set_speed(udc, driver->max_speed); ret = driver->bind(udc->gadget, driver); if (ret) goto err_bind; mutex_lock(&udc->connect_lock); ret = usb_gadget_udc_start_locked(udc); if (ret) { mutex_unlock(&udc->connect_lock); goto err_start; } usb_gadget_enable_async_callbacks(udc); udc->allow_connect = true; ret = usb_udc_connect_control_locked(udc); if (ret) goto err_connect_control; mutex_unlock(&udc->connect_lock); kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE); return 0; err_connect_control: udc->allow_connect = false; usb_gadget_disable_async_callbacks(udc); if (gadget->irq) synchronize_irq(gadget->irq); usb_gadget_udc_stop_locked(udc); mutex_unlock(&udc->connect_lock); err_start: driver->unbind(udc->gadget); err_bind: if (ret != -EISNAM) dev_err(&udc->dev, "failed to start %s: %d\n", driver->function, ret); mutex_lock(&udc_lock); udc->driver = NULL; driver->is_bound = false; mutex_unlock(&udc_lock); return ret; } static void gadget_unbind_driver(struct device *dev) { struct usb_gadget *gadget = dev_to_usb_gadget(dev); struct usb_udc *udc = gadget->udc; struct usb_gadget_driver *driver = udc->driver; dev_dbg(&udc->dev, "unbinding gadget driver [%s]\n", driver->function); udc->allow_connect = false; cancel_work_sync(&udc->vbus_work); mutex_lock(&udc->connect_lock); usb_gadget_disconnect_locked(gadget); usb_gadget_disable_async_callbacks(udc); if (gadget->irq) synchronize_irq(gadget->irq); mutex_unlock(&udc->connect_lock); udc->driver->unbind(gadget); mutex_lock(&udc->connect_lock); usb_gadget_udc_stop_locked(udc); mutex_unlock(&udc->connect_lock); mutex_lock(&udc_lock); driver->is_bound = false; udc->driver = NULL; mutex_unlock(&udc_lock); kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE); } /* ------------------------------------------------------------------------- */ int usb_gadget_register_driver_owner(struct usb_gadget_driver *driver, struct module *owner, const char *mod_name) { int ret; if (!driver || !driver->bind || !driver->setup) return -EINVAL; driver->driver.bus = &gadget_bus_type; driver->driver.owner = owner; driver->driver.mod_name = mod_name; driver->driver.probe_type = PROBE_FORCE_SYNCHRONOUS; ret = driver_register(&driver->driver); if (ret) { pr_warn("%s: driver 
registration failed: %d\n", driver->function, ret); return ret; } mutex_lock(&udc_lock); if (!driver->is_bound) { if (driver->match_existing_only) { pr_warn("%s: couldn't find an available UDC or it's busy\n", driver->function); ret = -EBUSY; } else { pr_info("%s: couldn't find an available UDC\n", driver->function); ret = 0; } } mutex_unlock(&udc_lock); if (ret) driver_unregister(&driver->driver); return ret; } EXPORT_SYMBOL_GPL(usb_gadget_register_driver_owner); int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) { if (!driver || !driver->unbind) return -EINVAL; driver_unregister(&driver->driver); return 0; } EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver); /* ------------------------------------------------------------------------- */ static ssize_t srp_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { struct usb_udc *udc = container_of(dev, struct usb_udc, dev); if (sysfs_streq(buf, "1")) usb_gadget_wakeup(udc->gadget); return n; } static DEVICE_ATTR_WO(srp); static ssize_t soft_connect_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { struct usb_udc *udc = container_of(dev, struct usb_udc, dev); ssize_t ret; device_lock(&udc->gadget->dev); if (!udc->driver) { dev_err(dev, "soft-connect without a gadget driver\n"); ret = -EOPNOTSUPP; goto out; } if (sysfs_streq(buf, "connect")) { mutex_lock(&udc->connect_lock); usb_gadget_udc_start_locked(udc); usb_gadget_connect_locked(udc->gadget); mutex_unlock(&udc->connect_lock); } else if (sysfs_streq(buf, "disconnect")) { mutex_lock(&udc->connect_lock); usb_gadget_disconnect_locked(udc->gadget); usb_gadget_udc_stop_locked(udc); mutex_unlock(&udc->connect_lock); } else { dev_err(dev, "unsupported command '%s'\n", buf); ret = -EINVAL; goto out; } ret = n; out: device_unlock(&udc->gadget->dev); return ret; } static DEVICE_ATTR_WO(soft_connect); static ssize_t state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_udc *udc = container_of(dev, struct usb_udc, dev); struct usb_gadget *gadget = udc->gadget; return sprintf(buf, "%s\n", usb_state_string(gadget->state)); } static DEVICE_ATTR_RO(state); static ssize_t function_show(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_udc *udc = container_of(dev, struct usb_udc, dev); struct usb_gadget_driver *drv; int rc = 0; mutex_lock(&udc_lock); drv = udc->driver; if (drv && drv->function) rc = scnprintf(buf, PAGE_SIZE, "%s\n", drv->function); mutex_unlock(&udc_lock); return rc; } static DEVICE_ATTR_RO(function); #define USB_UDC_SPEED_ATTR(name, param) \ ssize_t name##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct usb_udc *udc = container_of(dev, struct usb_udc, dev); \ return scnprintf(buf, PAGE_SIZE, "%s\n", \ usb_speed_string(udc->gadget->param)); \ } \ static DEVICE_ATTR_RO(name) static USB_UDC_SPEED_ATTR(current_speed, speed); static USB_UDC_SPEED_ATTR(maximum_speed, max_speed); #define USB_UDC_ATTR(name) \ ssize_t name##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct usb_udc *udc = container_of(dev, struct usb_udc, dev); \ struct usb_gadget *gadget = udc->gadget; \ \ return scnprintf(buf, PAGE_SIZE, "%d\n", gadget->name); \ } \ static DEVICE_ATTR_RO(name) static USB_UDC_ATTR(is_otg); static USB_UDC_ATTR(is_a_peripheral); static USB_UDC_ATTR(b_hnp_enable); static USB_UDC_ATTR(a_hnp_support); static USB_UDC_ATTR(a_alt_hnp_support); static USB_UDC_ATTR(is_selfpowered); static struct attribute 
*usb_udc_attrs[] = {
	&dev_attr_srp.attr,
	&dev_attr_soft_connect.attr,
	&dev_attr_state.attr,
	&dev_attr_function.attr,
	&dev_attr_current_speed.attr,
	&dev_attr_maximum_speed.attr,
	&dev_attr_is_otg.attr,
	&dev_attr_is_a_peripheral.attr,
	&dev_attr_b_hnp_enable.attr,
	&dev_attr_a_hnp_support.attr,
	&dev_attr_a_alt_hnp_support.attr,
	&dev_attr_is_selfpowered.attr,
	NULL,
};

static const struct attribute_group usb_udc_attr_group = {
	.attrs = usb_udc_attrs,
};

static const struct attribute_group *usb_udc_attr_groups[] = {
	&usb_udc_attr_group,
	NULL,
};

static int usb_udc_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
	int ret;

	ret = add_uevent_var(env, "USB_UDC_NAME=%s", udc->gadget->name);
	if (ret) {
		dev_err(dev, "failed to add uevent USB_UDC_NAME\n");
		return ret;
	}

	mutex_lock(&udc_lock);
	if (udc->driver)
		ret = add_uevent_var(env, "USB_UDC_DRIVER=%s",
				udc->driver->function);
	mutex_unlock(&udc_lock);
	if (ret) {
		dev_err(dev, "failed to add uevent USB_UDC_DRIVER\n");
		return ret;
	}

	return 0;
}

static const struct class udc_class = {
	.name		= "udc",
	.dev_uevent	= usb_udc_uevent,
};

static const struct bus_type gadget_bus_type = {
	.name = "gadget",
	.probe = gadget_bind_driver,
	.remove = gadget_unbind_driver,
	.match = gadget_match_driver,
};

static int __init usb_udc_init(void)
{
	int rc;

	rc = class_register(&udc_class);
	if (rc)
		return rc;

	rc = bus_register(&gadget_bus_type);
	if (rc)
		class_unregister(&udc_class);

	return rc;
}
subsys_initcall(usb_udc_init);

static void __exit usb_udc_exit(void)
{
	bus_unregister(&gadget_bus_type);
	class_unregister(&udc_class);
}
module_exit(usb_udc_exit);

MODULE_DESCRIPTION("UDC Framework");
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
MODULE_LICENSE("GPL v2");
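/*
 * Illustrative usage sketch (not part of the UDC core above): a minimal
 * example of how a gadget function driver might allocate and queue a
 * request on an IN endpoint with the usb_ep_queue() API documented in
 * this file. The "example_" names are hypothetical; usb_ep_alloc_request()
 * and usb_ep_free_request() are the standard gadget endpoint helpers from
 * <linux/usb/gadget.h>.
 */
#include <linux/usb/gadget.h>

/* Completion callback: called exactly once for a successfully queued request. */
static void example_complete(struct usb_ep *ep, struct usb_request *req)
{
	if (req->status)
		pr_debug("example: transfer ended with status %d\n", req->status);
	/* Ownership of the request is back with us; free (or requeue) it. */
	usb_ep_free_request(ep, req);
}

static int example_send(struct usb_ep *in_ep, void *data, unsigned int len)
{
	struct usb_request *req;
	int ret;

	req = usb_ep_alloc_request(in_ep, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->buf = data;
	req->length = len;
	req->complete = example_complete;

	/* May run in interrupt context; on error no completion will follow. */
	ret = usb_ep_queue(in_ep, req, GFP_ATOMIC);
	if (ret)
		usb_ep_free_request(in_ep, req);
	return ret;
}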
12 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _BCACHEFS_IO_READ_H #define _BCACHEFS_IO_READ_H #include "bkey_buf.h" #include "reflink.h" struct bch_read_bio { struct bch_fs *c; u64 start_time; u64 submit_time; /* * Reads will often have to be split, and if the extent being read from * was checksummed or compressed we'll also have to allocate bounce * buffers and copy the data back into the original bio. * * If we didn't have to split, we have to save and restore the original * bi_end_io - @split below indicates which: */ union { struct bch_read_bio *parent; bio_end_io_t *end_io; }; /* * Saved copy of bio->bi_iter, from submission time - allows us to * resubmit on IO error, and also to copy data back to the original bio * when we're bouncing: */ struct bvec_iter bvec_iter; unsigned offset_into_extent; u16 flags; union { struct { u16 bounce:1, split:1, kmalloc:1, have_ioref:1, narrow_crcs:1, hole:1, retry:2, context:2; }; u16 _state; }; struct bch_devs_list devs_have; struct extent_ptr_decoded pick; /* * pos we read from - different from data_pos for indirect extents: */ u32 subvol; struct bpos read_pos; /* * start pos of data we read (may not be pos of data we want) - for * promote, narrow extents paths: */ enum btree_id data_btree; struct bpos data_pos; struct bversion version; struct promote_op *promote; struct bch_io_opts opts; struct work_struct work; struct bio bio; }; #define to_rbio(_bio) container_of((_bio), struct bch_read_bio, bio) struct bch_devs_mask; struct cache_promote_op; struct extent_ptr_decoded; static inline int bch2_read_indirect_extent(struct btree_trans *trans, enum btree_id *data_btree, s64 *offset_into_extent, struct bkey_buf *extent) { if (extent->k->k.type != KEY_TYPE_reflink_p) return 0; *data_btree = BTREE_ID_reflink; struct btree_iter iter; struct bkey_s_c k = bch2_lookup_indirect_extent(trans, &iter, offset_into_extent, bkey_i_to_s_c_reflink_p(extent->k), true, 0); int ret = bkey_err(k); if (ret) return ret; if (bkey_deleted(k.k)) { bch2_trans_iter_exit(trans, &iter); return -BCH_ERR_missing_indirect_extent; } bch2_bkey_buf_reassemble(extent, trans->c, k); bch2_trans_iter_exit(trans, &iter); return 0; } enum bch_read_flags { BCH_READ_RETRY_IF_STALE = 1 << 0, BCH_READ_MAY_PROMOTE = 1 << 1, BCH_READ_USER_MAPPED = 1 << 2, BCH_READ_NODECODE = 1 << 3, BCH_READ_LAST_FRAGMENT = 1 << 4, /* internal: */ BCH_READ_MUST_BOUNCE = 1 << 5, BCH_READ_MUST_CLONE = 1 << 6, BCH_READ_IN_RETRY = 1 << 7, }; int __bch2_read_extent(struct btree_trans *, struct bch_read_bio *, struct bvec_iter, struct bpos, enum btree_id, struct bkey_s_c, unsigned, struct bch_io_failures *, unsigned); static inline void bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio, struct bpos read_pos, enum btree_id data_btree, struct bkey_s_c k, unsigned offset_into_extent, unsigned flags) { __bch2_read_extent(trans, rbio, rbio->bio.bi_iter, read_pos, data_btree, k, offset_into_extent, NULL, flags); } void 
__bch2_read(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
	    subvol_inum, struct bch_io_failures *, unsigned flags);

static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
			     subvol_inum inum)
{
	struct bch_io_failures failed = { .nr = 0 };

	BUG_ON(rbio->_state);

	rbio->c = c;
	rbio->start_time = local_clock();
	rbio->subvol = inum.subvol;

	__bch2_read(c, rbio, rbio->bio.bi_iter, inum, &failed,
		    BCH_READ_RETRY_IF_STALE|
		    BCH_READ_MAY_PROMOTE|
		    BCH_READ_USER_MAPPED);
}

static inline struct bch_read_bio *rbio_init(struct bio *bio,
					     struct bch_io_opts opts)
{
	struct bch_read_bio *rbio = to_rbio(bio);

	rbio->_state = 0;
	rbio->promote = NULL;
	rbio->opts = opts;
	return rbio;
}

void bch2_fs_io_read_exit(struct bch_fs *);
int bch2_fs_io_read_init(struct bch_fs *);

#endif /* _BCACHEFS_IO_READ_H */
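/*
 * Illustrative caller sketch (an assumption, not bcachefs code): issuing a
 * read with the inline helpers declared above. The bio is assumed to be
 * embedded in a struct bch_read_bio and already set up by the caller; the
 * "example_" name is hypothetical.
 */
static void example_issue_read(struct bch_fs *c, struct bio *bio,
			       struct bch_io_opts opts, subvol_inum inum)
{
	/* rbio_init() clears the state bits and records the io options. */
	struct bch_read_bio *rbio = rbio_init(bio, opts);

	/*
	 * bch2_read() fills in @c, the start time and the subvolume, then
	 * hands off to __bch2_read() with the default RETRY_IF_STALE,
	 * MAY_PROMOTE and USER_MAPPED flags.
	 */
	bch2_read(c, rbio, inum);
}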
159 140 140 140 98 97 98 98 141 159 143 141 160 156 159 113 108 109 113 113 113 113 113 111 2 109 109 96 108 111 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 // SPDX-License-Identifier: GPL-2.0-or-later /* * sysfile.c * * Initialize, read, write, etc. system files. * * Copyright (C) 2002, 2004 Oracle. All rights reserved. */ #include <linux/fs.h> #include <linux/types.h> #include <linux/highmem.h> #include <cluster/masklog.h> #include "ocfs2.h" #include "alloc.h" #include "dir.h" #include "inode.h" #include "journal.h" #include "sysfile.h" #include "buffer_head_io.h" static struct inode * _ocfs2_get_system_file_inode(struct ocfs2_super *osb, int type, u32 slot); #ifdef CONFIG_DEBUG_LOCK_ALLOC static struct lock_class_key ocfs2_sysfile_cluster_lock_key[NUM_SYSTEM_INODES]; #endif static inline int is_global_system_inode(int type) { return type >= OCFS2_FIRST_ONLINE_SYSTEM_INODE && type <= OCFS2_LAST_GLOBAL_SYSTEM_INODE; } static struct inode **get_local_system_inode(struct ocfs2_super *osb, int type, u32 slot) { int index; struct inode **local_system_inodes, **free = NULL; BUG_ON(slot == OCFS2_INVALID_SLOT); BUG_ON(type < OCFS2_FIRST_LOCAL_SYSTEM_INODE || type > OCFS2_LAST_LOCAL_SYSTEM_INODE); spin_lock(&osb->osb_lock); local_system_inodes = osb->local_system_inodes; spin_unlock(&osb->osb_lock); if (unlikely(!local_system_inodes)) { local_system_inodes = kzalloc(array3_size(sizeof(struct inode *), NUM_LOCAL_SYSTEM_INODES, osb->max_slots), GFP_NOFS); if (!local_system_inodes) { mlog_errno(-ENOMEM); /* * return NULL here so that ocfs2_get_sytem_file_inodes * will try to create an inode and use it. We will try * to initialize local_system_inodes next time. */ return NULL; } spin_lock(&osb->osb_lock); if (osb->local_system_inodes) { /* Someone has initialized it for us. 
*/ free = local_system_inodes; local_system_inodes = osb->local_system_inodes; } else osb->local_system_inodes = local_system_inodes; spin_unlock(&osb->osb_lock); kfree(free); } index = (slot * NUM_LOCAL_SYSTEM_INODES) + (type - OCFS2_FIRST_LOCAL_SYSTEM_INODE); return &local_system_inodes[index]; } struct inode *ocfs2_get_system_file_inode(struct ocfs2_super *osb, int type, u32 slot) { struct inode *inode = NULL; struct inode **arr = NULL; /* avoid the lookup if cached in local system file array */ if (is_global_system_inode(type)) { arr = &(osb->global_system_inodes[type]); } else arr = get_local_system_inode(osb, type, slot); mutex_lock(&osb->system_file_mutex); if (arr && ((inode = *arr) != NULL)) { /* get a ref in addition to the array ref */ inode = igrab(inode); mutex_unlock(&osb->system_file_mutex); BUG_ON(!inode); return inode; } /* this gets one ref thru iget */ inode = _ocfs2_get_system_file_inode(osb, type, slot); /* add one more if putting into array for first time */ if (arr && inode) { *arr = igrab(inode); BUG_ON(!*arr); } mutex_unlock(&osb->system_file_mutex); return inode; } static struct inode * _ocfs2_get_system_file_inode(struct ocfs2_super *osb, int type, u32 slot) { char namebuf[40]; struct inode *inode = NULL; u64 blkno; int status = 0; ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type, slot); status = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf, strlen(namebuf), &blkno); if (status < 0) { goto bail; } inode = ocfs2_iget(osb, blkno, OCFS2_FI_FLAG_SYSFILE, type); if (IS_ERR(inode)) { mlog_errno(PTR_ERR(inode)); inode = NULL; goto bail; } #ifdef CONFIG_DEBUG_LOCK_ALLOC if (type == LOCAL_USER_QUOTA_SYSTEM_INODE || type == LOCAL_GROUP_QUOTA_SYSTEM_INODE || type == JOURNAL_SYSTEM_INODE) { /* Ignore inode lock on these inodes as the lock does not * really belong to any process and lockdep cannot handle * that */ OCFS2_I(inode)->ip_inode_lockres.l_lockdep_map.key = NULL; } else { lockdep_init_map(&OCFS2_I(inode)->ip_inode_lockres. l_lockdep_map, ocfs2_system_inodes[type].si_name, &ocfs2_sysfile_cluster_lock_key[type], 0); } #endif bail: return inode; }
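/*
 * Illustrative caller sketch (an assumption, not ocfs2 code): looking up a
 * per-slot system file and dropping the extra reference afterwards. The
 * "example_" name is hypothetical; JOURNAL_SYSTEM_INODE is one of the
 * per-slot system inode types handled above.
 */
static int example_peek_journal_inode(struct ocfs2_super *osb, u32 slot)
{
	struct inode *inode;

	inode = ocfs2_get_system_file_inode(osb, JOURNAL_SYSTEM_INODE, slot);
	if (!inode)
		return -EINVAL;

	/* ... use the journal system inode ... */

	/* Balances the reference taken via igrab()/iget() above. */
	iput(inode);
	return 0;
}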
907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 // SPDX-License-Identifier: GPL-2.0-or-later /* * Apple "Magic" Wireless Mouse driver * * Copyright (c) 2010 Michael Poole <mdpoole@troilus.org> * Copyright (c) 2010 Chase Douglas <chase.douglas@canonical.com> */ /* */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/device.h> #include <linux/hid.h> #include <linux/input/mt.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/workqueue.h> #include "hid-ids.h" static bool emulate_3button = true; module_param(emulate_3button, bool, 0644); MODULE_PARM_DESC(emulate_3button, "Emulate a middle button"); static int middle_button_start = -350; static int middle_button_stop = +350; static bool emulate_scroll_wheel = true; module_param(emulate_scroll_wheel, bool, 0644); MODULE_PARM_DESC(emulate_scroll_wheel, "Emulate a scroll wheel"); static unsigned int scroll_speed = 32; static int param_set_scroll_speed(const char *val, const struct kernel_param *kp) { unsigned long speed; if (!val || kstrtoul(val, 0, &speed) || speed > 63) return -EINVAL; scroll_speed = speed; return 0; } module_param_call(scroll_speed, param_set_scroll_speed, param_get_uint, &scroll_speed, 0644); MODULE_PARM_DESC(scroll_speed, "Scroll speed, value from 0 (slow) to 63 (fast)"); static bool scroll_acceleration = false; module_param(scroll_acceleration, bool, 0644); MODULE_PARM_DESC(scroll_acceleration, "Accelerate sequential scroll events"); static bool report_undeciphered; module_param(report_undeciphered, bool, 0644); MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state field using a MSC_RAW event"); #define TRACKPAD2_2021_BT_VERSION 0x110 #define TRACKPAD_2024_BT_VERSION 0x314 #define TRACKPAD_REPORT_ID 0x28 #define TRACKPAD2_USB_REPORT_ID 0x02 #define TRACKPAD2_BT_REPORT_ID 0x31 #define MOUSE_REPORT_ID 0x29 #define MOUSE2_REPORT_ID 0x12 #define DOUBLE_REPORT_ID 0xf7 #define USB_BATTERY_TIMEOUT_MS 60000 /* These definitions are not precise, but they're close enough. (Bits * 0x03 seem to indicate the aspect ratio of the touch, bits 0x70 seem * to be some kind of bit mask -- 0x20 may be a near-field reading, * and 0x40 is actual contact, and 0x10 may be a start/stop or change * indication.) */ #define TOUCH_STATE_MASK 0xf0 #define TOUCH_STATE_NONE 0x00 #define TOUCH_STATE_START 0x30 #define TOUCH_STATE_DRAG 0x40 /* Number of high-resolution events for each low-resolution detent. */ #define SCROLL_HR_STEPS 10 #define SCROLL_HR_MULT (120 / SCROLL_HR_STEPS) #define SCROLL_HR_THRESHOLD 90 /* units */ #define SCROLL_ACCEL_DEFAULT 7 /* Touch surface information. Dimension is in hundredths of a mm, min and max * are in units. 
*/ #define MOUSE_DIMENSION_X (float)9056 #define MOUSE_MIN_X -1100 #define MOUSE_MAX_X 1258 #define MOUSE_RES_X ((MOUSE_MAX_X - MOUSE_MIN_X) / (MOUSE_DIMENSION_X / 100)) #define MOUSE_DIMENSION_Y (float)5152 #define MOUSE_MIN_Y -1589 #define MOUSE_MAX_Y 2047 #define MOUSE_RES_Y ((MOUSE_MAX_Y - MOUSE_MIN_Y) / (MOUSE_DIMENSION_Y / 100)) #define TRACKPAD_DIMENSION_X (float)13000 #define TRACKPAD_MIN_X -2909 #define TRACKPAD_MAX_X 3167 #define TRACKPAD_RES_X \ ((TRACKPAD_MAX_X - TRACKPAD_MIN_X) / (TRACKPAD_DIMENSION_X / 100)) #define TRACKPAD_DIMENSION_Y (float)11000 #define TRACKPAD_MIN_Y -2456 #define TRACKPAD_MAX_Y 2565 #define TRACKPAD_RES_Y \ ((TRACKPAD_MAX_Y - TRACKPAD_MIN_Y) / (TRACKPAD_DIMENSION_Y / 100)) #define TRACKPAD2_DIMENSION_X (float)16000 #define TRACKPAD2_MIN_X -3678 #define TRACKPAD2_MAX_X 3934 #define TRACKPAD2_RES_X \ ((TRACKPAD2_MAX_X - TRACKPAD2_MIN_X) / (TRACKPAD2_DIMENSION_X / 100)) #define TRACKPAD2_DIMENSION_Y (float)11490 #define TRACKPAD2_MIN_Y -2478 #define TRACKPAD2_MAX_Y 2587 #define TRACKPAD2_RES_Y \ ((TRACKPAD2_MAX_Y - TRACKPAD2_MIN_Y) / (TRACKPAD2_DIMENSION_Y / 100)) /** * struct magicmouse_sc - Tracks Magic Mouse-specific data. * @input: Input device through which we report events. * @quirks: Currently unused. * @ntouches: Number of touches in most recent touch report. * @scroll_accel: Number of consecutive scroll motions. * @scroll_jiffies: Time of last scroll motion. * @touches: Most recent data for a touch, indexed by tracking ID. * @tracking_ids: Mapping of current touch input data to @touches. * @hdev: Pointer to the underlying HID device. * @work: Workqueue to handle initialization retry for quirky devices. * @battery_timer: Timer for obtaining battery level information. */ struct magicmouse_sc { struct input_dev *input; unsigned long quirks; int ntouches; int scroll_accel; unsigned long scroll_jiffies; struct { short x; short y; short scroll_x; short scroll_y; short scroll_x_hr; short scroll_y_hr; u8 size; bool scroll_x_active; bool scroll_y_active; } touches[16]; int tracking_ids[16]; struct hid_device *hdev; struct delayed_work work; struct timer_list battery_timer; }; static int magicmouse_firm_touch(struct magicmouse_sc *msc) { int touch = -1; int ii; /* If there is only one "firm" touch, set touch to its * tracking ID. */ for (ii = 0; ii < msc->ntouches; ii++) { int idx = msc->tracking_ids[ii]; if (msc->touches[idx].size < 8) { /* Ignore this touch. */ } else if (touch >= 0) { touch = -1; break; } else { touch = idx; } } return touch; } static void magicmouse_emit_buttons(struct magicmouse_sc *msc, int state) { int last_state = test_bit(BTN_LEFT, msc->input->key) << 0 | test_bit(BTN_RIGHT, msc->input->key) << 1 | test_bit(BTN_MIDDLE, msc->input->key) << 2; if (emulate_3button) { int id; /* If some button was pressed before, keep it held * down. Otherwise, if there's exactly one firm * touch, use that to override the mouse's guess. */ if (state == 0) { /* The button was released. 
*/ } else if (last_state != 0) { state = last_state; } else if ((id = magicmouse_firm_touch(msc)) >= 0) { int x = msc->touches[id].x; if (x < middle_button_start) state = 1; else if (x > middle_button_stop) state = 2; else state = 4; } /* else: we keep the mouse's guess */ input_report_key(msc->input, BTN_MIDDLE, state & 4); } input_report_key(msc->input, BTN_LEFT, state & 1); input_report_key(msc->input, BTN_RIGHT, state & 2); if (state != last_state) msc->scroll_accel = SCROLL_ACCEL_DEFAULT; } static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tdata) { struct input_dev *input = msc->input; int id, x, y, size, orientation, touch_major, touch_minor, state, down; int pressure = 0; if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE || input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) { id = (tdata[6] << 2 | tdata[5] >> 6) & 0xf; x = (tdata[1] << 28 | tdata[0] << 20) >> 20; y = -((tdata[2] << 24 | tdata[1] << 16) >> 20); size = tdata[5] & 0x3f; orientation = (tdata[6] >> 2) - 32; touch_major = tdata[3]; touch_minor = tdata[4]; state = tdata[7] & TOUCH_STATE_MASK; down = state != TOUCH_STATE_NONE; } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 || input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) { id = tdata[8] & 0xf; x = (tdata[1] << 27 | tdata[0] << 19) >> 19; y = -((tdata[3] << 30 | tdata[2] << 22 | tdata[1] << 14) >> 19); size = tdata[6]; orientation = (tdata[8] >> 5) - 4; touch_major = tdata[4]; touch_minor = tdata[5]; pressure = tdata[7]; state = tdata[3] & 0xC0; down = state == 0x80; } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ id = (tdata[7] << 2 | tdata[6] >> 6) & 0xf; x = (tdata[1] << 27 | tdata[0] << 19) >> 19; y = -((tdata[3] << 30 | tdata[2] << 22 | tdata[1] << 14) >> 19); size = tdata[6] & 0x3f; orientation = (tdata[7] >> 2) - 32; touch_major = tdata[4]; touch_minor = tdata[5]; state = tdata[8] & TOUCH_STATE_MASK; down = state != TOUCH_STATE_NONE; } /* Store tracking ID and other fields. */ msc->tracking_ids[raw_id] = id; msc->touches[id].x = x; msc->touches[id].y = y; msc->touches[id].size = size; /* If requested, emulate a scroll wheel by detecting small * vertical touch motions. */ if (emulate_scroll_wheel && input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 && input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) { unsigned long now = jiffies; int step_x = msc->touches[id].scroll_x - x; int step_y = msc->touches[id].scroll_y - y; int step_hr = max_t(int, ((64 - (int)scroll_speed) * msc->scroll_accel) / SCROLL_HR_STEPS, 1); int step_x_hr = msc->touches[id].scroll_x_hr - x; int step_y_hr = msc->touches[id].scroll_y_hr - y; /* Calculate and apply the scroll motion. */ switch (state) { case TOUCH_STATE_START: msc->touches[id].scroll_x = x; msc->touches[id].scroll_y = y; msc->touches[id].scroll_x_hr = x; msc->touches[id].scroll_y_hr = y; msc->touches[id].scroll_x_active = false; msc->touches[id].scroll_y_active = false; /* Reset acceleration after half a second. 
			 */
			if (scroll_acceleration && time_before(now,
						msc->scroll_jiffies + HZ / 2))
				msc->scroll_accel =
					max_t(int, msc->scroll_accel - 1, 1);
			else
				msc->scroll_accel = SCROLL_ACCEL_DEFAULT;

			break;
		case TOUCH_STATE_DRAG:
			step_x /= (64 - (int)scroll_speed) * msc->scroll_accel;
			if (step_x != 0) {
				msc->touches[id].scroll_x -= step_x *
					(64 - scroll_speed) * msc->scroll_accel;
				msc->scroll_jiffies = now;
				input_report_rel(input, REL_HWHEEL, -step_x);
			}

			step_y /= (64 - (int)scroll_speed) * msc->scroll_accel;
			if (step_y != 0) {
				msc->touches[id].scroll_y -= step_y *
					(64 - scroll_speed) * msc->scroll_accel;
				msc->scroll_jiffies = now;
				input_report_rel(input, REL_WHEEL, step_y);
			}

			if (!msc->touches[id].scroll_x_active &&
			    abs(step_x_hr) > SCROLL_HR_THRESHOLD) {
				msc->touches[id].scroll_x_active = true;
				msc->touches[id].scroll_x_hr = x;
				step_x_hr = 0;
			}

			step_x_hr /= step_hr;
			if (step_x_hr != 0 &&
			    msc->touches[id].scroll_x_active) {
				msc->touches[id].scroll_x_hr -= step_x_hr *
					step_hr;
				input_report_rel(input,
						 REL_HWHEEL_HI_RES,
						 -step_x_hr * SCROLL_HR_MULT);
			}

			if (!msc->touches[id].scroll_y_active &&
			    abs(step_y_hr) > SCROLL_HR_THRESHOLD) {
				msc->touches[id].scroll_y_active = true;
				msc->touches[id].scroll_y_hr = y;
				step_y_hr = 0;
			}

			step_y_hr /= step_hr;
			if (step_y_hr != 0 &&
			    msc->touches[id].scroll_y_active) {
				msc->touches[id].scroll_y_hr -= step_y_hr *
					step_hr;
				input_report_rel(input,
						 REL_WHEEL_HI_RES,
						 step_y_hr * SCROLL_HR_MULT);
			}
			break;
		}
	}

	if (down)
		msc->ntouches++;

	input_mt_slot(input, id);
	input_mt_report_slot_state(input, MT_TOOL_FINGER, down);

	/* Generate the input events for this touch. */
	if (down) {
		input_report_abs(input, ABS_MT_TOUCH_MAJOR, touch_major << 2);
		input_report_abs(input, ABS_MT_TOUCH_MINOR, touch_minor << 2);
		input_report_abs(input, ABS_MT_ORIENTATION, -orientation);
		input_report_abs(input, ABS_MT_POSITION_X, x);
		input_report_abs(input, ABS_MT_POSITION_Y, y);

		if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
		    input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC)
			input_report_abs(input, ABS_MT_PRESSURE, pressure);

		if (report_undeciphered) {
			if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
			    input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2)
				input_event(input, EV_MSC, MSC_RAW, tdata[7]);
			else if (input->id.product !=
					USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
				 input->id.product !=
					USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC)
				input_event(input, EV_MSC, MSC_RAW, tdata[8]);
		}
	}
}

static int magicmouse_raw_event(struct hid_device *hdev,
		struct hid_report *report, u8 *data, int size)
{
	struct magicmouse_sc *msc = hid_get_drvdata(hdev);
	struct input_dev *input = msc->input;
	int x = 0, y = 0, ii, clicks = 0, npoints;

	switch (data[0]) {
	case TRACKPAD_REPORT_ID:
	case TRACKPAD2_BT_REPORT_ID:
		/* Expect four bytes of prefix, and N*9 bytes of touch data. */
		if (size < 4 || ((size - 4) % 9) != 0)
			return 0;
		npoints = (size - 4) / 9;
		if (npoints > 15) {
			hid_warn(hdev, "invalid size value (%d) for TRACKPAD_REPORT_ID\n",
					size);
			return 0;
		}
		msc->ntouches = 0;
		for (ii = 0; ii < npoints; ii++)
			magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);

		clicks = data[1];

		/* The following bits provide a device specific timestamp. They
		 * are unused here.
		 *
		 * ts = data[1] >> 6 | data[2] << 2 | data[3] << 10;
		 */
		break;
	case TRACKPAD2_USB_REPORT_ID:
		/* Expect twelve bytes of prefix and N*9 bytes of touch data.
		 */
		if (size < 12 || ((size - 12) % 9) != 0)
			return 0;
		npoints = (size - 12) / 9;
		if (npoints > 15) {
			hid_warn(hdev, "invalid size value (%d) for TRACKPAD2_USB_REPORT_ID\n",
					size);
			return 0;
		}
		msc->ntouches = 0;
		for (ii = 0; ii < npoints; ii++)
			magicmouse_emit_touch(msc, ii, data + ii * 9 + 12);

		clicks = data[1];
		break;
	case MOUSE_REPORT_ID:
		/* Expect six bytes of prefix, and N*8 bytes of touch data. */
		if (size < 6 || ((size - 6) % 8) != 0)
			return 0;
		npoints = (size - 6) / 8;
		if (npoints > 15) {
			hid_warn(hdev, "invalid size value (%d) for MOUSE_REPORT_ID\n",
					size);
			return 0;
		}
		msc->ntouches = 0;
		for (ii = 0; ii < npoints; ii++)
			magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);

		/* When emulating three-button mode, it is important
		 * to have the current touch information before
		 * generating a click event.
		 */
		x = (int)(((data[3] & 0x0c) << 28) | (data[1] << 22)) >> 22;
		y = (int)(((data[3] & 0x30) << 26) | (data[2] << 22)) >> 22;
		clicks = data[3];

		/* The following bits provide a device specific timestamp. They
		 * are unused here.
		 *
		 * ts = data[3] >> 6 | data[4] << 2 | data[5] << 10;
		 */
		break;
	case MOUSE2_REPORT_ID:
		/* Size is either 8 or (14 + 8 * N) */
		if (size != 8 && (size < 14 || (size - 14) % 8 != 0))
			return 0;
		npoints = (size - 14) / 8;
		if (npoints > 15) {
			hid_warn(hdev, "invalid size value (%d) for MOUSE2_REPORT_ID\n",
					size);
			return 0;
		}
		msc->ntouches = 0;
		for (ii = 0; ii < npoints; ii++)
			magicmouse_emit_touch(msc, ii, data + ii * 8 + 14);

		/* When emulating three-button mode, it is important
		 * to have the current touch information before
		 * generating a click event.
		 */
		x = (int)((data[3] << 24) | (data[2] << 16)) >> 16;
		y = (int)((data[5] << 24) | (data[4] << 16)) >> 16;
		clicks = data[1];

		/* The following bits provide a device specific timestamp. They
		 * are unused here.
		 *
		 * ts = data[11] >> 6 | data[12] << 2 | data[13] << 10;
		 */
		break;
	case DOUBLE_REPORT_ID:
		/* Sometimes the trackpad sends two touch reports in one
		 * packet.
		 */
		magicmouse_raw_event(hdev, report, data + 2, data[1]);
		magicmouse_raw_event(hdev, report, data + 2 + data[1],
			size - 2 - data[1]);
		return 0;
	default:
		return 0;
	}

	if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
	    input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
		magicmouse_emit_buttons(msc, clicks & 3);
		input_report_rel(input, REL_X, x);
		input_report_rel(input, REL_Y, y);
	} else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
		   input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
		input_mt_sync_frame(input);
		input_report_key(input, BTN_MOUSE, clicks & 1);
	} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
		input_report_key(input, BTN_MOUSE, clicks & 1);
		input_mt_report_pointer_emulation(input, true);
	}

	input_sync(input);
	return 1;
}

static int magicmouse_event(struct hid_device *hdev, struct hid_field *field,
		struct hid_usage *usage, __s32 value)
{
	struct magicmouse_sc *msc = hid_get_drvdata(hdev);

	if (msc->input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 &&
	    field->report->id == MOUSE2_REPORT_ID) {
		/*
		 * magic_mouse_raw_event has done all the work. Skip hidinput.
		 *
		 * Specifically, hidinput may modify BTN_LEFT and BTN_RIGHT,
		 * breaking emulate_3button.
		 */
		return 1;
	}

	return 0;
}

static int magicmouse_setup_input(struct input_dev *input,
				  struct hid_device *hdev)
{
	int error;
	int mt_flags = 0;

	__set_bit(EV_KEY, input->evbit);

	if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
	    input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
		__set_bit(BTN_LEFT, input->keybit);
		__set_bit(BTN_RIGHT, input->keybit);
		if (emulate_3button)
			__set_bit(BTN_MIDDLE, input->keybit);

		__set_bit(EV_REL, input->evbit);
		__set_bit(REL_X, input->relbit);
		__set_bit(REL_Y, input->relbit);
		if (emulate_scroll_wheel) {
			__set_bit(REL_WHEEL, input->relbit);
			__set_bit(REL_HWHEEL, input->relbit);
			__set_bit(REL_WHEEL_HI_RES, input->relbit);
			__set_bit(REL_HWHEEL_HI_RES, input->relbit);
		}
	} else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
		   input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
		/* If the trackpad has been connected to a Mac, the name is
		 * automatically personalized, e.g., "José Expósito's Trackpad".
		 * When connected through Bluetooth, the personalized name is
		 * reported; however, when connected through USB the generic
		 * name is reported.
		 * Set the device name to ensure the same driver settings get
		 * loaded, whether connected through Bluetooth or USB.
		 */
		if (hdev->vendor == BT_VENDOR_ID_APPLE) {
			if (input->id.version == TRACKPAD2_2021_BT_VERSION) {
				input->name = "Apple Inc. Magic Trackpad 2021";
			} else if (input->id.version == TRACKPAD_2024_BT_VERSION) {
				input->name = "Apple Inc. Magic Trackpad USB-C";
			} else {
				input->name = "Apple Inc. Magic Trackpad";
			}
		} else { /* USB_VENDOR_ID_APPLE */
			input->name = hdev->name;
		}

		__clear_bit(EV_MSC, input->evbit);
		__clear_bit(BTN_0, input->keybit);
		__clear_bit(BTN_RIGHT, input->keybit);
		__clear_bit(BTN_MIDDLE, input->keybit);
		__set_bit(BTN_MOUSE, input->keybit);
		__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
		__set_bit(BTN_TOOL_FINGER, input->keybit);

		mt_flags = INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED |
				INPUT_MT_TRACK;
	} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
		/* input->keybit is initialized with incorrect button info
		 * for Magic Trackpad. There really is only one physical
		 * button (BTN_LEFT == BTN_MOUSE). Make sure we don't
		 * advertise buttons that don't exist...
		 */
		__clear_bit(BTN_RIGHT, input->keybit);
		__clear_bit(BTN_MIDDLE, input->keybit);
		__set_bit(BTN_MOUSE, input->keybit);
		__set_bit(BTN_TOOL_FINGER, input->keybit);
		__set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
		__set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
		__set_bit(BTN_TOOL_QUADTAP, input->keybit);
		__set_bit(BTN_TOOL_QUINTTAP, input->keybit);
		__set_bit(BTN_TOUCH, input->keybit);
		__set_bit(INPUT_PROP_POINTER, input->propbit);
		__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
	}

	__set_bit(EV_ABS, input->evbit);

	error = input_mt_init_slots(input, 16, mt_flags);
	if (error)
		return error;
	input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255 << 2,
			     4, 0);
	input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255 << 2,
			     4, 0);

	/* Note: Touch Y position from the device is inverted relative
	 * to how pointer motion is reported (and relative to how USB
	 * HID recommends the coordinates work). This driver keeps
	 * the origin at the same position, and just uses the additive
	 * inverse of the reported Y.
	 */
	if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE ||
	    input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
		input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
		input_set_abs_params(input, ABS_MT_POSITION_X,
				     MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
		input_set_abs_params(input, ABS_MT_POSITION_Y,
				     MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0);

		input_abs_set_res(input, ABS_MT_POSITION_X, MOUSE_RES_X);
		input_abs_set_res(input, ABS_MT_POSITION_Y, MOUSE_RES_Y);
	} else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
		   input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
		input_set_abs_params(input, ABS_MT_PRESSURE, 0, 253, 0, 0);
		input_set_abs_params(input, ABS_PRESSURE, 0, 253, 0, 0);
		input_set_abs_params(input, ABS_MT_ORIENTATION, -3, 4, 0, 0);
		input_set_abs_params(input, ABS_X, TRACKPAD2_MIN_X,
				     TRACKPAD2_MAX_X, 0, 0);
		input_set_abs_params(input, ABS_Y, TRACKPAD2_MIN_Y,
				     TRACKPAD2_MAX_Y, 0, 0);
		input_set_abs_params(input, ABS_MT_POSITION_X,
				     TRACKPAD2_MIN_X, TRACKPAD2_MAX_X, 0, 0);
		input_set_abs_params(input, ABS_MT_POSITION_Y,
				     TRACKPAD2_MIN_Y, TRACKPAD2_MAX_Y, 0, 0);

		input_abs_set_res(input, ABS_X, TRACKPAD2_RES_X);
		input_abs_set_res(input, ABS_Y, TRACKPAD2_RES_Y);
		input_abs_set_res(input, ABS_MT_POSITION_X, TRACKPAD2_RES_X);
		input_abs_set_res(input, ABS_MT_POSITION_Y, TRACKPAD2_RES_Y);
	} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
		input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
		input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
				     TRACKPAD_MAX_X, 4, 0);
		input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
				     TRACKPAD_MAX_Y, 4, 0);
		input_set_abs_params(input, ABS_MT_POSITION_X,
				     TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0);
		input_set_abs_params(input, ABS_MT_POSITION_Y,
				     TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0);

		input_abs_set_res(input, ABS_X, TRACKPAD_RES_X);
		input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y);
		input_abs_set_res(input, ABS_MT_POSITION_X, TRACKPAD_RES_X);
		input_abs_set_res(input, ABS_MT_POSITION_Y, TRACKPAD_RES_Y);
	}

	input_set_events_per_packet(input, 60);

	if (report_undeciphered &&
	    input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
	    input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
		__set_bit(EV_MSC, input->evbit);
		__set_bit(MSC_RAW, input->mscbit);
	}

	/*
	 * hid-input may mark device as using autorepeat, but neither
	 * the trackpad, nor the mouse actually want it.
	 */
	__clear_bit(EV_REP, input->evbit);

	return 0;
}

static int magicmouse_input_mapping(struct hid_device *hdev,
		struct hid_input *hi, struct hid_field *field,
		struct hid_usage *usage, unsigned long **bit, int *max)
{
	struct magicmouse_sc *msc = hid_get_drvdata(hdev);

	if (!msc->input)
		msc->input = hi->input;

	/* Magic Trackpad does not give relative data after switching to MT */
	if ((hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD ||
	     hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
	     hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
	    field->flags & HID_MAIN_ITEM_RELATIVE)
		return -1;

	return 0;
}

static int magicmouse_input_configured(struct hid_device *hdev,
		struct hid_input *hi)
{
	struct magicmouse_sc *msc = hid_get_drvdata(hdev);
	int ret;

	ret = magicmouse_setup_input(msc->input, hdev);
	if (ret) {
		hid_err(hdev, "magicmouse setup input failed (%d)\n", ret);
		/* clean msc->input to notify probe() of the failure */
		msc->input = NULL;
		return ret;
	}

	return 0;
}

static int magicmouse_enable_multitouch(struct hid_device *hdev)
{
	const u8 *feature;
	const u8 feature_mt[] = { 0xD7, 0x01 };
	const u8 feature_mt_mouse2[] = { 0xF1, 0x02, 0x01 };
	const u8 feature_mt_trackpad2_usb[] = { 0x02, 0x01 };
	const u8 feature_mt_trackpad2_bt[] = { 0xF1, 0x02, 0x01 };
	u8 *buf;
	int ret;
	int feature_size;

	if (hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
	    hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
		if (hdev->vendor == BT_VENDOR_ID_APPLE) {
			feature_size = sizeof(feature_mt_trackpad2_bt);
			feature = feature_mt_trackpad2_bt;
		} else { /* USB_VENDOR_ID_APPLE */
			feature_size = sizeof(feature_mt_trackpad2_usb);
			feature = feature_mt_trackpad2_usb;
		}
	} else if (hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
		feature_size = sizeof(feature_mt_mouse2);
		feature = feature_mt_mouse2;
	} else {
		feature_size = sizeof(feature_mt);
		feature = feature_mt;
	}

	buf = kmemdup(feature, feature_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = hid_hw_raw_request(hdev, buf[0], buf, feature_size,
				 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
	kfree(buf);
	return ret;
}

static void magicmouse_enable_mt_work(struct work_struct *work)
{
	struct magicmouse_sc *msc =
		container_of(work, struct magicmouse_sc, work.work);
	int ret;

	ret = magicmouse_enable_multitouch(msc->hdev);
	if (ret < 0)
		hid_err(msc->hdev, "unable to request touch data (%d)\n", ret);
}

static int magicmouse_fetch_battery(struct hid_device *hdev)
{
#ifdef CONFIG_HID_BATTERY_STRENGTH
	struct hid_report_enum *report_enum;
	struct hid_report *report;

	if (!hdev->battery || hdev->vendor != USB_VENDOR_ID_APPLE ||
	    (hdev->product != USB_DEVICE_ID_APPLE_MAGICMOUSE2 &&
	     hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
	     hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC))
		return -1;

	report_enum = &hdev->report_enum[hdev->battery_report_type];
	report = report_enum->report_id_hash[hdev->battery_report_id];

	if (!report || report->maxfield < 1)
		return -1;

	if (hdev->battery_capacity == hdev->battery_max)
		return -1;

	hid_hw_request(hdev, report, HID_REQ_GET_REPORT);
	return 0;
#else
	return -1;
#endif
}

static void magicmouse_battery_timer_tick(struct timer_list *t)
{
	struct magicmouse_sc *msc = from_timer(msc, t, battery_timer);
	struct hid_device *hdev = msc->hdev;

	if (magicmouse_fetch_battery(hdev) == 0) {
		mod_timer(&msc->battery_timer,
			  jiffies + msecs_to_jiffies(USB_BATTERY_TIMEOUT_MS));
	}
}

static int magicmouse_probe(struct hid_device *hdev,
	const struct hid_device_id *id)
{
	struct magicmouse_sc *msc;
	struct hid_report *report;
	int ret;
	msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
	if (msc == NULL) {
		hid_err(hdev, "can't alloc magicmouse descriptor\n");
		return -ENOMEM;
	}

	msc->scroll_accel = SCROLL_ACCEL_DEFAULT;
	msc->hdev = hdev;
	INIT_DEFERRABLE_WORK(&msc->work, magicmouse_enable_mt_work);

	msc->quirks = id->driver_data;
	hid_set_drvdata(hdev, msc);

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "magicmouse hid parse failed\n");
		return ret;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (ret) {
		hid_err(hdev, "magicmouse hw start failed\n");
		return ret;
	}

	timer_setup(&msc->battery_timer, magicmouse_battery_timer_tick, 0);
	mod_timer(&msc->battery_timer,
		  jiffies + msecs_to_jiffies(USB_BATTERY_TIMEOUT_MS));
	magicmouse_fetch_battery(hdev);

	if (id->vendor == USB_VENDOR_ID_APPLE &&
	    (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
	     ((id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
	       id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
	      hdev->type != HID_TYPE_USBMOUSE)))
		return 0;

	if (!msc->input) {
		hid_err(hdev, "magicmouse input not registered\n");
		ret = -ENOMEM;
		goto err_stop_hw;
	}

	if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
		report = hid_register_report(hdev, HID_INPUT_REPORT,
			MOUSE_REPORT_ID, 0);
	else if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2)
		report = hid_register_report(hdev, HID_INPUT_REPORT,
			MOUSE2_REPORT_ID, 0);
	else if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
		 id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) {
		if (id->vendor == BT_VENDOR_ID_APPLE)
			report = hid_register_report(hdev, HID_INPUT_REPORT,
				TRACKPAD2_BT_REPORT_ID, 0);
		else /* USB_VENDOR_ID_APPLE */
			report = hid_register_report(hdev, HID_INPUT_REPORT,
				TRACKPAD2_USB_REPORT_ID, 0);
	} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
		report = hid_register_report(hdev, HID_INPUT_REPORT,
			TRACKPAD_REPORT_ID, 0);
		report = hid_register_report(hdev, HID_INPUT_REPORT,
			DOUBLE_REPORT_ID, 0);
	}

	if (!report) {
		hid_err(hdev, "unable to register touch report\n");
		ret = -ENOMEM;
		goto err_stop_hw;
	}
	report->size = 6;

	/*
	 * Some devices respond with 'invalid report id' when the feature
	 * report switching them into multitouch mode is sent.
	 *
	 * This results in -EIO from the _raw low-level transport callback,
	 * but there seems to be no other way of switching the mode.
	 * Thus the super-ugly hacky success check below.
	 */
	ret = magicmouse_enable_multitouch(hdev);
	if (ret != -EIO && ret < 0) {
		hid_err(hdev, "unable to request touch data (%d)\n", ret);
		goto err_stop_hw;
	}
	if (ret == -EIO && id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) {
		schedule_delayed_work(&msc->work, msecs_to_jiffies(500));
	}

	return 0;
err_stop_hw:
	del_timer_sync(&msc->battery_timer);
	hid_hw_stop(hdev);
	return ret;
}

static void magicmouse_remove(struct hid_device *hdev)
{
	struct magicmouse_sc *msc = hid_get_drvdata(hdev);

	if (msc) {
		cancel_delayed_work_sync(&msc->work);
		del_timer_sync(&msc->battery_timer);
	}

	hid_hw_stop(hdev);
}

static const __u8 *magicmouse_report_fixup(struct hid_device *hdev, __u8 *rdesc,
					   unsigned int *rsize)
{
	/*
	 * Change the usage from:
	 *   0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page 1)  0
	 *   0x09, 0x0b,       // Usage (Vendor Usage 0x0b)           3
	 * To:
	 *   0x05, 0x01,       // Usage Page (Generic Desktop)        0
	 *   0x09, 0x02,       // Usage (Mouse)                       2
	 */
	if (hdev->vendor == USB_VENDOR_ID_APPLE &&
	    (hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 ||
	     hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 ||
	     hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) &&
	    *rsize == 83 && rdesc[46] == 0x84 && rdesc[58] == 0x85) {
		hid_info(hdev,
			 "fixing up magicmouse battery report descriptor\n");
		*rsize = *rsize - 1;
		rdesc = kmemdup(rdesc + 1, *rsize, GFP_KERNEL);
		if (!rdesc)
			return NULL;

		rdesc[0] = 0x05;
		rdesc[1] = 0x01;
		rdesc[2] = 0x09;
		rdesc[3] = 0x02;
	}

	return rdesc;
}

static const struct hid_device_id magic_mice[] = {
	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
		USB_DEVICE_ID_APPLE_MAGICMOUSE), .driver_data = 0 },
	{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
		USB_DEVICE_ID_APPLE_MAGICMOUSE2), .driver_data = 0 },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
		USB_DEVICE_ID_APPLE_MAGICMOUSE2), .driver_data = 0 },
	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
		USB_DEVICE_ID_APPLE_MAGICTRACKPAD), .driver_data = 0 },
	{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
		USB_DEVICE_ID_APPLE_MAGICTRACKPAD2), .driver_data = 0 },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
		USB_DEVICE_ID_APPLE_MAGICTRACKPAD2), .driver_data = 0 },
	{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
		USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC), .driver_data = 0 },
	{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
		USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC), .driver_data = 0 },
	{ }
};
MODULE_DEVICE_TABLE(hid, magic_mice);

static struct hid_driver magicmouse_driver = {
	.name = "magicmouse",
	.id_table = magic_mice,
	.probe = magicmouse_probe,
	.remove = magicmouse_remove,
	.report_fixup = magicmouse_report_fixup,
	.raw_event = magicmouse_raw_event,
	.event = magicmouse_event,
	.input_mapping = magicmouse_input_mapping,
	.input_configured = magicmouse_input_configured,
};
module_hid_driver(magicmouse_driver);

MODULE_DESCRIPTION("Apple \"Magic\" Wireless Mouse driver");
MODULE_LICENSE("GPL");