// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains vfs directory ops for the 9P2000 protocol.
 *
 * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/fscache.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>

#include "v9fs.h"
#include "v9fs_vfs.h"
#include "fid.h"

/**
 * struct p9_rdir - readdir accounting
 * @head: start offset of current dirread buffer
 * @tail: end offset of current dirread buffer
 * @buf: dirread buffer
 *
 * private structure for keeping track of readdir
 * allocated on demand
 */
struct p9_rdir {
	int head;
	int tail;
	uint8_t buf[];
};

/**
 * dt_type - return file type
 * @mistat: mistat structure
 *
 */
static inline int dt_type(struct p9_wstat *mistat)
{
	unsigned long perm = mistat->mode;
	int rettype = DT_REG;

	if (perm & P9_DMDIR)
		rettype = DT_DIR;
	if (perm & P9_DMSYMLINK)
		rettype = DT_LNK;

	return rettype;
}

/**
 * v9fs_alloc_rdir_buf - Allocate buffer used for read and readdir
 * @filp: opened file structure
 * @buflen: Length in bytes of buffer to allocate
 *
 */
static struct p9_rdir *v9fs_alloc_rdir_buf(struct file *filp, int buflen)
{
	struct p9_fid *fid = filp->private_data;

	if (!fid->rdir)
		fid->rdir = kzalloc(sizeof(struct p9_rdir) + buflen,
				    GFP_KERNEL);
	return fid->rdir;
}

/**
 * v9fs_dir_readdir - iterate through a directory
 * @file: opened file structure
 * @ctx: actor we feed the entries to
 *
 */
static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
{
	bool over;
	struct p9_wstat st;
	int err = 0;
	struct p9_fid *fid;
	int buflen;
	struct p9_rdir *rdir;
	struct kvec kvec;

	p9_debug(P9_DEBUG_VFS, "name %pD\n", file);
	fid = file->private_data;

	buflen = fid->clnt->msize - P9_IOHDRSZ;

	rdir = v9fs_alloc_rdir_buf(file, buflen);
	if (!rdir)
		return -ENOMEM;
	kvec.iov_base = rdir->buf;
	kvec.iov_len = buflen;

	while (1) {
		if (rdir->tail == rdir->head) {
			struct iov_iter to;
			int n;

			iov_iter_kvec(&to, ITER_DEST, &kvec, 1, buflen);
			n = p9_client_read(file->private_data, ctx->pos, &to,
					   &err);
			if (err)
				return err;
			if (n == 0)
				return 0;

			rdir->head = 0;
			rdir->tail = n;
		}
		while (rdir->head < rdir->tail) {
			err = p9stat_read(fid->clnt, rdir->buf + rdir->head,
					  rdir->tail - rdir->head, &st);
			if (err <= 0) {
				p9_debug(P9_DEBUG_VFS, "returned %d\n", err);
				return -EIO;
			}

			over = !dir_emit(ctx, st.name, strlen(st.name),
					 QID2INO(&st.qid), dt_type(&st));
			p9stat_free(&st);
			if (over)
				return 0;

			rdir->head += err;
			ctx->pos += err;
		}
	}
}

/**
 * v9fs_dir_readdir_dotl - iterate through a directory
 * @file: opened file structure
 * @ctx: actor we feed the entries to
 *
 */
static int v9fs_dir_readdir_dotl(struct file *file,
				 struct dir_context *ctx)
{
	int err = 0;
	struct p9_fid *fid;
	int buflen;
	struct p9_rdir *rdir;
	struct p9_dirent curdirent;

	p9_debug(P9_DEBUG_VFS, "name %pD\n", file);
	fid = file->private_data;

	buflen = fid->clnt->msize - P9_READDIRHDRSZ;

	rdir = v9fs_alloc_rdir_buf(file, buflen);
	if (!rdir)
		return -ENOMEM;

	while (1) {
		if (rdir->tail == rdir->head) {
			err = p9_client_readdir(fid, rdir->buf, buflen,
						ctx->pos);
			if (err <= 0)
				return err;

			rdir->head = 0;
			rdir->tail = err;
		}

		while (rdir->head < rdir->tail) {
			err = p9dirent_read(fid->clnt,
					    rdir->buf + rdir->head,
					    rdir->tail - rdir->head,
					    &curdirent);
			if (err < 0) {
				p9_debug(P9_DEBUG_VFS, "returned %d\n", err);
				return -EIO;
			}

			if (!dir_emit(ctx, curdirent.d_name,
				      strlen(curdirent.d_name),
				      QID2INO(&curdirent.qid),
				      curdirent.d_type))
				return 0;

			ctx->pos = curdirent.d_off;
			rdir->head += err;
		}
	}
}

/**
 * v9fs_dir_release - close a directory or a file
 * @inode: inode of the directory or file
 * @filp: file pointer to a directory or file
 *
 */
int v9fs_dir_release(struct inode *inode, struct file *filp)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct p9_fid *fid;
	__le32 version;
	loff_t i_size;
	int retval = 0, put_err;

	fid = filp->private_data;
	p9_debug(P9_DEBUG_VFS, "inode: %p filp: %p fid: %d\n",
		 inode, filp, fid ? fid->fid : -1);

	if (fid) {
		if ((S_ISREG(inode->i_mode)) && (filp->f_mode & FMODE_WRITE))
			retval = filemap_fdatawrite(inode->i_mapping);

		spin_lock(&inode->i_lock);
		hlist_del(&fid->ilist);
		spin_unlock(&inode->i_lock);
		put_err = p9_fid_put(fid);
		retval = retval < 0 ? retval : put_err;
	}

	if ((filp->f_mode & FMODE_WRITE)) {
		version = cpu_to_le32(v9inode->qid.version);
		i_size = i_size_read(inode);
		fscache_unuse_cookie(v9fs_inode_cookie(v9inode),
				     &version, &i_size);
	} else {
		fscache_unuse_cookie(v9fs_inode_cookie(v9inode), NULL, NULL);
	}
	return retval;
}

const struct file_operations v9fs_dir_operations = {
	.read = generic_read_dir,
	.llseek = generic_file_llseek,
	.iterate_shared = v9fs_dir_readdir,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
};

const struct file_operations v9fs_dir_operations_dotl = {
	.read = generic_read_dir,
	.llseek = generic_file_llseek,
	.iterate_shared = v9fs_dir_readdir_dotl,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.fsync = v9fs_file_fsync_dotl,
};
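/*
 * Illustrative sketch, not part of this file: the buffer accounting
 * shared by both readdir variants above, written out as a standalone
 * loop. read_chunk() and decode_entry() are hypothetical stand-ins for
 * p9_client_read()/p9_client_readdir() and p9stat_read()/
 * p9dirent_read(); both return a byte count, or <= 0 on EOF/error.
 */
#if 0	/* example only, never compiled */
static int read_chunk(uint8_t *buf, int buflen);	/* hypothetical */
static int decode_entry(uint8_t *buf, int len);		/* hypothetical */

static int readdir_pattern(struct p9_rdir *rdir, int buflen)
{
	int n;

	for (;;) {
		if (rdir->tail == rdir->head) {
			/* buffer fully consumed: refill from the server */
			n = read_chunk(rdir->buf, buflen);
			if (n <= 0)
				return n;	/* 0 on EOF, <0 on error */
			rdir->head = 0;
			rdir->tail = n;
		}
		while (rdir->head < rdir->tail) {
			/* decode one packed record at the head offset */
			n = decode_entry(rdir->buf + rdir->head,
					 rdir->tail - rdir->head);
			if (n <= 0)
				return -EIO;	/* short or corrupt record */
			/* emit the entry via dir_emit() here */
			rdir->head += n;	/* consume the record */
		}
	}
}
#endif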
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/vhost_task.h>
#include <linux/interval_tree_generic.h>
#include <linux/nospec.h>
#include <linux/kcov.h>

#include "vhost.h"

static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
	"Maximum number of memory regions in memory map. (default: 64)");
static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
	"Maximum number of iotlb entries. (default: 2048)");
enum {
	VHOST_MEMORY_F_LOG = 0x1,
};

#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])

#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
	vq->user_be = !virtio_legacy_is_little_endian();
}

static void vhost_enable_cross_endian_big(struct vhost_virtqueue *vq)
{
	vq->user_be = true;
}

static void vhost_enable_cross_endian_little(struct vhost_virtqueue *vq)
{
	vq->user_be = false;
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	struct vhost_vring_state s;

	if (vq->private_data)
		return -EBUSY;

	if (copy_from_user(&s, argp, sizeof(s)))
		return -EFAULT;

	if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
	    s.num != VHOST_VRING_BIG_ENDIAN)
		return -EINVAL;

	if (s.num == VHOST_VRING_BIG_ENDIAN)
		vhost_enable_cross_endian_big(vq);
	else
		vhost_enable_cross_endian_little(vq);

	return 0;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	struct vhost_vring_state s = {
		.index = idx,
		.num = vq->user_be
	};

	if (copy_to_user(argp, &s, sizeof(s)))
		return -EFAULT;

	return 0;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	/* Note for legacy virtio: user_be is initialized at reset time
	 * according to the host endianness. If userspace does not set an
	 * explicit endianness, the default behavior is native endian, as
	 * expected by legacy virtio.
	 */
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}
#else
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
}

static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
	return -ENOIOCTLCMD;
}

static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
				   int __user *argp)
{
	return -ENOIOCTLCMD;
}

static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
	vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
		|| virtio_legacy_is_little_endian();
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */

static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
	vhost_init_is_le(vq);
}

struct vhost_flush_struct {
	struct vhost_work work;
	struct completion wait_event;
};

static void vhost_flush_work(struct vhost_work *work)
{
	struct vhost_flush_struct *s;

	s = container_of(work, struct vhost_flush_struct, work);
	complete(&s->wait_event);
}

static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll;

	poll = container_of(pt, struct vhost_poll, table);
	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}

static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
	struct vhost_work *work = &poll->work;

	if (!(key_to_poll(key) & poll->mask))
		return 0;

	if (!poll->dev->use_worker)
		work->fn(work);
	else
		vhost_poll_queue(poll);

	return 0;
}

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
	clear_bit(VHOST_WORK_QUEUED, &work->flags);
	work->fn = fn;
}
EXPORT_SYMBOL_GPL(vhost_work_init);

/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev,
		     struct vhost_virtqueue *vq)
{
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
	poll->dev = dev;
	poll->wqh = NULL;
	poll->vq = vq;

	vhost_work_init(&poll->work, fn);
}
EXPORT_SYMBOL_GPL(vhost_poll_init);
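/*
 * Illustrative sketch, not part of this file: how a backend pairs
 * vhost_poll_init() with vhost_poll_start()/vhost_poll_stop(), in the
 * style of the vq kick handling above. backend_handle_kick and
 * backend_start are hypothetical names.
 */
#if 0	/* example only */
static void backend_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq =
		container_of(work, struct vhost_virtqueue, poll.work);

	/* process the virtqueue here; requeue via vhost_poll_queue() */
}

static int backend_start(struct vhost_virtqueue *vq, struct file *f)
{
	/* run backend_handle_kick whenever f becomes readable */
	vhost_poll_init(&vq->poll, backend_handle_kick, EPOLLIN,
			vq->dev, vq);
	return vhost_poll_start(&vq->poll, f);
	/* teardown: vhost_poll_stop(&vq->poll), then vhost_dev_flush() */
}
#endif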
/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to a file until after vhost_poll_stop is called.
 */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
	__poll_t mask;

	if (poll->wqh)
		return 0;

	mask = vfs_poll(file, &poll->table);
	if (mask)
		vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
	if (mask & EPOLLERR) {
		vhost_poll_stop(poll);
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);

/* Stop polling a file. After this function returns, it becomes safe to drop
 * the file reference. You must also flush afterwards.
 */
void vhost_poll_stop(struct vhost_poll *poll)
{
	if (poll->wqh) {
		remove_wait_queue(poll->wqh, &poll->wait);
		poll->wqh = NULL;
	}
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);

static void vhost_worker_queue(struct vhost_worker *worker,
			       struct vhost_work *work)
{
	if (!test_and_set_bit(VHOST_WORK_QUEUED, &work->flags)) {
		/* We can only add the work to the list after we're
		 * sure it was not in the list.
		 * test_and_set_bit() implies a memory barrier.
		 */
		llist_add(&work->node, &worker->work_list);
		vhost_task_wake(worker->vtsk);
	}
}

bool vhost_vq_work_queue(struct vhost_virtqueue *vq, struct vhost_work *work)
{
	struct vhost_worker *worker;
	bool queued = false;

	rcu_read_lock();
	worker = rcu_dereference(vq->worker);
	if (worker) {
		queued = true;
		vhost_worker_queue(worker, work);
	}
	rcu_read_unlock();

	return queued;
}
EXPORT_SYMBOL_GPL(vhost_vq_work_queue);

/**
 * __vhost_worker_flush - flush a worker
 * @worker: worker to flush
 *
 * The worker's mutex must be held.
 */
static void __vhost_worker_flush(struct vhost_worker *worker)
{
	struct vhost_flush_struct flush;

	if (!worker->attachment_cnt || worker->killed)
		return;

	init_completion(&flush.wait_event);
	vhost_work_init(&flush.work, vhost_flush_work);

	vhost_worker_queue(worker, &flush.work);
	/*
	 * Drop mutex in case our worker is killed and it needs to take the
	 * mutex to force cleanup.
	 */
	mutex_unlock(&worker->mutex);
	wait_for_completion(&flush.wait_event);
	mutex_lock(&worker->mutex);
}

static void vhost_worker_flush(struct vhost_worker *worker)
{
	mutex_lock(&worker->mutex);
	__vhost_worker_flush(worker);
	mutex_unlock(&worker->mutex);
}

void vhost_dev_flush(struct vhost_dev *dev)
{
	struct vhost_worker *worker;
	unsigned long i;

	xa_for_each(&dev->worker_xa, i, worker)
		vhost_worker_flush(worker);
}
EXPORT_SYMBOL_GPL(vhost_dev_flush);

/* A lockless hint for busy polling code to exit the loop */
bool vhost_vq_has_work(struct vhost_virtqueue *vq)
{
	struct vhost_worker *worker;
	bool has_work = false;

	rcu_read_lock();
	worker = rcu_dereference(vq->worker);
	if (worker && !llist_empty(&worker->work_list))
		has_work = true;
	rcu_read_unlock();

	return has_work;
}
EXPORT_SYMBOL_GPL(vhost_vq_has_work);

void vhost_poll_queue(struct vhost_poll *poll)
{
	vhost_vq_work_queue(poll->vq, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);

static void __vhost_vq_meta_reset(struct vhost_virtqueue *vq)
{
	int j;

	for (j = 0; j < VHOST_NUM_ADDRS; j++)
		vq->meta_iotlb[j] = NULL;
}

static void vhost_vq_meta_reset(struct vhost_dev *d)
{
	int i;

	for (i = 0; i < d->nvqs; ++i)
		__vhost_vq_meta_reset(d->vqs[i]);
}

static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx)
{
	call_ctx->ctx = NULL;
	memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer));
}

bool vhost_vq_is_setup(struct vhost_virtqueue *vq)
{
	return vq->avail && vq->desc && vq->used && vhost_vq_access_ok(vq);
}
EXPORT_SYMBOL_GPL(vhost_vq_is_setup);

static void vhost_vq_reset(struct vhost_dev *dev,
			   struct vhost_virtqueue *vq)
{
	vq->num = 1;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;
	vq->last_avail_idx = 0;
	vq->avail_idx = 0;
	vq->last_used_idx = 0;
	vq->signalled_used = 0;
	vq->signalled_used_valid = false;
	vq->used_flags = 0;
	vq->log_used = false;
	vq->log_addr = -1ull;
	vq->private_data = NULL;
	vq->acked_features = 0;
	vq->acked_backend_features = 0;
	vq->log_base = NULL;
	vq->error_ctx = NULL;
	vq->kick = NULL;
	vq->log_ctx = NULL;
	vhost_disable_cross_endian(vq);
	vhost_reset_is_le(vq);
	vq->busyloop_timeout = 0;
	vq->umem = NULL;
	vq->iotlb = NULL;
	rcu_assign_pointer(vq->worker, NULL);
	vhost_vring_call_reset(&vq->call_ctx);
	__vhost_vq_meta_reset(vq);
}

static bool vhost_run_work_list(void *data)
{
	struct vhost_worker *worker = data;
	struct vhost_work *work, *work_next;
	struct llist_node *node;

	node = llist_del_all(&worker->work_list);
	if (node) {
		__set_current_state(TASK_RUNNING);

		node = llist_reverse_order(node);
		/* make sure flag is seen after deletion */
		smp_wmb();
		llist_for_each_entry_safe(work, work_next, node, node) {
			clear_bit(VHOST_WORK_QUEUED, &work->flags);
			kcov_remote_start_common(worker->kcov_handle);
			work->fn(work);
			kcov_remote_stop();
			cond_resched();
		}
	}

	return !!node;
}
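/*
 * Added note (not in the original): worker->work_list is a lock-free
 * llist. Producers push entries with llist_add() in
 * vhost_worker_queue(), so llist_del_all() above hands them back in
 * LIFO order; llist_reverse_order() restores the order works were
 * queued in. VHOST_WORK_QUEUED is cleared only after the node has been
 * unlinked, so a work may requeue itself from inside its own handler.
 */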
static void vhost_worker_killed(void *data)
{
	struct vhost_worker *worker = data;
	struct vhost_dev *dev = worker->dev;
	struct vhost_virtqueue *vq;
	int i, attach_cnt = 0;

	mutex_lock(&worker->mutex);
	worker->killed = true;

	for (i = 0; i < dev->nvqs; i++) {
		vq = dev->vqs[i];

		mutex_lock(&vq->mutex);
		if (worker ==
		    rcu_dereference_check(vq->worker,
					  lockdep_is_held(&vq->mutex))) {
			rcu_assign_pointer(vq->worker, NULL);
			attach_cnt++;
		}
		mutex_unlock(&vq->mutex);
	}

	worker->attachment_cnt -= attach_cnt;
	if (attach_cnt)
		synchronize_rcu();
	/*
	 * Finish vhost_worker_flush calls and any other works that snuck in
	 * before the synchronize_rcu.
	 */
	vhost_run_work_list(worker);
	mutex_unlock(&worker->mutex);
}

static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
	kfree(vq->indirect);
	vq->indirect = NULL;
	kfree(vq->log);
	vq->log = NULL;
	kfree(vq->heads);
	vq->heads = NULL;
}

/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->indirect = kmalloc_array(UIO_MAXIOV,
					     sizeof(*vq->indirect),
					     GFP_KERNEL);
		vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log),
					GFP_KERNEL);
		vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads),
					  GFP_KERNEL);
		if (!vq->indirect || !vq->log || !vq->heads)
			goto err_nomem;
	}
	return 0;

err_nomem:
	for (; i >= 0; --i)
		vhost_vq_free_iovecs(dev->vqs[i]);
	return -ENOMEM;
}

static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i)
		vhost_vq_free_iovecs(dev->vqs[i]);
}

bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
			  int pkts, int total_len)
{
	struct vhost_dev *dev = vq->dev;

	if ((dev->byte_weight && total_len >= dev->byte_weight) ||
	    pkts >= dev->weight) {
		vhost_poll_queue(&vq->poll);
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(vhost_exceeds_weight);

static size_t vhost_get_avail_size(struct vhost_virtqueue *vq,
				   unsigned int num)
{
	size_t event __maybe_unused =
	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return size_add(struct_size(vq->avail, ring, num), event);
}

static size_t vhost_get_used_size(struct vhost_virtqueue *vq,
				  unsigned int num)
{
	size_t event __maybe_unused =
	       vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return size_add(struct_size(vq->used, ring, num), event);
}

static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
				  unsigned int num)
{
	return sizeof(*vq->desc) * num;
}

void vhost_dev_init(struct vhost_dev *dev,
		    struct vhost_virtqueue **vqs, int nvqs,
		    int iov_limit, int weight, int byte_weight,
		    bool use_worker,
		    int (*msg_handler)(struct vhost_dev *dev, u32 asid,
				       struct vhost_iotlb_msg *msg))
{
	struct vhost_virtqueue *vq;
	int i;

	dev->vqs = vqs;
	dev->nvqs = nvqs;
	mutex_init(&dev->mutex);
	dev->log_ctx = NULL;
	dev->umem = NULL;
	dev->iotlb = NULL;
	dev->mm = NULL;
	dev->iov_limit = iov_limit;
	dev->weight = weight;
	dev->byte_weight = byte_weight;
	dev->use_worker = use_worker;
	dev->msg_handler = msg_handler;
	init_waitqueue_head(&dev->wait);
	INIT_LIST_HEAD(&dev->read_list);
	INIT_LIST_HEAD(&dev->pending_list);
	spin_lock_init(&dev->iotlb_lock);
	xa_init_flags(&dev->worker_xa, XA_FLAGS_ALLOC);

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->log = NULL;
		vq->indirect = NULL;
		vq->heads = NULL;
		vq->dev = dev;
		mutex_init(&vq->mutex);
		vhost_vq_reset(dev, vq);
		if (vq->handle_kick)
			vhost_poll_init(&vq->poll, vq->handle_kick,
					EPOLLIN, dev, vq);
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_init);

/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
	/* Are you the owner? If not, I don't think you mean to do that */
	return dev->mm == current->mm ? 0 : -EPERM;
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);

/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
	return dev->mm;
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
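/*
 * Illustrative sketch, not part of this file: minimal backend setup
 * around vhost_dev_init() above. struct mydev, mydev_handle_kick and
 * the weight values are hypothetical; real backends (net, scsi, vsock)
 * pick weights that bound how long one work item may run.
 */
#if 0	/* example only */
#define MYDEV_NVQS		2
#define MYDEV_WEIGHT		256		/* max requests per work item */
#define MYDEV_BYTE_WEIGHT	(64 * 1024)	/* max bytes per work item */

static void mydev_handle_kick(struct vhost_work *work);	/* hypothetical */

struct mydev {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[MYDEV_NVQS];
	struct vhost_virtqueue *vq_ptrs[MYDEV_NVQS];
};

static void mydev_open(struct mydev *d)
{
	int i;

	for (i = 0; i < MYDEV_NVQS; i++) {
		d->vqs[i].handle_kick = mydev_handle_kick;
		d->vq_ptrs[i] = &d->vqs[i];
	}
	/* use_worker=true: kicks run in a vhost_task, not the caller */
	vhost_dev_init(&d->dev, d->vq_ptrs, MYDEV_NVQS, UIO_MAXIOV,
		       MYDEV_WEIGHT, MYDEV_BYTE_WEIGHT, true, NULL);
}
#endif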
static void vhost_attach_mm(struct vhost_dev *dev)
{
	/* No owner, become one */
	if (dev->use_worker) {
		dev->mm = get_task_mm(current);
	} else {
		/* vDPA device does not use worker thread, so there's
		 * no need to hold the address space for mm. This helps
		 * to avoid deadlock in the case of mmap() which may
		 * hold the refcnt of the file and depends on the release
		 * method to remove the vma.
		 */
		dev->mm = current->mm;
		mmgrab(dev->mm);
	}
}

static void vhost_detach_mm(struct vhost_dev *dev)
{
	if (!dev->mm)
		return;

	if (dev->use_worker)
		mmput(dev->mm);
	else
		mmdrop(dev->mm);

	dev->mm = NULL;
}

static void vhost_worker_destroy(struct vhost_dev *dev,
				 struct vhost_worker *worker)
{
	if (!worker)
		return;

	WARN_ON(!llist_empty(&worker->work_list));
	xa_erase(&dev->worker_xa, worker->id);
	vhost_task_stop(worker->vtsk);
	kfree(worker);
}

static void vhost_workers_free(struct vhost_dev *dev)
{
	struct vhost_worker *worker;
	unsigned long i;

	if (!dev->use_worker)
		return;

	for (i = 0; i < dev->nvqs; i++)
		rcu_assign_pointer(dev->vqs[i]->worker, NULL);
	/*
	 * Free the default worker we created and cleanup workers userspace
	 * created but couldn't clean up (it forgot or crashed).
	 */
	xa_for_each(&dev->worker_xa, i, worker)
		vhost_worker_destroy(dev, worker);
	xa_destroy(&dev->worker_xa);
}

static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
{
	struct vhost_worker *worker;
	struct vhost_task *vtsk;
	char name[TASK_COMM_LEN];
	int ret;
	u32 id;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
	if (!worker)
		return NULL;

	worker->dev = dev;
	snprintf(name, sizeof(name), "vhost-%d", current->pid);

	vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
				 worker, name);
	if (IS_ERR(vtsk))
		goto free_worker;

	mutex_init(&worker->mutex);
	init_llist_head(&worker->work_list);
	worker->kcov_handle = kcov_common_handle();
	worker->vtsk = vtsk;

	vhost_task_start(vtsk);

	ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
	if (ret < 0)
		goto stop_worker;
	worker->id = id;

	return worker;

stop_worker:
	vhost_task_stop(vtsk);
free_worker:
	kfree(worker);
	return NULL;
}

/* Caller must have device mutex */
static void __vhost_vq_attach_worker(struct vhost_virtqueue *vq,
				     struct vhost_worker *worker)
{
	struct vhost_worker *old_worker;

	mutex_lock(&worker->mutex);
	if (worker->killed) {
		mutex_unlock(&worker->mutex);
		return;
	}

	mutex_lock(&vq->mutex);

	old_worker = rcu_dereference_check(vq->worker,
					   lockdep_is_held(&vq->mutex));
	rcu_assign_pointer(vq->worker, worker);
	worker->attachment_cnt++;

	if (!old_worker) {
		mutex_unlock(&vq->mutex);
		mutex_unlock(&worker->mutex);
		return;
	}
	mutex_unlock(&vq->mutex);
	mutex_unlock(&worker->mutex);

	/*
	 * Take the worker mutex to make sure we see the work queued from
	 * device wide flushes which don't use RCU for execution.
	 */
	mutex_lock(&old_worker->mutex);
	if (old_worker->killed) {
		mutex_unlock(&old_worker->mutex);
		return;
	}

	/*
	 * We don't want to call synchronize_rcu for every vq during setup
	 * because it will slow down VM startup. If we haven't done
	 * VHOST_SET_VRING_KICK and not done the driver specific
	 * SET_ENDPOINT/RUNNING then we can skip the sync since there will
	 * not be any works queued for scsi and net.
	 */
	mutex_lock(&vq->mutex);
	if (!vhost_vq_get_backend(vq) && !vq->kick) {
		mutex_unlock(&vq->mutex);

		old_worker->attachment_cnt--;
		mutex_unlock(&old_worker->mutex);
		/*
		 * vsock can queue anytime after VHOST_VSOCK_SET_GUEST_CID.
		 * Warn if it adds support for multiple workers but forgets to
		 * handle the early queueing case.
		 */
		WARN_ON(!old_worker->attachment_cnt &&
			!llist_empty(&old_worker->work_list));
		return;
	}
	mutex_unlock(&vq->mutex);

	/* Make sure new vq queue/flush/poll calls see the new worker */
	synchronize_rcu();
	/* Make sure whatever was queued gets run */
	__vhost_worker_flush(old_worker);
	old_worker->attachment_cnt--;
	mutex_unlock(&old_worker->mutex);
}

/* Caller must have device mutex */
static int vhost_vq_attach_worker(struct vhost_virtqueue *vq,
				  struct vhost_vring_worker *info)
{
	unsigned long index = info->worker_id;
	struct vhost_dev *dev = vq->dev;
	struct vhost_worker *worker;

	if (!dev->use_worker)
		return -EINVAL;

	worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT);
	if (!worker || worker->id != info->worker_id)
		return -ENODEV;

	__vhost_vq_attach_worker(vq, worker);
	return 0;
}

/* Caller must have device mutex */
static int vhost_new_worker(struct vhost_dev *dev,
			    struct vhost_worker_state *info)
{
	struct vhost_worker *worker;

	worker = vhost_worker_create(dev);
	if (!worker)
		return -ENOMEM;

	info->worker_id = worker->id;
	return 0;
}

/* Caller must have device mutex */
static int vhost_free_worker(struct vhost_dev *dev,
			     struct vhost_worker_state *info)
{
	unsigned long index = info->worker_id;
	struct vhost_worker *worker;

	worker = xa_find(&dev->worker_xa, &index, UINT_MAX, XA_PRESENT);
	if (!worker || worker->id != info->worker_id)
		return -ENODEV;

	mutex_lock(&worker->mutex);
	if (worker->attachment_cnt || worker->killed) {
		mutex_unlock(&worker->mutex);
		return -EBUSY;
	}
	/*
	 * A flush might have raced and snuck in before attachment_cnt was set
	 * to zero. Make sure flushes are flushed from the queue before
	 * freeing.
	 */
	__vhost_worker_flush(worker);
	mutex_unlock(&worker->mutex);

	vhost_worker_destroy(dev, worker);
	return 0;
}

static int vhost_get_vq_from_user(struct vhost_dev *dev, void __user *argp,
				  struct vhost_virtqueue **vq, u32 *id)
{
	u32 __user *idxp = argp;
	u32 idx;
	long r;

	r = get_user(idx, idxp);
	if (r < 0)
		return r;

	if (idx >= dev->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, dev->nvqs);

	*vq = dev->vqs[idx];
	*id = idx;
	return 0;
}

/* Caller must have device mutex */
long vhost_worker_ioctl(struct vhost_dev *dev, unsigned int ioctl,
			void __user *argp)
{
	struct vhost_vring_worker ring_worker;
	struct vhost_worker_state state;
	struct vhost_worker *worker;
	struct vhost_virtqueue *vq;
	long ret;
	u32 idx;

	if (!dev->use_worker)
		return -EINVAL;

	if (!vhost_dev_has_owner(dev))
		return -EINVAL;

	ret = vhost_dev_check_owner(dev);
	if (ret)
		return ret;

	switch (ioctl) {
	/* dev worker ioctls */
	case VHOST_NEW_WORKER:
		ret = vhost_new_worker(dev, &state);
		if (!ret && copy_to_user(argp, &state, sizeof(state)))
			ret = -EFAULT;
		return ret;
	case VHOST_FREE_WORKER:
		if (copy_from_user(&state, argp, sizeof(state)))
			return -EFAULT;
		return vhost_free_worker(dev, &state);
	/* vring worker ioctls */
	case VHOST_ATTACH_VRING_WORKER:
	case VHOST_GET_VRING_WORKER:
		break;
	default:
		return -ENOIOCTLCMD;
	}

	ret = vhost_get_vq_from_user(dev, argp, &vq, &idx);
	if (ret)
		return ret;

	switch (ioctl) {
	case VHOST_ATTACH_VRING_WORKER:
		if (copy_from_user(&ring_worker, argp, sizeof(ring_worker))) {
			ret = -EFAULT;
			break;
		}

		ret = vhost_vq_attach_worker(vq, &ring_worker);
		break;
	case VHOST_GET_VRING_WORKER:
		worker = rcu_dereference_check(vq->worker,
					       lockdep_is_held(&dev->mutex));
		if (!worker) {
			ret = -EINVAL;
			break;
		}

		ring_worker.index = idx;
		ring_worker.worker_id = worker->id;

		if (copy_to_user(argp, &ring_worker, sizeof(ring_worker)))
			ret = -EFAULT;
		break;
	default:
		ret = -ENOIOCTLCMD;
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_worker_ioctl);
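/*
 * Illustrative sketch, not part of this file: the userspace side of
 * vhost_worker_ioctl() above. A plausible sequence, assuming the uapi
 * definitions from <linux/vhost.h>; vhost_fd is a hypothetical vhost
 * device fd on which VHOST_SET_OWNER has already succeeded.
 */
#if 0	/* example only, userspace code */
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int move_vq_to_new_worker(int vhost_fd, unsigned int vq_idx)
{
	struct vhost_worker_state state = { 0 };
	struct vhost_vring_worker ring_worker;

	/* ask the kernel to create a worker; it fills in worker_id */
	if (ioctl(vhost_fd, VHOST_NEW_WORKER, &state) < 0)
		return -1;

	ring_worker.index = vq_idx;		 /* which virtqueue */
	ring_worker.worker_id = state.worker_id; /* worker created above */
	return ioctl(vhost_fd, VHOST_ATTACH_VRING_WORKER, &ring_worker);
}
#endif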
/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
	struct vhost_worker *worker;
	int err, i;

	/* Is there an owner already? */
	if (vhost_dev_has_owner(dev)) {
		err = -EBUSY;
		goto err_mm;
	}

	vhost_attach_mm(dev);

	err = vhost_dev_alloc_iovecs(dev);
	if (err)
		goto err_iovecs;

	if (dev->use_worker) {
		/*
		 * This should be done last, because vsock can queue work
		 * before VHOST_SET_OWNER so it simplifies the failure path
		 * below since we don't have to worry about vsock queueing
		 * while we free the worker.
		 */
		worker = vhost_worker_create(dev);
		if (!worker) {
			err = -ENOMEM;
			goto err_worker;
		}

		for (i = 0; i < dev->nvqs; i++)
			__vhost_vq_attach_worker(dev->vqs[i], worker);
	}

	return 0;

err_worker:
	vhost_dev_free_iovecs(dev);
err_iovecs:
	vhost_detach_mm(dev);
err_mm:
	return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);

static struct vhost_iotlb *iotlb_alloc(void)
{
	return vhost_iotlb_alloc(max_iotlb_entries,
				 VHOST_IOTLB_FLAG_RETIRE);
}

struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
{
	return iotlb_alloc();
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);

/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
{
	int i;

	vhost_dev_cleanup(dev);

	dev->umem = umem;
	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
	 * VQs aren't running.
	 */
	for (i = 0; i < dev->nvqs; ++i)
		dev->vqs[i]->umem = umem;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);

void vhost_dev_stop(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick)
			vhost_poll_stop(&dev->vqs[i]->poll);
	}

	vhost_dev_flush(dev);
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);

void vhost_clear_msg(struct vhost_dev *dev)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&dev->iotlb_lock);

	list_for_each_entry_safe(node, n, &dev->read_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	list_for_each_entry_safe(node, n, &dev->pending_list, node) {
		list_del(&node->node);
		kfree(node);
	}

	spin_unlock(&dev->iotlb_lock);
}
EXPORT_SYMBOL_GPL(vhost_clear_msg);

void vhost_dev_cleanup(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->error_ctx)
			eventfd_ctx_put(dev->vqs[i]->error_ctx);
		if (dev->vqs[i]->kick)
			fput(dev->vqs[i]->kick);
		if (dev->vqs[i]->call_ctx.ctx)
			eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx);
		vhost_vq_reset(dev, dev->vqs[i]);
	}
	vhost_dev_free_iovecs(dev);
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	/* No one will access memory at this point */
	vhost_iotlb_free(dev->umem);
	dev->umem = NULL;
	vhost_iotlb_free(dev->iotlb);
	dev->iotlb = NULL;
	vhost_clear_msg(dev);
	wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
	vhost_workers_free(dev);
	vhost_detach_mm(dev);
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);

static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
	u64 a = addr / VHOST_PAGE_SIZE / 8;

	/* Make sure 64 bit math will not overflow. */
	if (a > ULONG_MAX - (unsigned long)log_base ||
	    a + (unsigned long)log_base > ULONG_MAX)
		return false;

	return access_ok(log_base + a,
			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}

/* Make sure 64 bit math will not overflow. */
static bool vhost_overflow(u64 uaddr, u64 size)
{
	if (uaddr > ULONG_MAX || size > ULONG_MAX)
		return true;

	if (!size)
		return false;

	return uaddr > ULONG_MAX - size + 1;
}
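/*
 * Added note (not in the original): log_access_ok() above treats the
 * log as a dirty bitmap with one bit per VHOST_PAGE_SIZE page. Worked
 * example, assuming VHOST_PAGE_SIZE == 0x1000: guest address 0x12345000
 * is page 0x12345, and its dirty bit lives in bitmap byte
 * 0x12345 / 8 = 0x2468, which is exactly addr / VHOST_PAGE_SIZE / 8.
 * The access_ok() length likewise rounds sz up to whole bitmap bytes.
 */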
/* Caller should have vq mutex and device mutex. */
static bool vq_memory_access_ok(void __user *log_base,
				struct vhost_iotlb *umem,
				int log_all)
{
	struct vhost_iotlb_map *map;

	if (!umem)
		return false;

	list_for_each_entry(map, &umem->list, link) {
		unsigned long a = map->addr;

		if (vhost_overflow(map->addr, map->size))
			return false;

		if (!access_ok((void __user *)a, map->size))
			return false;
		else if (log_all && !log_access_ok(log_base,
						   map->start,
						   map->size))
			return false;
	}
	return true;
}

static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
					       u64 addr, unsigned int size,
					       int type)
{
	const struct vhost_iotlb_map *map = vq->meta_iotlb[type];

	if (!map)
		return NULL;

	return (void __user *)(uintptr_t)(map->addr + addr - map->start);
}

/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
			     int log_all)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		bool ok;
		bool log;

		mutex_lock(&d->vqs[i]->mutex);
		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
		/* If ring is inactive, will check when it's enabled. */
		if (d->vqs[i]->private_data)
			ok = vq_memory_access_ok(d->vqs[i]->log_base,
						 umem, log);
		else
			ok = true;
		mutex_unlock(&d->vqs[i]->mutex);
		if (!ok)
			return false;
	}
	return true;
}

static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size, int access);

static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
			      const void *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_to_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that all vq
		 * memory can be accessed through the iotlb. So -EAGAIN
		 * should not happen in this case.
		 */
		struct iov_iter t;
		void __user *uaddr = vhost_vq_meta_fetch(vq,
				     (u64)(uintptr_t)to, size,
				     VHOST_ADDR_USED);

		if (uaddr)
			return __copy_to_user(uaddr, from, size);

		ret = translate_desc(vq, (u64)(uintptr_t)to, size,
				     vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_WO);
		if (ret < 0)
			goto out;
		iov_iter_init(&t, ITER_DEST, vq->iotlb_iov, ret, size);
		ret = copy_to_iter(from, size, &t);
		if (ret == size)
			ret = 0;
	}
out:
	return ret;
}

static int vhost_copy_from_user(struct vhost_virtqueue *vq, void *to,
				void __user *from, unsigned size)
{
	int ret;

	if (!vq->iotlb)
		return __copy_from_user(to, from, size);
	else {
		/* This function should be called after iotlb
		 * prefetch, which means we're sure that the vq
		 * can be accessed through the iotlb. So -EAGAIN
		 * should not happen in this case.
		 */
		void __user *uaddr = vhost_vq_meta_fetch(vq,
				     (u64)(uintptr_t)from, size,
				     VHOST_ADDR_DESC);
		struct iov_iter f;

		if (uaddr)
			return __copy_from_user(to, uaddr, size);

		ret = translate_desc(vq, (u64)(uintptr_t)from, size,
				     vq->iotlb_iov,
				     ARRAY_SIZE(vq->iotlb_iov),
				     VHOST_ACCESS_RO);
		if (ret < 0) {
			vq_err(vq, "IOTLB translation failure: uaddr %p size 0x%llx\n",
			       from, (unsigned long long)size);
			goto out;
		}
		iov_iter_init(&f, ITER_SOURCE, vq->iotlb_iov, ret, size);
		ret = copy_from_iter(to, size, &f);
		if (ret == size)
			ret = 0;
	}

out:
	return ret;
}

static void __user *__vhost_get_user_slow(struct vhost_virtqueue *vq,
					  void __user *addr, unsigned int size,
					  int type)
{
	int ret;

	ret = translate_desc(vq, (u64)(uintptr_t)addr, size, vq->iotlb_iov,
			     ARRAY_SIZE(vq->iotlb_iov),
			     VHOST_ACCESS_RO);
	if (ret < 0) {
		vq_err(vq, "IOTLB translation failure: uaddr %p size 0x%llx\n",
		       addr, (unsigned long long)size);
		return NULL;
	}

	if (ret != 1 || vq->iotlb_iov[0].iov_len != size) {
		vq_err(vq, "Non atomic userspace memory access: uaddr %p size 0x%llx\n",
		       addr, (unsigned long long)size);
		return NULL;
	}

	return vq->iotlb_iov[0].iov_base;
}

/* This function should be called after iotlb
 * prefetch, which means we're sure that the vq
 * can be accessed through the iotlb. So -EAGAIN
 * should not happen in this case.
 */
static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
					    void __user *addr,
					    unsigned int size, int type)
{
	void __user *uaddr = vhost_vq_meta_fetch(vq,
			     (u64)(uintptr_t)addr, size, type);
	if (uaddr)
		return uaddr;

	return __vhost_get_user_slow(vq, addr, size, type);
}

#define vhost_put_user(vq, x, ptr)		\
({ \
	int ret; \
	if (!vq->iotlb) { \
		ret = __put_user(x, ptr); \
	} else { \
		__typeof__(ptr) to = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr,	\
					  sizeof(*ptr), VHOST_ADDR_USED); \
		if (to != NULL) \
			ret = __put_user(x, to); \
		else \
			ret = -EFAULT;	\
	} \
	ret; \
})

static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
{
	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
			      vhost_avail_event(vq));
}

static inline int vhost_put_used(struct vhost_virtqueue *vq,
				 struct vring_used_elem *head, int idx,
				 int count)
{
	return vhost_copy_to_user(vq, vq->used->ring + idx, head,
				  count * sizeof(*head));
}

static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
{
	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
			      &vq->used->flags);
}

static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
{
	return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
			      &vq->used->idx);
}

#define vhost_get_user(vq, x, ptr, type)		\
({ \
	int ret; \
	if (!vq->iotlb) { \
		ret = __get_user(x, ptr); \
	} else { \
		__typeof__(ptr) from = \
			(__typeof__(ptr)) __vhost_get_user(vq, ptr, \
							   sizeof(*ptr), \
							   type); \
		if (from != NULL) \
			ret = __get_user(x, from); \
		else \
			ret = -EFAULT; \
	} \
	ret; \
})

#define vhost_get_avail(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_AVAIL)

#define vhost_get_used(vq, x, ptr) \
	vhost_get_user(vq, x, ptr, VHOST_ADDR_USED)

static void vhost_dev_lock_vqs(struct vhost_dev *d)
{
	int i = 0;

	for (i = 0; i < d->nvqs; ++i)
		mutex_lock_nested(&d->vqs[i]->mutex, i);
}

static void vhost_dev_unlock_vqs(struct vhost_dev *d)
{
	int i = 0;

	for (i = 0; i < d->nvqs; ++i)
		mutex_unlock(&d->vqs[i]->mutex);
}
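/*
 * Added note (not in the original): vhost_get_user()/vhost_put_user()
 * above have two paths. Without an IOTLB they are plain __get_user()/
 * __put_user() on the user vring. With an IOTLB, __vhost_get_user()
 * first consults the per-vq meta_iotlb cache, turning a guest IOVA into
 * a host user address without walking the interval tree; only on a
 * cache miss does it fall back to the translate_desc() slow path, and a
 * failed translation surfaces to the caller as -EFAULT.
 */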
static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq)
{
	__virtio16 idx;
	int r;

	r = vhost_get_avail(vq, idx, &vq->avail->idx);
	if (unlikely(r < 0)) {
		vq_err(vq, "Failed to access available index at %p (%d)\n",
		       &vq->avail->idx, r);
		return r;
	}

	/* Check it isn't doing very strange things with available indexes. */
	vq->avail_idx = vhost16_to_cpu(vq, idx);
	if (unlikely((u16)(vq->avail_idx - vq->last_avail_idx) > vq->num)) {
		vq_err(vq, "Invalid available index change from %u to %u",
		       vq->last_avail_idx, vq->avail_idx);
		return -EINVAL;
	}

	/* We're done if there is nothing new */
	if (vq->avail_idx == vq->last_avail_idx)
		return 0;

	/*
	 * We updated vq->avail_idx so we need a memory barrier between
	 * the index read above and the caller reading avail ring entries.
	 */
	smp_rmb();
	return 1;
}

static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
				       __virtio16 *head, int idx)
{
	return vhost_get_avail(vq, *head,
			       &vq->avail->ring[idx & (vq->num - 1)]);
}

static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
					__virtio16 *flags)
{
	return vhost_get_avail(vq, *flags, &vq->avail->flags);
}

static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
				       __virtio16 *event)
{
	return vhost_get_avail(vq, *event, vhost_used_event(vq));
}

static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
				     __virtio16 *idx)
{
	return vhost_get_used(vq, *idx, &vq->used->idx);
}

static inline int vhost_get_desc(struct vhost_virtqueue *vq,
				 struct vring_desc *desc, int idx)
{
	return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
}

static void vhost_iotlb_notify_vq(struct vhost_dev *d,
				  struct vhost_iotlb_msg *msg)
{
	struct vhost_msg_node *node, *n;

	spin_lock(&d->iotlb_lock);

	list_for_each_entry_safe(node, n, &d->pending_list, node) {
		struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;

		if (msg->iova <= vq_msg->iova &&
		    msg->iova + msg->size - 1 >= vq_msg->iova &&
		    vq_msg->type == VHOST_IOTLB_MISS) {
			vhost_poll_queue(&node->vq->poll);
			list_del(&node->node);
			kfree(node);
		}
	}

	spin_unlock(&d->iotlb_lock);
}

static bool umem_access_ok(u64 uaddr, u64 size, int access)
{
	unsigned long a = uaddr;

	/* Make sure 64 bit math will not overflow. */
	if (vhost_overflow(uaddr, size))
		return false;

	if ((access & VHOST_ACCESS_RO) &&
	    !access_ok((void __user *)a, size))
		return false;
	if ((access & VHOST_ACCESS_WO) &&
	    !access_ok((void __user *)a, size))
		return false;
	return true;
}

static int vhost_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
				   struct vhost_iotlb_msg *msg)
{
	int ret = 0;

	if (asid != 0)
		return -EINVAL;

	mutex_lock(&dev->mutex);
	vhost_dev_lock_vqs(dev);
	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		if (!umem_access_ok(msg->uaddr, msg->size, msg->perm)) {
			ret = -EFAULT;
			break;
		}
		vhost_vq_meta_reset(dev);
		if (vhost_iotlb_add_range(dev->iotlb, msg->iova,
					  msg->iova + msg->size - 1,
					  msg->uaddr, msg->perm)) {
			ret = -ENOMEM;
			break;
		}
		vhost_iotlb_notify_vq(dev, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		if (!dev->iotlb) {
			ret = -EFAULT;
			break;
		}
		vhost_vq_meta_reset(dev);
		vhost_iotlb_del_range(dev->iotlb, msg->iova,
				      msg->iova + msg->size - 1);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	vhost_dev_unlock_vqs(dev);
	mutex_unlock(&dev->mutex);

	return ret;
}
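/*
 * Illustrative sketch, not part of this file: the userspace side of
 * vhost_process_iotlb_msg() above. A VMM answers an IOTLB miss by
 * writing a VHOST_IOTLB_UPDATE message to the vhost device fd, which
 * lands in vhost_chr_write_iter() below. Field values are hypothetical.
 */
#if 0	/* example only, userspace code */
#include <unistd.h>
#include <linux/vhost_types.h>

static int reply_iotlb_miss(int vhost_fd, __u64 iova, __u64 size,
			    __u64 uaddr, __u8 perm)
{
	struct vhost_msg_v2 msg = {
		.type = VHOST_IOTLB_MSG_V2,
		.iotlb = {
			.iova  = iova,	/* guest IO virtual address */
			.size  = size,
			.uaddr = uaddr,	/* backing address in the VMM */
			.perm  = perm,	/* VHOST_ACCESS_RO/WO/RW */
			.type  = VHOST_IOTLB_UPDATE,
		},
	};

	return write(vhost_fd, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}
#endif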
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from)
{
	struct vhost_iotlb_msg msg;
	size_t offset;
	int type, ret;
	u32 asid = 0;

	ret = copy_from_iter(&type, sizeof(type), from);
	if (ret != sizeof(type)) {
		ret = -EINVAL;
		goto done;
	}

	switch (type) {
	case VHOST_IOTLB_MSG:
		/* There may be a hole after type for V1 message type,
		 * so skip it here.
		 */
		offset = offsetof(struct vhost_msg, iotlb) - sizeof(int);
		break;
	case VHOST_IOTLB_MSG_V2:
		if (vhost_backend_has_feature(dev->vqs[0],
					      VHOST_BACKEND_F_IOTLB_ASID)) {
			ret = copy_from_iter(&asid, sizeof(asid), from);
			if (ret != sizeof(asid)) {
				ret = -EINVAL;
				goto done;
			}
			offset = 0;
		} else
			offset = sizeof(__u32);
		break;
	default:
		ret = -EINVAL;
		goto done;
	}

	iov_iter_advance(from, offset);
	ret = copy_from_iter(&msg, sizeof(msg), from);
	if (ret != sizeof(msg)) {
		ret = -EINVAL;
		goto done;
	}

	if (msg.type == VHOST_IOTLB_UPDATE && msg.size == 0) {
		ret = -EINVAL;
		goto done;
	}

	if (dev->msg_handler)
		ret = dev->msg_handler(dev, asid, &msg);
	else
		ret = vhost_process_iotlb_msg(dev, asid, &msg);
	if (ret) {
		ret = -EFAULT;
		goto done;
	}

	ret = (type == VHOST_IOTLB_MSG) ? sizeof(struct vhost_msg) :
	      sizeof(struct vhost_msg_v2);
done:
	return ret;
}
EXPORT_SYMBOL(vhost_chr_write_iter);

__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			poll_table *wait)
{
	__poll_t mask = 0;

	poll_wait(file, &dev->wait, wait);

	if (!list_empty(&dev->read_list))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
EXPORT_SYMBOL(vhost_chr_poll);

ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock)
{
	DEFINE_WAIT(wait);
	struct vhost_msg_node *node;
	ssize_t ret = 0;
	unsigned size = sizeof(struct vhost_msg);

	if (iov_iter_count(to) < size)
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(&dev->wait, &wait,
					TASK_INTERRUPTIBLE);

		node = vhost_dequeue_msg(dev, &dev->read_list);
		if (node)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (!dev->iotlb) {
			ret = -EBADFD;
			break;
		}

		schedule();
	}

	if (!noblock)
		finish_wait(&dev->wait, &wait);

	if (node) {
		struct vhost_iotlb_msg *msg;
		void *start = &node->msg;

		switch (node->msg.type) {
		case VHOST_IOTLB_MSG:
			size = sizeof(node->msg);
			msg = &node->msg.iotlb;
			break;
		case VHOST_IOTLB_MSG_V2:
			size = sizeof(node->msg_v2);
			msg = &node->msg_v2.iotlb;
			break;
		default:
			BUG();
			break;
		}

		ret = copy_to_iter(start, size, to);
		if (ret != size || msg->type != VHOST_IOTLB_MISS) {
			kfree(node);
			return ret;
		}
		vhost_enqueue_msg(dev, &dev->pending_list, node);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_chr_read_iter);

static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
{
	struct vhost_dev *dev = vq->dev;
	struct vhost_msg_node *node;
	struct vhost_iotlb_msg *msg;
	bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);

	node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
	if (!node)
		return -ENOMEM;

	if (v2) {
		node->msg_v2.type = VHOST_IOTLB_MSG_V2;
		msg = &node->msg_v2.iotlb;
	} else {
		msg = &node->msg.iotlb;
	}

	msg->type = VHOST_IOTLB_MISS;
	msg->iova = iova;
	msg->perm = access;

	vhost_enqueue_msg(dev, &dev->read_list, node);

	return 0;
}

static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
			 vring_desc_t __user *desc,
			 vring_avail_t __user *avail,
			 vring_used_t __user *used)
{
	/* If an IOTLB device is present, the vring addresses are
	 * GIOVAs. Access validation occurs at prefetch time.
	 */
	if (vq->iotlb)
		return true;

	return access_ok(desc, vhost_get_desc_size(vq, num)) &&
	       access_ok(avail, vhost_get_avail_size(vq, num)) &&
	       access_ok(used, vhost_get_used_size(vq, num));
}
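/*
 * Added note (not in the original): vhost_iotlb_miss() above and
 * vhost_chr_read_iter()/vhost_iotlb_notify_vq() form a round trip.
 * A miss queues a VHOST_IOTLB_MISS message on read_list and fails the
 * current translation; userspace reads the message from the device fd,
 * which parks the node on pending_list, and answers with the
 * VHOST_IOTLB_UPDATE write handled earlier. vhost_iotlb_notify_vq()
 * then matches the update against pending_list and re-kicks the vq's
 * poll work so the stalled request is retried.
 */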
VHOST_ACCESS_WO : VHOST_ACCESS_RO; if (likely(map->perm & access)) vq->meta_iotlb[type] = map; } static bool iotlb_access_ok(struct vhost_virtqueue *vq, int access, u64 addr, u64 len, int type) { const struct vhost_iotlb_map *map; struct vhost_iotlb *umem = vq->iotlb; u64 s = 0, size, orig_addr = addr, last = addr + len - 1; if (vhost_vq_meta_fetch(vq, addr, len, type)) return true; while (len > s) { map = vhost_iotlb_itree_first(umem, addr, last); if (map == NULL || map->start > addr) { vhost_iotlb_miss(vq, addr, access); return false; } else if (!(map->perm & access)) { /* Report the possible access violation by * requesting another translation from userspace. */ return false; } size = map->size - addr + map->start; if (orig_addr == addr && size >= len) vhost_vq_meta_update(vq, map, type); s += size; addr += size; } return true; } int vq_meta_prefetch(struct vhost_virtqueue *vq) { unsigned int num = vq->num; if (!vq->iotlb) return 1; return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc, vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) && iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail, vhost_get_avail_size(vq, num), VHOST_ADDR_AVAIL) && iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used, vhost_get_used_size(vq, num), VHOST_ADDR_USED); } EXPORT_SYMBOL_GPL(vq_meta_prefetch); /* Can we log writes? */ /* Caller should have device mutex but not vq mutex */ bool vhost_log_access_ok(struct vhost_dev *dev) { return memory_access_ok(dev, dev->umem, 1); } EXPORT_SYMBOL_GPL(vhost_log_access_ok); static bool vq_log_used_access_ok(struct vhost_virtqueue *vq, void __user *log_base, bool log_used, u64 log_addr) { /* If an IOTLB device is present, log_addr is a GIOVA that * will never be logged by log_used(). */ if (vq->iotlb) return true; return !log_used || log_access_ok(log_base, log_addr, vhost_get_used_size(vq, vq->num)); } /* Verify access for write logging. */ /* Caller should have vq mutex and device mutex */ static bool vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base) { return vq_memory_access_ok(log_base, vq->umem, vhost_has_feature(vq, VHOST_F_LOG_ALL)) && vq_log_used_access_ok(vq, log_base, vq->log_used, vq->log_addr); } /* Can we start vq?
*/ /* Caller should have vq mutex and device mutex */ bool vhost_vq_access_ok(struct vhost_virtqueue *vq) { if (!vq_log_access_ok(vq, vq->log_base)) return false; return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used); } EXPORT_SYMBOL_GPL(vhost_vq_access_ok); static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) { struct vhost_memory mem, *newmem; struct vhost_memory_region *region; struct vhost_iotlb *newumem, *oldumem; unsigned long size = offsetof(struct vhost_memory, regions); int i; if (copy_from_user(&mem, m, size)) return -EFAULT; if (mem.padding) return -EOPNOTSUPP; if (mem.nregions > max_mem_regions) return -E2BIG; newmem = kvzalloc(struct_size(newmem, regions, mem.nregions), GFP_KERNEL); if (!newmem) return -ENOMEM; memcpy(newmem, &mem, size); if (copy_from_user(newmem->regions, m->regions, flex_array_size(newmem, regions, mem.nregions))) { kvfree(newmem); return -EFAULT; } newumem = iotlb_alloc(); if (!newumem) { kvfree(newmem); return -ENOMEM; } for (region = newmem->regions; region < newmem->regions + mem.nregions; region++) { if (vhost_iotlb_add_range(newumem, region->guest_phys_addr, region->guest_phys_addr + region->memory_size - 1, region->userspace_addr, VHOST_MAP_RW)) goto err; } if (!memory_access_ok(d, newumem, 0)) goto err; oldumem = d->umem; d->umem = newumem; /* All memory accesses are done under some VQ mutex. */ for (i = 0; i < d->nvqs; ++i) { mutex_lock(&d->vqs[i]->mutex); d->vqs[i]->umem = newumem; mutex_unlock(&d->vqs[i]->mutex); } kvfree(newmem); vhost_iotlb_free(oldumem); return 0; err: vhost_iotlb_free(newumem); kvfree(newmem); return -EFAULT; } static long vhost_vring_set_num(struct vhost_dev *d, struct vhost_virtqueue *vq, void __user *argp) { struct vhost_vring_state s; /* Resizing ring with an active backend? * You don't want to do that. */ if (vq->private_data) return -EBUSY; if (copy_from_user(&s, argp, sizeof s)) return -EFAULT; if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) return -EINVAL; vq->num = s.num; return 0; } static long vhost_vring_set_addr(struct vhost_dev *d, struct vhost_virtqueue *vq, void __user *argp) { struct vhost_vring_addr a; if (copy_from_user(&a, argp, sizeof a)) return -EFAULT; if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) return -EOPNOTSUPP; /* For 32bit, verify that the top 32bits of the user data are set to zero. */ if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr || (u64)(unsigned long)a.used_user_addr != a.used_user_addr || (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) return -EFAULT; /* Make sure it's safe to cast pointers to vring types. */ BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE); BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE); if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) || (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) || (a.log_guest_addr & (VRING_USED_ALIGN_SIZE - 1))) return -EINVAL; /* We only verify access here if backend is configured. * If it is not, we don't as size might not have been setup. * We will verify when backend is configured. */ if (vq->private_data) { if (!vq_access_ok(vq, vq->num, (void __user *)(unsigned long)a.desc_user_addr, (void __user *)(unsigned long)a.avail_user_addr, (void __user *)(unsigned long)a.used_user_addr)) return -EINVAL; /* Also validate log access for used ring if enabled. 
*/ if (!vq_log_used_access_ok(vq, vq->log_base, a.flags & (0x1 << VHOST_VRING_F_LOG), a.log_guest_addr)) return -EINVAL; } vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG)); vq->desc = (void __user *)(unsigned long)a.desc_user_addr; vq->avail = (void __user *)(unsigned long)a.avail_user_addr; vq->log_addr = a.log_guest_addr; vq->used = (void __user *)(unsigned long)a.used_user_addr; return 0; } static long vhost_vring_set_num_addr(struct vhost_dev *d, struct vhost_virtqueue *vq, unsigned int ioctl, void __user *argp) { long r; mutex_lock(&vq->mutex); switch (ioctl) { case VHOST_SET_VRING_NUM: r = vhost_vring_set_num(d, vq, argp); break; case VHOST_SET_VRING_ADDR: r = vhost_vring_set_addr(d, vq, argp); break; default: BUG(); } mutex_unlock(&vq->mutex); return r; } long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp) { struct file *eventfp, *filep = NULL; bool pollstart = false, pollstop = false; struct eventfd_ctx *ctx = NULL; struct vhost_virtqueue *vq; struct vhost_vring_state s; struct vhost_vring_file f; u32 idx; long r; r = vhost_get_vq_from_user(d, argp, &vq, &idx); if (r < 0) return r; if (ioctl == VHOST_SET_VRING_NUM || ioctl == VHOST_SET_VRING_ADDR) { return vhost_vring_set_num_addr(d, vq, ioctl, argp); } mutex_lock(&vq->mutex); switch (ioctl) { case VHOST_SET_VRING_BASE: /* Moving base with an active backend? * You don't want to do that. */ if (vq->private_data) { r = -EBUSY; break; } if (copy_from_user(&s, argp, sizeof s)) { r = -EFAULT; break; } if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) { vq->last_avail_idx = s.num & 0xffff; vq->last_used_idx = (s.num >> 16) & 0xffff; } else { if (s.num > 0xffff) { r = -EINVAL; break; } vq->last_avail_idx = s.num; } /* Forget the cached index value. */ vq->avail_idx = vq->last_avail_idx; break; case VHOST_GET_VRING_BASE: s.index = idx; if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) s.num = (u32)vq->last_avail_idx | ((u32)vq->last_used_idx << 16); else s.num = vq->last_avail_idx; if (copy_to_user(argp, &s, sizeof s)) r = -EFAULT; break; case VHOST_SET_VRING_KICK: if (copy_from_user(&f, argp, sizeof f)) { r = -EFAULT; break; } eventfp = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_fget(f.fd); if (IS_ERR(eventfp)) { r = PTR_ERR(eventfp); break; } if (eventfp != vq->kick) { pollstop = (filep = vq->kick) != NULL; pollstart = (vq->kick = eventfp) != NULL; } else filep = eventfp; break; case VHOST_SET_VRING_CALL: if (copy_from_user(&f, argp, sizeof f)) { r = -EFAULT; break; } ctx = f.fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(f.fd); if (IS_ERR(ctx)) { r = PTR_ERR(ctx); break; } swap(ctx, vq->call_ctx.ctx); break; case VHOST_SET_VRING_ERR: if (copy_from_user(&f, argp, sizeof f)) { r = -EFAULT; break; } ctx = f.fd == VHOST_FILE_UNBIND ? 
NULL : eventfd_ctx_fdget(f.fd); if (IS_ERR(ctx)) { r = PTR_ERR(ctx); break; } swap(ctx, vq->error_ctx); break; case VHOST_SET_VRING_ENDIAN: r = vhost_set_vring_endian(vq, argp); break; case VHOST_GET_VRING_ENDIAN: r = vhost_get_vring_endian(vq, idx, argp); break; case VHOST_SET_VRING_BUSYLOOP_TIMEOUT: if (copy_from_user(&s, argp, sizeof(s))) { r = -EFAULT; break; } vq->busyloop_timeout = s.num; break; case VHOST_GET_VRING_BUSYLOOP_TIMEOUT: s.index = idx; s.num = vq->busyloop_timeout; if (copy_to_user(argp, &s, sizeof(s))) r = -EFAULT; break; default: r = -ENOIOCTLCMD; } if (pollstop && vq->handle_kick) vhost_poll_stop(&vq->poll); if (!IS_ERR_OR_NULL(ctx)) eventfd_ctx_put(ctx); if (filep) fput(filep); if (pollstart && vq->handle_kick) r = vhost_poll_start(&vq->poll, vq->kick); mutex_unlock(&vq->mutex); if (pollstop && vq->handle_kick) vhost_dev_flush(vq->poll.dev); return r; } EXPORT_SYMBOL_GPL(vhost_vring_ioctl); int vhost_init_device_iotlb(struct vhost_dev *d) { struct vhost_iotlb *niotlb, *oiotlb; int i; niotlb = iotlb_alloc(); if (!niotlb) return -ENOMEM; oiotlb = d->iotlb; d->iotlb = niotlb; for (i = 0; i < d->nvqs; ++i) { struct vhost_virtqueue *vq = d->vqs[i]; mutex_lock(&vq->mutex); vq->iotlb = niotlb; __vhost_vq_meta_reset(vq); mutex_unlock(&vq->mutex); } vhost_iotlb_free(oiotlb); return 0; } EXPORT_SYMBOL_GPL(vhost_init_device_iotlb); /* Caller must have device mutex */ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp) { struct eventfd_ctx *ctx; u64 p; long r; int i, fd; /* If you are not the owner, you can become one */ if (ioctl == VHOST_SET_OWNER) { r = vhost_dev_set_owner(d); goto done; } /* You must be the owner to do anything else */ r = vhost_dev_check_owner(d); if (r) goto done; switch (ioctl) { case VHOST_SET_MEM_TABLE: r = vhost_set_memory(d, argp); break; case VHOST_SET_LOG_BASE: if (copy_from_user(&p, argp, sizeof p)) { r = -EFAULT; break; } if ((u64)(unsigned long)p != p) { r = -EFAULT; break; } for (i = 0; i < d->nvqs; ++i) { struct vhost_virtqueue *vq; void __user *base = (void __user *)(unsigned long)p; vq = d->vqs[i]; mutex_lock(&vq->mutex); /* If ring is inactive, will check when it's enabled. */ if (vq->private_data && !vq_log_access_ok(vq, base)) r = -EFAULT; else vq->log_base = base; mutex_unlock(&vq->mutex); } break; case VHOST_SET_LOG_FD: r = get_user(fd, (int __user *)argp); if (r < 0) break; ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd); if (IS_ERR(ctx)) { r = PTR_ERR(ctx); break; } swap(ctx, d->log_ctx); for (i = 0; i < d->nvqs; ++i) { mutex_lock(&d->vqs[i]->mutex); d->vqs[i]->log_ctx = d->log_ctx; mutex_unlock(&d->vqs[i]->mutex); } if (ctx) eventfd_ctx_put(ctx); break; default: r = -ENOIOCTLCMD; break; } done: return r; } EXPORT_SYMBOL_GPL(vhost_dev_ioctl); /* TODO: This is really inefficient. We need something like get_user() * (instruction directly accesses the data, with an exception table entry * returning -EFAULT). See Documentation/arch/x86/exception-tables.rst. 
*/ static int set_bit_to_user(int nr, void __user *addr) { unsigned long log = (unsigned long)addr; struct page *page; void *base; int bit = nr + (log % PAGE_SIZE) * 8; int r; r = pin_user_pages_fast(log, 1, FOLL_WRITE, &page); if (r < 0) return r; BUG_ON(r != 1); base = kmap_atomic(page); set_bit(bit, base); kunmap_atomic(base); unpin_user_pages_dirty_lock(&page, 1, true); return 0; } static int log_write(void __user *log_base, u64 write_address, u64 write_length) { u64 write_page = write_address / VHOST_PAGE_SIZE; int r; if (!write_length) return 0; write_length += write_address % VHOST_PAGE_SIZE; for (;;) { u64 base = (u64)(unsigned long)log_base; u64 log = base + write_page / 8; int bit = write_page % 8; if ((u64)(unsigned long)log != log) return -EFAULT; r = set_bit_to_user(bit, (void __user *)(unsigned long)log); if (r < 0) return r; if (write_length <= VHOST_PAGE_SIZE) break; write_length -= VHOST_PAGE_SIZE; write_page += 1; } return r; } static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len) { struct vhost_iotlb *umem = vq->umem; struct vhost_iotlb_map *u; u64 start, end, l, min; int r; bool hit = false; while (len) { min = len; /* More than one GPA can be mapped into a single HVA. So * iterate all possible umems here to be safe. */ list_for_each_entry(u, &umem->list, link) { if (u->addr > hva - 1 + len || u->addr - 1 + u->size < hva) continue; start = max(u->addr, hva); end = min(u->addr - 1 + u->size, hva - 1 + len); l = end - start + 1; r = log_write(vq->log_base, u->start + start - u->addr, l); if (r < 0) return r; hit = true; min = min(l, min); } if (!hit) return -EFAULT; len -= min; hva += min; } return 0; } static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len) { struct iovec *iov = vq->log_iov; int i, ret; if (!vq->iotlb) return log_write(vq->log_base, vq->log_addr + used_offset, len); ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, len, iov, 64, VHOST_ACCESS_WO); if (ret < 0) return ret; for (i = 0; i < ret; i++) { ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base, iov[i].iov_len); if (ret) return ret; } return 0; } /* * vhost_log_write() - Log in dirty page bitmap * @vq: vhost virtqueue. * @log: Array of dirty memory in GPA. * @log_num: Size of vhost_log array. * @len: The total length of memory buffer to log in the dirty bitmap. * Some drivers may only partially use pages shared via the last * vring descriptor (i.e. vhost-net RX buffer). * Use (len == U64_MAX) to indicate the driver would log all * pages of vring descriptors. * @iov: Array of dirty memory in HVA. * @count: Size of iovec array. */ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, unsigned int log_num, u64 len, struct iovec *iov, int count) { int i, r; /* Make sure data written is seen before log. */ smp_wmb(); if (vq->iotlb) { for (i = 0; i < count; i++) { r = log_write_hva(vq, (uintptr_t)iov[i].iov_base, iov[i].iov_len); if (r < 0) return r; } return 0; } for (i = 0; i < log_num; ++i) { u64 l = min(log[i].len, len); r = log_write(vq->log_base, log[i].addr, l); if (r < 0) return r; if (len != U64_MAX) len -= l; } if (vq->log_ctx) eventfd_signal(vq->log_ctx); return 0; } EXPORT_SYMBOL_GPL(vhost_log_write); static int vhost_update_used_flags(struct vhost_virtqueue *vq) { void __user *used; if (vhost_put_used_flags(vq)) return -EFAULT; if (unlikely(vq->log_used)) { /* Make sure the flag is seen before log. */ smp_wmb(); /* Log used flag write.
*/ used = &vq->used->flags; log_used(vq, (used - (void __user *)vq->used), sizeof vq->used->flags); if (vq->log_ctx) eventfd_signal(vq->log_ctx); } return 0; } static int vhost_update_avail_event(struct vhost_virtqueue *vq) { if (vhost_put_avail_event(vq)) return -EFAULT; if (unlikely(vq->log_used)) { void __user *used; /* Make sure the event is seen before log. */ smp_wmb(); /* Log avail event write */ used = vhost_avail_event(vq); log_used(vq, (used - (void __user *)vq->used), sizeof *vhost_avail_event(vq)); if (vq->log_ctx) eventfd_signal(vq->log_ctx); } return 0; } int vhost_vq_init_access(struct vhost_virtqueue *vq) { __virtio16 last_used_idx; int r; bool is_le = vq->is_le; if (!vq->private_data) return 0; vhost_init_is_le(vq); r = vhost_update_used_flags(vq); if (r) goto err; vq->signalled_used_valid = false; if (!vq->iotlb && !access_ok(&vq->used->idx, sizeof vq->used->idx)) { r = -EFAULT; goto err; } r = vhost_get_used_idx(vq, &last_used_idx); if (r) { vq_err(vq, "Can't access used idx at %p\n", &vq->used->idx); goto err; } vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx); return 0; err: vq->is_le = is_le; return r; } EXPORT_SYMBOL_GPL(vhost_vq_init_access); static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len, struct iovec iov[], int iov_size, int access) { const struct vhost_iotlb_map *map; struct vhost_dev *dev = vq->dev; struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem; struct iovec *_iov; u64 s = 0, last = addr + len - 1; int ret = 0; while ((u64)len > s) { u64 size; if (unlikely(ret >= iov_size)) { ret = -ENOBUFS; break; } map = vhost_iotlb_itree_first(umem, addr, last); if (map == NULL || map->start > addr) { if (umem != dev->iotlb) { ret = -EFAULT; break; } ret = -EAGAIN; break; } else if (!(map->perm & access)) { ret = -EPERM; break; } _iov = iov + ret; size = map->size - addr + map->start; _iov->iov_len = min((u64)len - s, size); _iov->iov_base = (void __user *)(unsigned long) (map->addr + addr - map->start); s += size; addr += size; ++ret; } if (ret == -EAGAIN) vhost_iotlb_miss(vq, addr, access); return ret; } /* Each buffer in the virtqueues is actually a chain of descriptors. This * function returns the next descriptor in the chain, * or -1U if we're at the end. */ static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc) { unsigned int next; /* If this descriptor says it doesn't chain, we're done. */ if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT))) return -1U; /* Check they're not leading us off end of descriptors. 
*/ next = vhost16_to_cpu(vq, READ_ONCE(desc->next)); return next; } static int get_indirect(struct vhost_virtqueue *vq, struct iovec iov[], unsigned int iov_size, unsigned int *out_num, unsigned int *in_num, struct vhost_log *log, unsigned int *log_num, struct vring_desc *indirect) { struct vring_desc desc; unsigned int i = 0, count, found = 0; u32 len = vhost32_to_cpu(vq, indirect->len); struct iov_iter from; int ret, access; /* Sanity check */ if (unlikely(len % sizeof desc)) { vq_err(vq, "Invalid length in indirect descriptor: " "len 0x%llx not multiple of 0x%zx\n", (unsigned long long)len, sizeof desc); return -EINVAL; } ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect, UIO_MAXIOV, VHOST_ACCESS_RO); if (unlikely(ret < 0)) { if (ret != -EAGAIN) vq_err(vq, "Translation failure %d in indirect.\n", ret); return ret; } iov_iter_init(&from, ITER_SOURCE, vq->indirect, ret, len); count = len / sizeof desc; /* Buffers are chained via a 16 bit next field, so * we can have at most 2^16 of these. */ if (unlikely(count > USHRT_MAX + 1)) { vq_err(vq, "Indirect buffer length too big: %d\n", indirect->len); return -E2BIG; } do { unsigned iov_count = *in_num + *out_num; if (unlikely(++found > count)) { vq_err(vq, "Loop detected: last one at %u " "indirect size %u\n", i, count); return -EINVAL; } if (unlikely(!copy_from_iter_full(&desc, sizeof(desc), &from))) { vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n", i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); return -EINVAL; } if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) { vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n", i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); return -EINVAL; } if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) access = VHOST_ACCESS_WO; else access = VHOST_ACCESS_RO; ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr), vhost32_to_cpu(vq, desc.len), iov + iov_count, iov_size - iov_count, access); if (unlikely(ret < 0)) { if (ret != -EAGAIN) vq_err(vq, "Translation failure %d indirect idx %d\n", ret, i); return ret; } /* If this is an input descriptor, increment that count. */ if (access == VHOST_ACCESS_WO) { *in_num += ret; if (unlikely(log && ret)) { log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); log[*log_num].len = vhost32_to_cpu(vq, desc.len); ++*log_num; } } else { /* If it's an output descriptor, they're all supposed * to come before any input descriptors. */ if (unlikely(*in_num)) { vq_err(vq, "Indirect descriptor " "has out after in: idx %d\n", i); return -EINVAL; } *out_num += ret; } } while ((i = next_desc(vq, &desc)) != -1); return 0; } /* This looks in the virtqueue and for the first available buffer, and converts * it to an iovec for convenient access. Since descriptors consist of some * number of output then some number of input descriptors, it's actually two * iovecs, but we pack them into one and note how many of each there were. * * This function returns the descriptor number found, or vq->num (which is * never a valid descriptor number) if none was found. A negative code is * returned on error. 
*/ int vhost_get_vq_desc(struct vhost_virtqueue *vq, struct iovec iov[], unsigned int iov_size, unsigned int *out_num, unsigned int *in_num, struct vhost_log *log, unsigned int *log_num) { struct vring_desc desc; unsigned int i, head, found = 0; u16 last_avail_idx = vq->last_avail_idx; __virtio16 ring_head; int ret, access; if (vq->avail_idx == vq->last_avail_idx) { ret = vhost_get_avail_idx(vq); if (unlikely(ret < 0)) return ret; if (!ret) return vq->num; } /* Grab the next descriptor number they're advertising, and increment * the index we've seen. */ if (unlikely(vhost_get_avail_head(vq, &ring_head, last_avail_idx))) { vq_err(vq, "Failed to read head: idx %d address %p\n", last_avail_idx, &vq->avail->ring[last_avail_idx % vq->num]); return -EFAULT; } head = vhost16_to_cpu(vq, ring_head); /* If their number is silly, that's an error. */ if (unlikely(head >= vq->num)) { vq_err(vq, "Guest says index %u > %u is available", head, vq->num); return -EINVAL; } /* When we start there are none of either input nor output. */ *out_num = *in_num = 0; if (unlikely(log)) *log_num = 0; i = head; do { unsigned iov_count = *in_num + *out_num; if (unlikely(i >= vq->num)) { vq_err(vq, "Desc index is %u > %u, head = %u", i, vq->num, head); return -EINVAL; } if (unlikely(++found > vq->num)) { vq_err(vq, "Loop detected: last one at %u " "vq size %u head %u\n", i, vq->num, head); return -EINVAL; } ret = vhost_get_desc(vq, &desc, i); if (unlikely(ret)) { vq_err(vq, "Failed to get descriptor: idx %d addr %p\n", i, vq->desc + i); return -EFAULT; } if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) { ret = get_indirect(vq, iov, iov_size, out_num, in_num, log, log_num, &desc); if (unlikely(ret < 0)) { if (ret != -EAGAIN) vq_err(vq, "Failure detected " "in indirect descriptor at idx %d\n", i); return ret; } continue; } if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) access = VHOST_ACCESS_WO; else access = VHOST_ACCESS_RO; ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr), vhost32_to_cpu(vq, desc.len), iov + iov_count, iov_size - iov_count, access); if (unlikely(ret < 0)) { if (ret != -EAGAIN) vq_err(vq, "Translation failure %d descriptor idx %d\n", ret, i); return ret; } if (access == VHOST_ACCESS_WO) { /* If this is an input descriptor, * increment that count. */ *in_num += ret; if (unlikely(log && ret)) { log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); log[*log_num].len = vhost32_to_cpu(vq, desc.len); ++*log_num; } } else { /* If it's an output descriptor, they're all supposed * to come before any input descriptors. */ if (unlikely(*in_num)) { vq_err(vq, "Descriptor has out after in: " "idx %d\n", i); return -EINVAL; } *out_num += ret; } } while ((i = next_desc(vq, &desc)) != -1); /* On success, increment avail index. */ vq->last_avail_idx++; /* Assume notifications from guest are disabled at this point, * if they aren't we would need to update avail_event index. */ BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY)); return head; } EXPORT_SYMBOL_GPL(vhost_get_vq_desc); /* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */ void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n) { vq->last_avail_idx -= n; } EXPORT_SYMBOL_GPL(vhost_discard_vq_desc); /* After we've used one of their buffers, we tell them about it. We'll then * want to notify the guest, using eventfd. 
*/ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) { struct vring_used_elem heads = { cpu_to_vhost32(vq, head), cpu_to_vhost32(vq, len) }; return vhost_add_used_n(vq, &heads, 1); } EXPORT_SYMBOL_GPL(vhost_add_used); static int __vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, unsigned count) { vring_used_elem_t __user *used; u16 old, new; int start; start = vq->last_used_idx & (vq->num - 1); used = vq->used->ring + start; if (vhost_put_used(vq, heads, start, count)) { vq_err(vq, "Failed to write used"); return -EFAULT; } if (unlikely(vq->log_used)) { /* Make sure data is seen before log. */ smp_wmb(); /* Log used ring entry write. */ log_used(vq, ((void __user *)used - (void __user *)vq->used), count * sizeof *used); } old = vq->last_used_idx; new = (vq->last_used_idx += count); /* If the driver never bothers to signal in a very long while, * used index might wrap around. If that happens, invalidate * signalled_used index we stored. TODO: make sure driver * signals at least once in 2^16 and remove this. */ if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old))) vq->signalled_used_valid = false; return 0; } /* After we've used one of their buffers, we tell them about it. We'll then * want to notify the guest, using eventfd. */ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, unsigned count) { int start, n, r; start = vq->last_used_idx & (vq->num - 1); n = vq->num - start; if (n < count) { r = __vhost_add_used_n(vq, heads, n); if (r < 0) return r; heads += n; count -= n; } r = __vhost_add_used_n(vq, heads, count); /* Make sure buffer is written before we update index. */ smp_wmb(); if (vhost_put_used_idx(vq)) { vq_err(vq, "Failed to increment used idx"); return -EFAULT; } if (unlikely(vq->log_used)) { /* Make sure used idx is seen before log. */ smp_wmb(); /* Log used index update. */ log_used(vq, offsetof(struct vring_used, idx), sizeof vq->used->idx); if (vq->log_ctx) eventfd_signal(vq->log_ctx); } return r; } EXPORT_SYMBOL_GPL(vhost_add_used_n); static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) { __u16 old, new; __virtio16 event; bool v; /* Flush out used index updates. This is paired * with the barrier that the Guest executes when enabling * interrupts. */ smp_mb(); if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) && unlikely(vq->avail_idx == vq->last_avail_idx)) return true; if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { __virtio16 flags; if (vhost_get_avail_flags(vq, &flags)) { vq_err(vq, "Failed to get flags"); return true; } return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT)); } old = vq->signalled_used; v = vq->signalled_used_valid; new = vq->signalled_used = vq->last_used_idx; vq->signalled_used_valid = true; if (unlikely(!v)) return true; if (vhost_get_used_event(vq, &event)) { vq_err(vq, "Failed to get used event idx"); return true; } return vring_need_event(vhost16_to_cpu(vq, event), new, old); } /* This actually signals the guest, using eventfd. */ void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) { /* Signal the Guest tell them we used something up. */ if (vq->call_ctx.ctx && vhost_notify(dev, vq)) eventfd_signal(vq->call_ctx.ctx); } EXPORT_SYMBOL_GPL(vhost_signal); /* And here's the combo meal deal. Supersize me! 
*/ void vhost_add_used_and_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq, unsigned int head, int len) { vhost_add_used(vq, head, len); vhost_signal(dev, vq); } EXPORT_SYMBOL_GPL(vhost_add_used_and_signal); /* multi-buffer version of vhost_add_used_and_signal */ void vhost_add_used_and_signal_n(struct vhost_dev *dev, struct vhost_virtqueue *vq, struct vring_used_elem *heads, unsigned count) { vhost_add_used_n(vq, heads, count); vhost_signal(dev, vq); } EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n); /* return true if we're sure that available ring is empty */ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq) { int r; if (vq->avail_idx != vq->last_avail_idx) return false; r = vhost_get_avail_idx(vq); /* Note: we treat error as non-empty here */ return r == 0; } EXPORT_SYMBOL_GPL(vhost_vq_avail_empty); /* OK, now we need to know about added descriptors. */ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) { int r; if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY)) return false; vq->used_flags &= ~VRING_USED_F_NO_NOTIFY; if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { r = vhost_update_used_flags(vq); if (r) { vq_err(vq, "Failed to enable notification at %p: %d\n", &vq->used->flags, r); return false; } } else { r = vhost_update_avail_event(vq); if (r) { vq_err(vq, "Failed to update avail event index at %p: %d\n", vhost_avail_event(vq), r); return false; } } /* They could have slipped one in as we were doing that: make * sure it's written, then check again. */ smp_mb(); r = vhost_get_avail_idx(vq); /* Note: we treat error as empty here */ if (unlikely(r < 0)) return false; return r; } EXPORT_SYMBOL_GPL(vhost_enable_notify); /* We don't need to be notified again. */ void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) { int r; if (vq->used_flags & VRING_USED_F_NO_NOTIFY) return; vq->used_flags |= VRING_USED_F_NO_NOTIFY; if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) { r = vhost_update_used_flags(vq); if (r) vq_err(vq, "Failed to disable notification at %p: %d\n", &vq->used->flags, r); } } EXPORT_SYMBOL_GPL(vhost_disable_notify); /* Create a new message. */ struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type) { /* Make sure all padding within the structure is initialized.
*/ struct vhost_msg_node *node = kzalloc(sizeof(*node), GFP_KERNEL); if (!node) return NULL; node->vq = vq; node->msg.type = type; return node; } EXPORT_SYMBOL_GPL(vhost_new_msg); void vhost_enqueue_msg(struct vhost_dev *dev, struct list_head *head, struct vhost_msg_node *node) { spin_lock(&dev->iotlb_lock); list_add_tail(&node->node, head); spin_unlock(&dev->iotlb_lock); wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM); } EXPORT_SYMBOL_GPL(vhost_enqueue_msg); struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev, struct list_head *head) { struct vhost_msg_node *node = NULL; spin_lock(&dev->iotlb_lock); if (!list_empty(head)) { node = list_first_entry(head, struct vhost_msg_node, node); list_del(&node->node); } spin_unlock(&dev->iotlb_lock); return node; } EXPORT_SYMBOL_GPL(vhost_dequeue_msg); void vhost_set_backend_features(struct vhost_dev *dev, u64 features) { struct vhost_virtqueue *vq; int i; mutex_lock(&dev->mutex); for (i = 0; i < dev->nvqs; ++i) { vq = dev->vqs[i]; mutex_lock(&vq->mutex); vq->acked_backend_features = features; mutex_unlock(&vq->mutex); } mutex_unlock(&dev->mutex); } EXPORT_SYMBOL_GPL(vhost_set_backend_features); static int __init vhost_init(void) { return 0; } static void __exit vhost_exit(void) { } module_init(vhost_init); module_exit(vhost_exit); MODULE_VERSION("0.0.1"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Michael S. Tsirkin"); MODULE_DESCRIPTION("Host kernel accelerator for virtio");
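/*
 * For illustration only: the helpers above (vhost_get_vq_desc(),
 * vhost_add_used_and_signal(), vhost_enable_notify() and friends) are
 * meant to be driven from a device's kick handler. Below is a minimal
 * sketch of such a loop, modelled on how existing vhost devices use
 * this API; the function name and the "consume" step are hypothetical
 * and not part of this file.
 */
static void example_handle_kick_sketch(struct vhost_virtqueue *vq)
{
	struct vhost_dev *dev = vq->dev;
	unsigned int out, in;
	int head;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(dev, vq);	/* batch: suppress further guest kicks */

	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (unlikely(head < 0))
			break;		/* error, e.g. -EAGAIN on an IOTLB miss */
		if (head == vq->num) {
			/* Ring empty: re-enable notify, then re-check for a race. */
			if (unlikely(vhost_enable_notify(dev, vq))) {
				vhost_disable_notify(dev, vq);
				continue;
			}
			break;
		}
		/* ... consume the 'out' then 'in' iovecs in vq->iov here ... */
		vhost_add_used_and_signal(dev, vq, head, 0);
	}
	mutex_unlock(&vq->mutex);
}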
// SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/pagemap.h> #include <linux/sched.h> #include <media/frame_vector.h> /** * get_vaddr_frames() - map virtual addresses to pfns * @start: starting user address * @nr_frames: number of pages / pfns from start to map * @write: the mapped address has write permission * @vec: structure which receives pages / pfns of the addresses mapped. * It should have space for at least nr_frames entries. * * This function maps virtual addresses from @start and fills @vec structure * with page frame numbers or page pointers to corresponding pages (choice * depends on the type of the vma underlying the virtual address). If @start * belongs to a normal vma, the function grabs a reference to each of the pages * to pin them in memory. If @start belongs to VM_IO | VM_PFNMAP vma, we don't * touch page structures and the caller must make sure pfns aren't reused for * anything else while they are in use. * * The function returns the number of pages mapped, which may be less than * @nr_frames. In particular we stop mapping if there are more vmas of * different type underlying the specified range of virtual addresses. * When the function isn't able to map a single page, it returns an error. * * Note that get_vaddr_frames() cannot follow VM_IO mappings. It used * to be able to do that, but that could (racily) return non-refcounted * pfns. * * This function takes care of grabbing mmap_lock as necessary. */ int get_vaddr_frames(unsigned long start, unsigned int nr_frames, bool write, struct frame_vector *vec) { int ret; unsigned int gup_flags = FOLL_LONGTERM; if (nr_frames == 0) return 0; if (WARN_ON_ONCE(nr_frames > vec->nr_allocated)) nr_frames = vec->nr_allocated; start = untagged_addr(start); if (write) gup_flags |= FOLL_WRITE; ret = pin_user_pages_fast(start, nr_frames, gup_flags, (struct page **)(vec->ptrs)); vec->got_ref = true; vec->is_pfns = false; vec->nr_frames = ret; if (likely(ret > 0)) return ret; vec->nr_frames = 0; return ret ? ret : -EFAULT; } EXPORT_SYMBOL(get_vaddr_frames); /** * put_vaddr_frames() - drop references to pages if get_vaddr_frames() acquired * them * @vec: frame vector to put * * Drop references to pages if get_vaddr_frames() acquired them. We also * invalidate the frame vector so that it is prepared for the next call into * get_vaddr_frames(). */ void put_vaddr_frames(struct frame_vector *vec) { struct page **pages; if (!vec->got_ref) goto out; pages = frame_vector_pages(vec); /* * frame_vector_pages() might need to do a conversion when * get_vaddr_frames() got pages but vec was later converted to pfns. * But it shouldn't really fail to convert pfns back...
*/ if (WARN_ON(IS_ERR(pages))) goto out; unpin_user_pages(pages, vec->nr_frames); vec->got_ref = false; out: vec->nr_frames = 0; } EXPORT_SYMBOL(put_vaddr_frames); /** * frame_vector_to_pages - convert frame vector to contain page pointers * @vec: frame vector to convert * * Convert @vec to contain array of page pointers. If the conversion is * successful, return 0. Otherwise return an error. Note that we do not grab * page references for the page structures. */ int frame_vector_to_pages(struct frame_vector *vec) { int i; unsigned long *nums; struct page **pages; if (!vec->is_pfns) return 0; nums = frame_vector_pfns(vec); for (i = 0; i < vec->nr_frames; i++) if (!pfn_valid(nums[i])) return -EINVAL; pages = (struct page **)nums; for (i = 0; i < vec->nr_frames; i++) pages[i] = pfn_to_page(nums[i]); vec->is_pfns = false; return 0; } EXPORT_SYMBOL(frame_vector_to_pages); /** * frame_vector_to_pfns - convert frame vector to contain pfns * @vec: frame vector to convert * * Convert @vec to contain array of pfns. */ void frame_vector_to_pfns(struct frame_vector *vec) { int i; unsigned long *nums; struct page **pages; if (vec->is_pfns) return; pages = (struct page **)(vec->ptrs); nums = (unsigned long *)pages; for (i = 0; i < vec->nr_frames; i++) nums[i] = page_to_pfn(pages[i]); vec->is_pfns = true; } EXPORT_SYMBOL(frame_vector_to_pfns); /** * frame_vector_create() - allocate & initialize structure for pinned pfns * @nr_frames: number of pfn slots we should reserve * * Allocate and initialize a struct frame_vector able to hold @nr_frames * pfns. */ struct frame_vector *frame_vector_create(unsigned int nr_frames) { struct frame_vector *vec; int size = struct_size(vec, ptrs, nr_frames); if (WARN_ON_ONCE(nr_frames == 0)) return NULL; /* * This is absurdly high. It's here just to avoid strange effects when * arithmetic overflows. */ if (WARN_ON_ONCE(nr_frames > INT_MAX / sizeof(void *) / 2)) return NULL; /* * Avoid higher order allocations, use vmalloc instead. It should * be rare anyway. */ vec = kvmalloc(size, GFP_KERNEL); if (!vec) return NULL; vec->nr_allocated = nr_frames; vec->nr_frames = 0; return vec; } EXPORT_SYMBOL(frame_vector_create); /** * frame_vector_destroy() - free memory allocated to carry frame vector * @vec: Frame vector to free * * Free structure allocated by frame_vector_create() to carry frames. */ void frame_vector_destroy(struct frame_vector *vec) { /* Make sure put_vaddr_frames() got called properly... */ VM_BUG_ON(vec->nr_frames > 0); kvfree(vec); } EXPORT_SYMBOL(frame_vector_destroy);
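/*
 * For illustration only: the exported helpers above compose into a
 * pin/convert/unpin lifecycle. A minimal sketch of a caller pinning a
 * user range backed by a normal vma follows; the function name and the
 * "use pages" step are hypothetical, not part of this file.
 */
static int example_pin_user_range(unsigned long uaddr, unsigned int n)
{
	struct frame_vector *vec;
	struct page **pages;
	int ret;

	vec = frame_vector_create(n);
	if (!vec)
		return -ENOMEM;

	ret = get_vaddr_frames(uaddr, n, true, vec);
	if (ret < 0)
		goto out_destroy;

	pages = frame_vector_pages(vec);	/* page pointers for a normal vma */
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put;
	}
	/* ... use pages[0 .. vec->nr_frames - 1] while they stay pinned ... */
	ret = 0;
out_put:
	put_vaddr_frames(vec);			/* drop the pins, reset nr_frames */
out_destroy:
	frame_vector_destroy(vec);
	return ret;
}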
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2011-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */ /* Kernel module implementing an IP set type: the hash:net,iface type */ #include <linux/jhash.h> #include <linux/module.h> #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/random.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/netlink.h> #include <linux/netfilter.h> #include <linux/netfilter_bridge.h> #include <linux/netfilter/ipset/pfxlen.h> #include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set_hash.h> #define IPSET_TYPE_REV_MIN 0 /* 1 nomatch flag support added */ /* 2 /0 support added */ /* 3 Counters support added */ /* 4 Comments support added */ /* 5 Forceadd support added */ /* 6 skbinfo support added */ /* 7 interface wildcard support added */ #define IPSET_TYPE_REV_MAX 8 /* bucketsize, initval support added */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>"); IP_SET_MODULE_DESC("hash:net,iface", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX); MODULE_ALIAS("ip_set_hash:net,iface"); /* Type specific function prefix */ #define HTYPE hash_netiface #define IP_SET_HASH_WITH_NETS #define IP_SET_HASH_WITH_MULTI #define IP_SET_HASH_WITH_NET0 #define STRSCPY(a, b) strscpy(a, b, IFNAMSIZ) /* IPv4 variant */ struct hash_netiface4_elem_hashed { __be32 ip; u8 physdev; u8 cidr; u8 nomatch; u8 elem; }; /* Member elements */ struct hash_netiface4_elem { __be32 ip; u8 physdev; u8 cidr; u8 nomatch; u8 elem; u8 wildcard; char
iface[IFNAMSIZ]; }; /* Common functions */ static bool hash_netiface4_data_equal(const struct hash_netiface4_elem *ip1, const struct hash_netiface4_elem *ip2, u32 *multi) { return ip1->ip == ip2->ip && ip1->cidr == ip2->cidr && (++*multi) && ip1->physdev == ip2->physdev && (ip1->wildcard ? strncmp(ip1->iface, ip2->iface, strlen(ip1->iface)) == 0 : strcmp(ip1->iface, ip2->iface) == 0); } static int hash_netiface4_do_data_match(const struct hash_netiface4_elem *elem) { return elem->nomatch ? -ENOTEMPTY : 1; } static void hash_netiface4_data_set_flags(struct hash_netiface4_elem *elem, u32 flags) { elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH; } static void hash_netiface4_data_reset_flags(struct hash_netiface4_elem *elem, u8 *flags) { swap(*flags, elem->nomatch); } static void hash_netiface4_data_netmask(struct hash_netiface4_elem *elem, u8 cidr) { elem->ip &= ip_set_netmask(cidr); elem->cidr = cidr; } static bool hash_netiface4_data_list(struct sk_buff *skb, const struct hash_netiface4_elem *data) { u32 flags = (data->physdev ? IPSET_FLAG_PHYSDEV : 0) | (data->wildcard ? IPSET_FLAG_IFACE_WILDCARD : 0); if (data->nomatch) flags |= IPSET_FLAG_NOMATCH; if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) || nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) || (flags && nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) goto nla_put_failure; return false; nla_put_failure: return true; } static void hash_netiface4_data_next(struct hash_netiface4_elem *next, const struct hash_netiface4_elem *d) { next->ip = d->ip; } #define MTYPE hash_netiface4 #define HOST_MASK 32 #define HKEY_DATALEN sizeof(struct hash_netiface4_elem_hashed) #include "ip_set_hash_gen.h" #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) static const char *get_physindev_name(const struct sk_buff *skb, struct net *net) { struct net_device *dev = nf_bridge_get_physindev(skb, net); return dev ? dev->name : NULL; } static const char *get_physoutdev_name(const struct sk_buff *skb) { struct net_device *dev = nf_bridge_get_physoutdev(skb); return dev ? dev->name : NULL; } #endif static int hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, struct ip_set_adt_opt *opt) { struct hash_netiface4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netiface4_elem e = { .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK), .elem = 1, }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (adt == IPSET_TEST) e.cidr = HOST_MASK; ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip); e.ip &= ip_set_netmask(e.cidr); #define IFACE(dir) (par->state->dir ? par->state->dir->name : "") #define SRCDIR (opt->flags & IPSET_DIM_TWO_SRC) if (opt->cmdflags & IPSET_FLAG_PHYSDEV) { #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) const char *eiface = SRCDIR ? get_physindev_name(skb, xt_net(par)) : get_physoutdev_name(skb); if (!eiface) return -EINVAL; STRSCPY(e.iface, eiface); e.physdev = 1; #endif } else { STRSCPY(e.iface, SRCDIR ? 
IFACE(in) : IFACE(out)); } if (strlen(e.iface) == 0) return -EINVAL; return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); } static int hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { struct hash_netiface4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip = 0, ip_to = 0, i = 0; int ret; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IFACE] || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) return -IPSET_ERR_PROTOCOL; ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); if (ret) return ret; ret = ip_set_get_extensions(set, tb, &ext); if (ret) return ret; if (tb[IPSET_ATTR_CIDR]) { e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); if (e.cidr > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; } nla_strscpy(e.iface, tb[IPSET_ATTR_IFACE], IFNAMSIZ); if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_PHYSDEV) e.physdev = 1; if (cadt_flags & IPSET_FLAG_NOMATCH) flags |= (IPSET_FLAG_NOMATCH << 16); if (cadt_flags & IPSET_FLAG_IFACE_WILDCARD) e.wildcard = 1; } if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) { e.ip = htonl(ip & ip_set_hostmask(e.cidr)); ret = adtfn(set, &e, &ext, &ext, flags); return ip_set_enomatch(ret, flags, adt, set) ? -ret : ip_set_eexist(ret, flags) ? 0 : ret; } if (tb[IPSET_ATTR_IP_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) return ret; if (ip_to < ip) swap(ip, ip_to); if (ip + UINT_MAX == ip_to) return -IPSET_ERR_HASH_RANGE; } else { ip_set_mask_from_to(ip, ip_to, e.cidr); } if (retried) ip = ntohl(h->next.ip); do { i++; e.ip = htonl(ip); if (i > IPSET_MAX_RANGE) { hash_netiface4_data_next(&h->next, &e); return -ERANGE; } ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr); ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) return ret; ret = 0; } while (ip++ < ip_to); return ret; } /* IPv6 variant */ struct hash_netiface6_elem_hashed { union nf_inet_addr ip; u8 physdev; u8 cidr; u8 nomatch; u8 elem; }; struct hash_netiface6_elem { union nf_inet_addr ip; u8 physdev; u8 cidr; u8 nomatch; u8 elem; u8 wildcard; char iface[IFNAMSIZ]; }; /* Common functions */ static bool hash_netiface6_data_equal(const struct hash_netiface6_elem *ip1, const struct hash_netiface6_elem *ip2, u32 *multi) { return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) && ip1->cidr == ip2->cidr && (++*multi) && ip1->physdev == ip2->physdev && (ip1->wildcard ? strncmp(ip1->iface, ip2->iface, strlen(ip1->iface)) == 0 : strcmp(ip1->iface, ip2->iface) == 0); } static int hash_netiface6_do_data_match(const struct hash_netiface6_elem *elem) { return elem->nomatch ? -ENOTEMPTY : 1; } static void hash_netiface6_data_set_flags(struct hash_netiface6_elem *elem, u32 flags) { elem->nomatch = (flags >> 16) & IPSET_FLAG_NOMATCH; } static void hash_netiface6_data_reset_flags(struct hash_netiface6_elem *elem, u8 *flags) { swap(*flags, elem->nomatch); } static void hash_netiface6_data_netmask(struct hash_netiface6_elem *elem, u8 cidr) { ip6_netmask(&elem->ip, cidr); elem->cidr = cidr; } static bool hash_netiface6_data_list(struct sk_buff *skb, const struct hash_netiface6_elem *data) { u32 flags = (data->physdev ? IPSET_FLAG_PHYSDEV : 0) | (data->wildcard ? 
IPSET_FLAG_IFACE_WILDCARD : 0); if (data->nomatch) flags |= IPSET_FLAG_NOMATCH; if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) || nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) || (flags && nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) goto nla_put_failure; return false; nla_put_failure: return true; } static void hash_netiface6_data_next(struct hash_netiface6_elem *next, const struct hash_netiface6_elem *d) { } #undef MTYPE #undef HOST_MASK #define MTYPE hash_netiface6 #define HOST_MASK 128 #define HKEY_DATALEN sizeof(struct hash_netiface6_elem_hashed) #define IP_SET_EMIT_CREATE #include "ip_set_hash_gen.h" static int hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, struct ip_set_adt_opt *opt) { struct hash_netiface6 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netiface6_elem e = { .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK), .elem = 1, }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (adt == IPSET_TEST) e.cidr = HOST_MASK; ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6); ip6_netmask(&e.ip, e.cidr); if (opt->cmdflags & IPSET_FLAG_PHYSDEV) { #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) const char *eiface = SRCDIR ? get_physindev_name(skb, xt_net(par)) : get_physoutdev_name(skb); if (!eiface) return -EINVAL; STRSCPY(e.iface, eiface); e.physdev = 1; #endif } else { STRSCPY(e.iface, SRCDIR ? IFACE(in) : IFACE(out)); } if (strlen(e.iface) == 0) return -EINVAL; return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); } static int hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_netiface6_elem e = { .cidr = HOST_MASK, .elem = 1 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); int ret; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IFACE] || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) return -IPSET_ERR_PROTOCOL; if (unlikely(tb[IPSET_ATTR_IP_TO])) return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip); if (ret) return ret; ret = ip_set_get_extensions(set, tb, &ext); if (ret) return ret; if (tb[IPSET_ATTR_CIDR]) { e.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); if (e.cidr > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; } ip6_netmask(&e.ip, e.cidr); nla_strscpy(e.iface, tb[IPSET_ATTR_IFACE], IFNAMSIZ); if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_PHYSDEV) e.physdev = 1; if (cadt_flags & IPSET_FLAG_NOMATCH) flags |= (IPSET_FLAG_NOMATCH << 16); if (cadt_flags & IPSET_FLAG_IFACE_WILDCARD) e.wildcard = 1; } ret = adtfn(set, &e, &ext, &ext, flags); return ip_set_enomatch(ret, flags, adt, set) ? -ret : ip_set_eexist(ret, flags) ? 
0 : ret; } static struct ip_set_type hash_netiface_type __read_mostly = { .name = "hash:net,iface", .protocol = IPSET_PROTOCOL, .features = IPSET_TYPE_IP | IPSET_TYPE_IFACE | IPSET_TYPE_NOMATCH, .dimension = IPSET_DIM_TWO, .family = NFPROTO_UNSPEC, .revision_min = IPSET_TYPE_REV_MIN, .revision_max = IPSET_TYPE_REV_MAX, .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE, .create = hash_netiface_create, .create_policy = { [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, [IPSET_ATTR_INITVAL] = { .type = NLA_U32 }, [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 }, [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, }, .adt_policy = { [IPSET_ATTR_IP] = { .type = NLA_NESTED }, [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, [IPSET_ATTR_IFACE] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING, .len = IPSET_MAX_COMMENT_SIZE }, [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; static int __init hash_netiface_init(void) { return ip_set_type_register(&hash_netiface_type); } static void __exit hash_netiface_fini(void) { rcu_barrier(); ip_set_type_unregister(&hash_netiface_type); } module_init(hash_netiface_init); module_exit(hash_netiface_fini);
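/*
 * For reference, the set type registered above is driven from userspace
 * with ipset(8) and matched with the iptables "set" module. An
 * illustrative session (set and interface names are placeholders):
 *
 *   ipset create foo hash:net,iface
 *   ipset add foo 192.168.0.0/24,eth0
 *   ipset add foo 10.1.0.0/16,physdev:eth1
 *   iptables -A INPUT -m set --match-set foo src,src -j DROP
 */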
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
*/ #include <linux/fs_context.h> #include <linux/fs_parser.h> #include <linux/module.h> #include <linux/init.h> #include <linux/time.h> #include <linux/mount.h> #include <linux/cred.h> #include <linux/statfs.h> #include <linux/seq_file.h> #include <linux/blkdev.h> #include <linux/fs_struct.h> #include <linux/iversion.h> #include <linux/nls.h> #include <linux/buffer_head.h> #include <linux/magic.h> #include "exfat_raw.h" #include "exfat_fs.h" static char exfat_default_iocharset[] = CONFIG_EXFAT_DEFAULT_IOCHARSET; static struct kmem_cache *exfat_inode_cachep; static void exfat_free_iocharset(struct exfat_sb_info *sbi) { if (sbi->options.iocharset != exfat_default_iocharset) kfree(sbi->options.iocharset); } static void exfat_put_super(struct super_block *sb) { struct exfat_sb_info *sbi = EXFAT_SB(sb); mutex_lock(&sbi->s_lock); exfat_clear_volume_dirty(sb); exfat_free_bitmap(sbi); brelse(sbi->boot_bh); mutex_unlock(&sbi->s_lock); } static int exfat_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct exfat_sb_info *sbi = EXFAT_SB(sb); unsigned long long id = huge_encode_dev(sb->s_bdev->bd_dev); buf->f_type = sb->s_magic; buf->f_bsize = sbi->cluster_size; buf->f_blocks = sbi->num_clusters - 2; /* clu 0 & 1 */ buf->f_bfree = buf->f_blocks - sbi->used_clusters; buf->f_bavail = buf->f_bfree; buf->f_fsid = u64_to_fsid(id); /* Unicode utf16 255 characters */ buf->f_namelen = EXFAT_MAX_FILE_LEN * NLS_MAX_CHARSET_SIZE; return 0; } static int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flags) { struct exfat_sb_info *sbi = EXFAT_SB(sb); struct boot_sector *p_boot = (struct boot_sector *)sbi->boot_bh->b_data; /* retain persistent-flags */ new_flags |= sbi->vol_flags_persistent; /* flags are not changed */ if (sbi->vol_flags == new_flags) return 0; sbi->vol_flags = new_flags; /* skip updating volume dirty flag, * if this volume has been mounted with read-only */ if (sb_rdonly(sb)) return 0; p_boot->vol_flags = cpu_to_le16(new_flags); set_buffer_uptodate(sbi->boot_bh); mark_buffer_dirty(sbi->boot_bh); __sync_dirty_buffer(sbi->boot_bh, REQ_SYNC | REQ_FUA | REQ_PREFLUSH); return 0; } int exfat_set_volume_dirty(struct super_block *sb) { struct exfat_sb_info *sbi = EXFAT_SB(sb); return exfat_set_vol_flags(sb, sbi->vol_flags | VOLUME_DIRTY); } int exfat_clear_volume_dirty(struct super_block *sb) { struct exfat_sb_info *sbi = EXFAT_SB(sb); return exfat_set_vol_flags(sb, sbi->vol_flags & ~VOLUME_DIRTY); } static int exfat_show_options(struct seq_file *m, struct dentry *root) { struct super_block *sb = root->d_sb; struct exfat_sb_info *sbi = EXFAT_SB(sb); struct exfat_mount_options *opts = &sbi->options; /* Show partition info */ if (!uid_eq(opts->fs_uid, GLOBAL_ROOT_UID)) seq_printf(m, ",uid=%u", from_kuid_munged(&init_user_ns, opts->fs_uid)); if (!gid_eq(opts->fs_gid, GLOBAL_ROOT_GID)) seq_printf(m, ",gid=%u", from_kgid_munged(&init_user_ns, opts->fs_gid)); seq_printf(m, ",fmask=%04o,dmask=%04o", opts->fs_fmask, opts->fs_dmask); if (opts->allow_utime) seq_printf(m, ",allow_utime=%04o", opts->allow_utime); if (opts->utf8) seq_puts(m, ",iocharset=utf8"); else if (sbi->nls_io) seq_printf(m, ",iocharset=%s", sbi->nls_io->charset); if (opts->errors == EXFAT_ERRORS_CONT) seq_puts(m, ",errors=continue"); else if (opts->errors == EXFAT_ERRORS_PANIC) seq_puts(m, ",errors=panic"); else seq_puts(m, ",errors=remount-ro"); if (opts->discard) seq_puts(m, ",discard"); if (opts->keep_last_dots) seq_puts(m, ",keep_last_dots"); if (opts->sys_tz) seq_puts(m, 
",sys_tz"); else if (opts->time_offset) seq_printf(m, ",time_offset=%d", opts->time_offset); if (opts->zero_size_dir) seq_puts(m, ",zero_size_dir"); return 0; } int exfat_force_shutdown(struct super_block *sb, u32 flags) { int ret; struct exfat_sb_info *sbi = sb->s_fs_info; struct exfat_mount_options *opts = &sbi->options; if (exfat_forced_shutdown(sb)) return 0; switch (flags) { case EXFAT_GOING_DOWN_DEFAULT: case EXFAT_GOING_DOWN_FULLSYNC: ret = bdev_freeze(sb->s_bdev); if (ret) return ret; bdev_thaw(sb->s_bdev); set_bit(EXFAT_FLAGS_SHUTDOWN, &sbi->s_exfat_flags); break; case EXFAT_GOING_DOWN_NOSYNC: set_bit(EXFAT_FLAGS_SHUTDOWN, &sbi->s_exfat_flags); break; default: return -EINVAL; } if (opts->discard) opts->discard = 0; return 0; } static void exfat_shutdown(struct super_block *sb) { exfat_force_shutdown(sb, EXFAT_GOING_DOWN_NOSYNC); } static struct inode *exfat_alloc_inode(struct super_block *sb) { struct exfat_inode_info *ei; ei = alloc_inode_sb(sb, exfat_inode_cachep, GFP_NOFS); if (!ei) return NULL; init_rwsem(&ei->truncate_lock); return &ei->vfs_inode; } static void exfat_free_inode(struct inode *inode) { kmem_cache_free(exfat_inode_cachep, EXFAT_I(inode)); } static const struct super_operations exfat_sops = { .alloc_inode = exfat_alloc_inode, .free_inode = exfat_free_inode, .write_inode = exfat_write_inode, .evict_inode = exfat_evict_inode, .put_super = exfat_put_super, .statfs = exfat_statfs, .show_options = exfat_show_options, .shutdown = exfat_shutdown, }; enum { Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask, Opt_allow_utime, Opt_charset, Opt_errors, Opt_discard, Opt_keep_last_dots, Opt_sys_tz, Opt_time_offset, Opt_zero_size_dir, /* Deprecated options */ Opt_utf8, Opt_debug, Opt_namecase, Opt_codepage, }; static const struct constant_table exfat_param_enums[] = { { "continue", EXFAT_ERRORS_CONT }, { "panic", EXFAT_ERRORS_PANIC }, { "remount-ro", EXFAT_ERRORS_RO }, {} }; static const struct fs_parameter_spec exfat_parameters[] = { fsparam_uid("uid", Opt_uid), fsparam_gid("gid", Opt_gid), fsparam_u32oct("umask", Opt_umask), fsparam_u32oct("dmask", Opt_dmask), fsparam_u32oct("fmask", Opt_fmask), fsparam_u32oct("allow_utime", Opt_allow_utime), fsparam_string("iocharset", Opt_charset), fsparam_enum("errors", Opt_errors, exfat_param_enums), fsparam_flag("discard", Opt_discard), fsparam_flag("keep_last_dots", Opt_keep_last_dots), fsparam_flag("sys_tz", Opt_sys_tz), fsparam_s32("time_offset", Opt_time_offset), fsparam_flag("zero_size_dir", Opt_zero_size_dir), __fsparam(NULL, "utf8", Opt_utf8, fs_param_deprecated, NULL), __fsparam(NULL, "debug", Opt_debug, fs_param_deprecated, NULL), __fsparam(fs_param_is_u32, "namecase", Opt_namecase, fs_param_deprecated, NULL), __fsparam(fs_param_is_u32, "codepage", Opt_codepage, fs_param_deprecated, NULL), {} }; static int exfat_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct exfat_sb_info *sbi = fc->s_fs_info; struct exfat_mount_options *opts = &sbi->options; struct fs_parse_result result; int opt; opt = fs_parse(fc, exfat_parameters, param, &result); if (opt < 0) return opt; switch (opt) { case Opt_uid: opts->fs_uid = result.uid; break; case Opt_gid: opts->fs_gid = result.gid; break; case Opt_umask: opts->fs_fmask = result.uint_32; opts->fs_dmask = result.uint_32; break; case Opt_dmask: opts->fs_dmask = result.uint_32; break; case Opt_fmask: opts->fs_fmask = result.uint_32; break; case Opt_allow_utime: opts->allow_utime = result.uint_32 & 0022; break; case Opt_charset: exfat_free_iocharset(sbi); opts->iocharset = 
param->string; param->string = NULL; break; case Opt_errors: opts->errors = result.uint_32; break; case Opt_discard: opts->discard = 1; break; case Opt_keep_last_dots: opts->keep_last_dots = 1; break; case Opt_sys_tz: opts->sys_tz = 1; break; case Opt_time_offset: /* * Make the limit 24 just in case someone invents something * unusual. */ if (result.int_32 < -24 * 60 || result.int_32 > 24 * 60) return -EINVAL; opts->time_offset = result.int_32; break; case Opt_zero_size_dir: opts->zero_size_dir = true; break; case Opt_utf8: case Opt_debug: case Opt_namecase: case Opt_codepage: break; default: return -EINVAL; } return 0; } static void exfat_hash_init(struct super_block *sb) { struct exfat_sb_info *sbi = EXFAT_SB(sb); int i; spin_lock_init(&sbi->inode_hash_lock); for (i = 0; i < EXFAT_HASH_SIZE; i++) INIT_HLIST_HEAD(&sbi->inode_hashtable[i]); } static int exfat_read_root(struct inode *inode) { struct super_block *sb = inode->i_sb; struct exfat_sb_info *sbi = EXFAT_SB(sb); struct exfat_inode_info *ei = EXFAT_I(inode); struct exfat_chain cdir; int num_subdirs, num_clu = 0; exfat_chain_set(&ei->dir, sbi->root_dir, 0, ALLOC_FAT_CHAIN); ei->entry = -1; ei->start_clu = sbi->root_dir; ei->flags = ALLOC_FAT_CHAIN; ei->type = TYPE_DIR; ei->version = 0; ei->hint_bmap.off = EXFAT_EOF_CLUSTER; ei->hint_stat.eidx = 0; ei->hint_stat.clu = sbi->root_dir; ei->hint_femp.eidx = EXFAT_HINT_NONE; exfat_chain_set(&cdir, sbi->root_dir, 0, ALLOC_FAT_CHAIN); if (exfat_count_num_clusters(sb, &cdir, &num_clu)) return -EIO; i_size_write(inode, num_clu << sbi->cluster_size_bits); num_subdirs = exfat_count_dir_entries(sb, &cdir); if (num_subdirs < 0) return -EIO; set_nlink(inode, num_subdirs + EXFAT_MIN_SUBDIR); inode->i_uid = sbi->options.fs_uid; inode->i_gid = sbi->options.fs_gid; inode_inc_iversion(inode); inode->i_generation = 0; inode->i_mode = exfat_make_mode(sbi, EXFAT_ATTR_SUBDIR, 0777); inode->i_op = &exfat_dir_inode_operations; inode->i_fop = &exfat_dir_operations; inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >> 9; ei->i_pos = ((loff_t)sbi->root_dir << 32) | 0xffffffff; exfat_save_attr(inode, EXFAT_ATTR_SUBDIR); ei->i_crtime = simple_inode_init_ts(inode); exfat_truncate_inode_atime(inode); return 0; } static int exfat_calibrate_blocksize(struct super_block *sb, int logical_sect) { struct exfat_sb_info *sbi = EXFAT_SB(sb); if (!is_power_of_2(logical_sect)) { exfat_err(sb, "bogus logical sector size %u", logical_sect); return -EIO; } if (logical_sect < sb->s_blocksize) { exfat_err(sb, "logical sector size too small for device (logical sector size = %u)", logical_sect); return -EIO; } if (logical_sect > sb->s_blocksize) { brelse(sbi->boot_bh); sbi->boot_bh = NULL; if (!sb_set_blocksize(sb, logical_sect)) { exfat_err(sb, "unable to set blocksize %u", logical_sect); return -EIO; } sbi->boot_bh = sb_bread(sb, 0); if (!sbi->boot_bh) { exfat_err(sb, "unable to read boot sector (logical sector size = %lu)", sb->s_blocksize); return -EIO; } } return 0; } static int exfat_read_boot_sector(struct super_block *sb) { struct boot_sector *p_boot; struct exfat_sb_info *sbi = EXFAT_SB(sb); /* set block size to read super block */ sb_min_blocksize(sb, 512); /* read boot sector */ sbi->boot_bh = sb_bread(sb, 0); if (!sbi->boot_bh) { exfat_err(sb, "unable to read boot sector"); return -EIO; } p_boot = (struct boot_sector *)sbi->boot_bh->b_data; /* check the validity of BOOT */ if (le16_to_cpu((p_boot->signature)) != BOOT_SIGNATURE) { exfat_err(sb, "invalid boot record signature"); return -EINVAL; } if 
(memcmp(p_boot->fs_name, STR_EXFAT, BOOTSEC_FS_NAME_LEN)) { exfat_err(sb, "invalid fs_name"); /* fs_name may be unprintable */ return -EINVAL; } /* * must_be_zero field must be filled with zero to prevent mounting * from FAT volume. */ if (memchr_inv(p_boot->must_be_zero, 0, sizeof(p_boot->must_be_zero))) return -EINVAL; if (p_boot->num_fats != 1 && p_boot->num_fats != 2) { exfat_err(sb, "bogus number of FAT structure"); return -EINVAL; } /* * sect_size_bits must be at least 9 and at most 12. */ if (p_boot->sect_size_bits < EXFAT_MIN_SECT_SIZE_BITS || p_boot->sect_size_bits > EXFAT_MAX_SECT_SIZE_BITS) { exfat_err(sb, "bogus sector size bits : %u", p_boot->sect_size_bits); return -EINVAL; } /* * sect_per_clus_bits must be at least 0 and at most 25 - sect_size_bits. */ if (p_boot->sect_per_clus_bits > EXFAT_MAX_SECT_PER_CLUS_BITS(p_boot)) { exfat_err(sb, "bogus sectors bits per cluster : %u", p_boot->sect_per_clus_bits); return -EINVAL; } sbi->sect_per_clus = 1 << p_boot->sect_per_clus_bits; sbi->sect_per_clus_bits = p_boot->sect_per_clus_bits; sbi->cluster_size_bits = p_boot->sect_per_clus_bits + p_boot->sect_size_bits; sbi->cluster_size = 1 << sbi->cluster_size_bits; sbi->num_FAT_sectors = le32_to_cpu(p_boot->fat_length); sbi->FAT1_start_sector = le32_to_cpu(p_boot->fat_offset); sbi->FAT2_start_sector = le32_to_cpu(p_boot->fat_offset); if (p_boot->num_fats == 2) sbi->FAT2_start_sector += sbi->num_FAT_sectors; sbi->data_start_sector = le32_to_cpu(p_boot->clu_offset); sbi->num_sectors = le64_to_cpu(p_boot->vol_length); /* because the cluster index starts with 2 */ sbi->num_clusters = le32_to_cpu(p_boot->clu_count) + EXFAT_RESERVED_CLUSTERS; sbi->root_dir = le32_to_cpu(p_boot->root_cluster); sbi->dentries_per_clu = 1 << (sbi->cluster_size_bits - DENTRY_SIZE_BITS); sbi->vol_flags = le16_to_cpu(p_boot->vol_flags); sbi->vol_flags_persistent = sbi->vol_flags & (VOLUME_DIRTY | MEDIA_FAILURE); sbi->clu_srch_ptr = EXFAT_FIRST_CLUSTER; /* check consistencies */ if ((u64)sbi->num_FAT_sectors << p_boot->sect_size_bits < (u64)sbi->num_clusters * 4) { exfat_err(sb, "bogus fat length"); return -EINVAL; } if (sbi->data_start_sector < (u64)sbi->FAT1_start_sector + (u64)sbi->num_FAT_sectors * p_boot->num_fats) { exfat_err(sb, "bogus data start sector"); return -EINVAL; } if (sbi->vol_flags & VOLUME_DIRTY) exfat_warn(sb, "Volume was not properly unmounted. Some data may be corrupt. Please run fsck."); if (sbi->vol_flags & MEDIA_FAILURE) exfat_warn(sb, "Medium has reported failures. Some data may be lost."); /* exFAT file size is limited by a disk volume size */ sb->s_maxbytes = (u64)(sbi->num_clusters - EXFAT_RESERVED_CLUSTERS) << sbi->cluster_size_bits; /* check logical sector size */ if (exfat_calibrate_blocksize(sb, 1 << p_boot->sect_size_bits)) return -EIO; return 0; } static int exfat_verify_boot_region(struct super_block *sb) { struct buffer_head *bh = NULL; u32 chksum = 0; __le32 *p_sig, *p_chksum; int sn, i; /* read boot sector sub-regions */ for (sn = 0; sn < 11; sn++) { bh = sb_bread(sb, sn); if (!bh) return -EIO; if (sn != 0 && sn <= 8) { /* extended boot sector sub-regions */ p_sig = (__le32 *)&bh->b_data[sb->s_blocksize - 4]; if (le32_to_cpu(*p_sig) != EXBOOT_SIGNATURE) exfat_warn(sb, "Invalid exboot-signature(sector = %d): 0x%08x", sn, le32_to_cpu(*p_sig)); } chksum = exfat_calc_chksum32(bh->b_data, sb->s_blocksize, chksum, sn ?
CS_DEFAULT : CS_BOOT_SECTOR); brelse(bh); } /* boot checksum sub-regions */ bh = sb_bread(sb, sn); if (!bh) return -EIO; for (i = 0; i < sb->s_blocksize; i += sizeof(u32)) { p_chksum = (__le32 *)&bh->b_data[i]; if (le32_to_cpu(*p_chksum) != chksum) { exfat_err(sb, "Invalid boot checksum (boot checksum : 0x%08x, checksum : 0x%08x)", le32_to_cpu(*p_chksum), chksum); brelse(bh); return -EINVAL; } } brelse(bh); return 0; } /* mount the file system volume */ static int __exfat_fill_super(struct super_block *sb) { int ret; struct exfat_sb_info *sbi = EXFAT_SB(sb); ret = exfat_read_boot_sector(sb); if (ret) { exfat_err(sb, "failed to read boot sector"); goto free_bh; } ret = exfat_verify_boot_region(sb); if (ret) { exfat_err(sb, "invalid boot region"); goto free_bh; } ret = exfat_create_upcase_table(sb); if (ret) { exfat_err(sb, "failed to load upcase table"); goto free_bh; } ret = exfat_load_bitmap(sb); if (ret) { exfat_err(sb, "failed to load alloc-bitmap"); goto free_bh; } ret = exfat_count_used_clusters(sb, &sbi->used_clusters); if (ret) { exfat_err(sb, "failed to scan clusters"); goto free_alloc_bitmap; } return 0; free_alloc_bitmap: exfat_free_bitmap(sbi); free_bh: brelse(sbi->boot_bh); return ret; } static int exfat_fill_super(struct super_block *sb, struct fs_context *fc) { struct exfat_sb_info *sbi = sb->s_fs_info; struct exfat_mount_options *opts = &sbi->options; struct inode *root_inode; int err; if (opts->allow_utime == (unsigned short)-1) opts->allow_utime = ~opts->fs_dmask & 0022; if (opts->discard && !bdev_max_discard_sectors(sb->s_bdev)) { exfat_warn(sb, "mounting with \"discard\" option, but the device does not support discard"); opts->discard = 0; } sb->s_flags |= SB_NODIRATIME; sb->s_magic = EXFAT_SUPER_MAGIC; sb->s_op = &exfat_sops; sb->s_time_gran = 10 * NSEC_PER_MSEC; sb->s_time_min = EXFAT_MIN_TIMESTAMP_SECS; sb->s_time_max = EXFAT_MAX_TIMESTAMP_SECS; err = __exfat_fill_super(sb); if (err) { exfat_err(sb, "failed to recognize exfat type"); goto check_nls_io; } /* set up enough so that it can read an inode */ exfat_hash_init(sb); if (!strcmp(sbi->options.iocharset, "utf8")) opts->utf8 = 1; else { sbi->nls_io = load_nls(sbi->options.iocharset); if (!sbi->nls_io) { exfat_err(sb, "IO charset %s not found", sbi->options.iocharset); err = -EINVAL; goto free_table; } } if (sbi->options.utf8) sb->s_d_op = &exfat_utf8_dentry_ops; else sb->s_d_op = &exfat_dentry_ops; root_inode = new_inode(sb); if (!root_inode) { exfat_err(sb, "failed to allocate root inode"); err = -ENOMEM; goto free_table; } root_inode->i_ino = EXFAT_ROOT_INO; inode_set_iversion(root_inode, 1); err = exfat_read_root(root_inode); if (err) { exfat_err(sb, "failed to initialize root inode"); goto put_inode; } exfat_hash_inode(root_inode, EXFAT_I(root_inode)->i_pos); insert_inode_hash(root_inode); sb->s_root = d_make_root(root_inode); if (!sb->s_root) { exfat_err(sb, "failed to get the root dentry"); err = -ENOMEM; goto free_table; } return 0; put_inode: iput(root_inode); sb->s_root = NULL; free_table: exfat_free_bitmap(sbi); brelse(sbi->boot_bh); check_nls_io: return err; } static int exfat_get_tree(struct fs_context *fc) { return get_tree_bdev(fc, exfat_fill_super); } static void exfat_free_sbi(struct exfat_sb_info *sbi) { exfat_free_iocharset(sbi); kfree(sbi); } static void exfat_free(struct fs_context *fc) { struct exfat_sb_info *sbi = fc->s_fs_info; if (sbi) exfat_free_sbi(sbi); } static int exfat_reconfigure(struct fs_context *fc) { struct super_block *sb = fc->root->d_sb; fc->sb_flags |= SB_NODIRATIME; 
sync_filesystem(sb); mutex_lock(&EXFAT_SB(sb)->s_lock); exfat_clear_volume_dirty(sb); mutex_unlock(&EXFAT_SB(sb)->s_lock); return 0; } static const struct fs_context_operations exfat_context_ops = { .parse_param = exfat_parse_param, .get_tree = exfat_get_tree, .free = exfat_free, .reconfigure = exfat_reconfigure, }; static int exfat_init_fs_context(struct fs_context *fc) { struct exfat_sb_info *sbi; sbi = kzalloc(sizeof(struct exfat_sb_info), GFP_KERNEL); if (!sbi) return -ENOMEM; mutex_init(&sbi->s_lock); mutex_init(&sbi->bitmap_lock); ratelimit_state_init(&sbi->ratelimit, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); sbi->options.fs_uid = current_uid(); sbi->options.fs_gid = current_gid(); sbi->options.fs_fmask = current->fs->umask; sbi->options.fs_dmask = current->fs->umask; sbi->options.allow_utime = -1; sbi->options.iocharset = exfat_default_iocharset; sbi->options.errors = EXFAT_ERRORS_RO; fc->s_fs_info = sbi; fc->ops = &exfat_context_ops; return 0; } static void delayed_free(struct rcu_head *p) { struct exfat_sb_info *sbi = container_of(p, struct exfat_sb_info, rcu); unload_nls(sbi->nls_io); exfat_free_upcase_table(sbi); exfat_free_sbi(sbi); } static void exfat_kill_sb(struct super_block *sb) { struct exfat_sb_info *sbi = sb->s_fs_info; kill_block_super(sb); if (sbi) call_rcu(&sbi->rcu, delayed_free); } static struct file_system_type exfat_fs_type = { .owner = THIS_MODULE, .name = "exfat", .init_fs_context = exfat_init_fs_context, .parameters = exfat_parameters, .kill_sb = exfat_kill_sb, .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP, }; static void exfat_inode_init_once(void *foo) { struct exfat_inode_info *ei = (struct exfat_inode_info *)foo; spin_lock_init(&ei->cache_lru_lock); ei->nr_caches = 0; ei->cache_valid_id = EXFAT_CACHE_VALID + 1; INIT_LIST_HEAD(&ei->cache_lru); INIT_HLIST_NODE(&ei->i_hash_fat); inode_init_once(&ei->vfs_inode); } static int __init init_exfat_fs(void) { int err; err = exfat_cache_init(); if (err) return err; exfat_inode_cachep = kmem_cache_create("exfat_inode_cache", sizeof(struct exfat_inode_info), 0, SLAB_RECLAIM_ACCOUNT, exfat_inode_init_once); if (!exfat_inode_cachep) { err = -ENOMEM; goto shutdown_cache; } err = register_filesystem(&exfat_fs_type); if (err) goto destroy_cache; return 0; destroy_cache: kmem_cache_destroy(exfat_inode_cachep); shutdown_cache: exfat_cache_shutdown(); return err; } static void __exit exit_exfat_fs(void) { /* * Make sure all delayed rcu free inodes are flushed before we * destroy cache. */ rcu_barrier(); kmem_cache_destroy(exfat_inode_cachep); unregister_filesystem(&exfat_fs_type); exfat_cache_shutdown(); } module_init(init_exfat_fs); module_exit(exit_exfat_fs); MODULE_ALIAS_FS("exfat"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("exFAT filesystem support"); MODULE_AUTHOR("Samsung Electronics Co., Ltd."); |
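/*
 * Editor's illustration, not part of the kernel source above: a minimal
 * userspace sketch showing how the options handled by exfat_parse_param()
 * arrive through the new mount API. It assumes libc headers that expose
 * SYS_fsopen and friends plus the FSCONFIG_* and MOVE_MOUNT_* constants;
 * "/dev/sdb1", "/mnt" and the option values are placeholders.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mount.h>

int main(void)
{
	int fsfd, mntfd;

	fsfd = syscall(SYS_fsopen, "exfat", 0);
	if (fsfd < 0) {
		perror("fsopen");
		return 1;
	}

	/* each call below reaches exfat_parse_param() as one fs_parameter */
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source", "/dev/sdb1", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "iocharset", "utf8", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "time_offset", "540", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_FLAG, "discard", NULL, 0);

	/* FSCONFIG_CMD_CREATE ends up in exfat_get_tree() -> exfat_fill_super() */
	if (syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0) < 0) {
		perror("fsconfig(FSCONFIG_CMD_CREATE)");
		return 1;
	}

	mntfd = syscall(SYS_fsmount, fsfd, 0, 0);
	if (mntfd < 0) {
		perror("fsmount");
		return 1;
	}

	if (syscall(SYS_move_mount, mntfd, "", AT_FDCWD, "/mnt",
		    MOVE_MOUNT_F_EMPTY_PATH) < 0)
		perror("move_mount");

	close(mntfd);
	close(fsfd);
	return 0;
}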
// SPDX-License-Identifier: GPL-2.0-or-later /* * UDPLITE An implementation of the UDP-Lite protocol (RFC 3828). * * Authors: Gerrit Renker <gerrit@erg.abdn.ac.uk> * * Changes: * Fixes: */ #define pr_fmt(fmt) "UDPLite: " fmt #include <linux/export.h> #include <linux/proc_fs.h> #include "udp_impl.h" struct udp_table udplite_table __read_mostly; EXPORT_SYMBOL(udplite_table); /* Designate sk as UDP-Lite socket */ static int udplite_sk_init(struct sock *sk) { udp_init_sock(sk); pr_warn_once("UDP-Lite is deprecated and scheduled to be removed in 2025, " "please contact the netdev mailing list\n"); return 0; } static int udplite_rcv(struct sk_buff *skb) { return __udp4_lib_rcv(skb, &udplite_table, IPPROTO_UDPLITE); } static int udplite_err(struct sk_buff *skb, u32 info) { return __udp4_lib_err(skb, info, &udplite_table); } static const struct net_protocol udplite_protocol = { .handler = udplite_rcv, .err_handler = udplite_err, .no_policy = 1, }; struct proto udplite_prot = { .name = "UDP-Lite", .owner = THIS_MODULE, .close = udp_lib_close, .connect = ip4_datagram_connect, .disconnect = udp_disconnect, .ioctl = udp_ioctl, .init = udplite_sk_init, .destroy = udp_destroy_sock, .setsockopt = udp_setsockopt, .getsockopt = udp_getsockopt, .sendmsg = udp_sendmsg, .recvmsg = udp_recvmsg, .hash = udp_lib_hash, .unhash = udp_lib_unhash, .rehash = udp_v4_rehash, .get_port = udp_v4_get_port, .memory_allocated = &udp_memory_allocated, .per_cpu_fw_alloc = &udp_memory_per_cpu_fw_alloc, .sysctl_mem = sysctl_udp_mem, .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min), .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min), .obj_size = sizeof(struct udp_sock), .h.udp_table = &udplite_table, }; EXPORT_SYMBOL(udplite_prot); static struct inet_protosw udplite4_protosw = { .type = SOCK_DGRAM, .protocol = IPPROTO_UDPLITE, .prot = &udplite_prot, .ops = &inet_dgram_ops, .flags = INET_PROTOSW_PERMANENT, }; #ifdef CONFIG_PROC_FS static struct udp_seq_afinfo udplite4_seq_afinfo = { .family = AF_INET, .udp_table = &udplite_table, }; static int __net_init udplite4_proc_init_net(struct net *net) { if (!proc_create_net_data("udplite", 0444, net->proc_net, &udp_seq_ops, sizeof(struct udp_iter_state), &udplite4_seq_afinfo)) return -ENOMEM; return 0; } static void __net_exit udplite4_proc_exit_net(struct net *net) { remove_proc_entry("udplite", net->proc_net); } static struct pernet_operations udplite4_net_ops = { .init = udplite4_proc_init_net, .exit = udplite4_proc_exit_net, }; static __init int udplite4_proc_init(void) { return register_pernet_subsys(&udplite4_net_ops); } #else static inline int udplite4_proc_init(void) { return 0; } #endif void __init udplite4_register(void) { udp_table_init(&udplite_table, "UDP-Lite"); if (proto_register(&udplite_prot, 1)) goto out_register_err; if (inet_add_protocol(&udplite_protocol, IPPROTO_UDPLITE) < 0) goto out_unregister_proto; inet_register_protosw(&udplite4_protosw); if (udplite4_proc_init()) pr_err("%s: Cannot register /proc!\n", __func__); return; out_unregister_proto:
proto_unregister(&udplite_prot); out_register_err: pr_crit("%s: Cannot add UDP-Lite protocol\n", __func__); }
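/*
 * Editor's illustration, not part of the kernel source above: a hedged
 * userspace sketch of the socket API this file implements. It sends one
 * UDP-Lite datagram whose checksum covers only the first 20 bytes, as
 * described in Documentation/networking/udplite. The IPPROTO_UDPLITE and
 * UDPLITE_*_CSCOV values are defined locally in case older libc headers
 * lack them; the loopback address and discard port are placeholders.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IPPROTO_UDPLITE
#define IPPROTO_UDPLITE 136
#endif
#ifndef UDPLITE_SEND_CSCOV
#define UDPLITE_SEND_CSCOV 10	/* sender checksum coverage */
#endif
#ifndef UDPLITE_RECV_CSCOV
#define UDPLITE_RECV_CSCOV 11	/* minimum coverage accepted on receive */
#endif

int main(void)
{
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(9),	/* discard service; placeholder */
	};
	int cscov = 20;			/* checksum the first 20 bytes only */
	char payload[512];
	int s;

	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);

	s = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
	if (s < 0) {
		perror("socket");
		return 1;
	}

	/* level IPPROTO_UDPLITE is serviced by udp_setsockopt() via udplite_prot */
	if (setsockopt(s, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV,
		       &cscov, sizeof(cscov)) < 0)
		perror("setsockopt(UDPLITE_SEND_CSCOV)");

	memset(payload, 0xab, sizeof(payload));
	if (sendto(s, payload, sizeof(payload), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");

	close(s);
	return 0;
}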
// SPDX-License-Identifier: GPL-2.0 #include <linux/bpf.h> #include <linux/filter.h> #include <linux/kmod.h> #include <linux/module.h> #include <linux/netfilter.h> #include <net/netfilter/nf_bpf_link.h> #include <uapi/linux/netfilter_ipv4.h> static unsigned int nf_hook_run_bpf(void *bpf_prog, struct sk_buff *skb, const struct nf_hook_state *s) { const struct bpf_prog *prog = bpf_prog; struct bpf_nf_ctx ctx = { .state = s, .skb = skb, }; return bpf_prog_run(prog, &ctx); } struct bpf_nf_link { struct bpf_link link; struct nf_hook_ops hook_ops; netns_tracker ns_tracker; struct net *net; u32 dead; const struct nf_defrag_hook *defrag_hook; }; #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) || IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) static const struct nf_defrag_hook * get_proto_defrag_hook(struct bpf_nf_link *link, const struct nf_defrag_hook __rcu **ptr_global_hook, const char *mod) { const struct nf_defrag_hook *hook; int err; /* RCU protects us from races against module unloading */ rcu_read_lock(); hook = rcu_dereference(*ptr_global_hook); if (!hook) { rcu_read_unlock(); err = request_module("%s", mod); if (err) return ERR_PTR(err < 0 ?
err : -EINVAL); rcu_read_lock(); hook = rcu_dereference(*ptr_global_hook); } if (hook && try_module_get(hook->owner)) { /* Once we have a refcnt on the module, we no longer need RCU */ hook = rcu_pointer_handoff(hook); } else { WARN_ONCE(!hook, "%s has bad registration", mod); hook = ERR_PTR(-ENOENT); } rcu_read_unlock(); if (!IS_ERR(hook)) { err = hook->enable(link->net); if (err) { module_put(hook->owner); hook = ERR_PTR(err); } } return hook; } #endif static int bpf_nf_enable_defrag(struct bpf_nf_link *link) { const struct nf_defrag_hook __maybe_unused *hook; switch (link->hook_ops.pf) { #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV4) case NFPROTO_IPV4: hook = get_proto_defrag_hook(link, &nf_defrag_v4_hook, "nf_defrag_ipv4"); if (IS_ERR(hook)) return PTR_ERR(hook); link->defrag_hook = hook; return 0; #endif #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) case NFPROTO_IPV6: hook = get_proto_defrag_hook(link, &nf_defrag_v6_hook, "nf_defrag_ipv6"); if (IS_ERR(hook)) return PTR_ERR(hook); link->defrag_hook = hook; return 0; #endif default: return -EAFNOSUPPORT; } } static void bpf_nf_disable_defrag(struct bpf_nf_link *link) { const struct nf_defrag_hook *hook = link->defrag_hook; if (!hook) return; hook->disable(link->net); module_put(hook->owner); } static void bpf_nf_link_release(struct bpf_link *link) { struct bpf_nf_link *nf_link = container_of(link, struct bpf_nf_link, link); if (nf_link->dead) return; /* do not double release in case .detach was already called */ if (!cmpxchg(&nf_link->dead, 0, 1)) { nf_unregister_net_hook(nf_link->net, &nf_link->hook_ops); bpf_nf_disable_defrag(nf_link); put_net_track(nf_link->net, &nf_link->ns_tracker); } } static void bpf_nf_link_dealloc(struct bpf_link *link) { struct bpf_nf_link *nf_link = container_of(link, struct bpf_nf_link, link); kfree(nf_link); } static int bpf_nf_link_detach(struct bpf_link *link) { bpf_nf_link_release(link); return 0; } static void bpf_nf_link_show_info(const struct bpf_link *link, struct seq_file *seq) { struct bpf_nf_link *nf_link = container_of(link, struct bpf_nf_link, link); seq_printf(seq, "pf:\t%u\thooknum:\t%u\tprio:\t%d\n", nf_link->hook_ops.pf, nf_link->hook_ops.hooknum, nf_link->hook_ops.priority); } static int bpf_nf_link_fill_link_info(const struct bpf_link *link, struct bpf_link_info *info) { struct bpf_nf_link *nf_link = container_of(link, struct bpf_nf_link, link); const struct nf_defrag_hook *hook = nf_link->defrag_hook; info->netfilter.pf = nf_link->hook_ops.pf; info->netfilter.hooknum = nf_link->hook_ops.hooknum; info->netfilter.priority = nf_link->hook_ops.priority; info->netfilter.flags = hook ? 
BPF_F_NETFILTER_IP_DEFRAG : 0; return 0; } static int bpf_nf_link_update(struct bpf_link *link, struct bpf_prog *new_prog, struct bpf_prog *old_prog) { return -EOPNOTSUPP; } static const struct bpf_link_ops bpf_nf_link_lops = { .release = bpf_nf_link_release, .dealloc = bpf_nf_link_dealloc, .detach = bpf_nf_link_detach, .show_fdinfo = bpf_nf_link_show_info, .fill_link_info = bpf_nf_link_fill_link_info, .update_prog = bpf_nf_link_update, }; static int bpf_nf_check_pf_and_hooks(const union bpf_attr *attr) { int prio; switch (attr->link_create.netfilter.pf) { case NFPROTO_IPV4: case NFPROTO_IPV6: if (attr->link_create.netfilter.hooknum >= NF_INET_NUMHOOKS) return -EPROTO; break; default: return -EAFNOSUPPORT; } if (attr->link_create.netfilter.flags & ~BPF_F_NETFILTER_IP_DEFRAG) return -EOPNOTSUPP; /* make sure conntrack confirm is always last */ prio = attr->link_create.netfilter.priority; if (prio == NF_IP_PRI_FIRST) return -ERANGE; /* sabotage_in and other warts */ else if (prio == NF_IP_PRI_LAST) return -ERANGE; /* e.g. conntrack confirm */ else if ((attr->link_create.netfilter.flags & BPF_F_NETFILTER_IP_DEFRAG) && prio <= NF_IP_PRI_CONNTRACK_DEFRAG) return -ERANGE; /* cannot use defrag if prog runs before nf_defrag */ return 0; } int bpf_nf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) { struct net *net = current->nsproxy->net_ns; struct bpf_link_primer link_primer; struct bpf_nf_link *link; int err; if (attr->link_create.flags) return -EINVAL; err = bpf_nf_check_pf_and_hooks(attr); if (err) return err; link = kzalloc(sizeof(*link), GFP_USER); if (!link) return -ENOMEM; bpf_link_init(&link->link, BPF_LINK_TYPE_NETFILTER, &bpf_nf_link_lops, prog); link->hook_ops.hook = nf_hook_run_bpf; link->hook_ops.hook_ops_type = NF_HOOK_OP_BPF; link->hook_ops.priv = prog; link->hook_ops.pf = attr->link_create.netfilter.pf; link->hook_ops.priority = attr->link_create.netfilter.priority; link->hook_ops.hooknum = attr->link_create.netfilter.hooknum; link->net = net; link->dead = false; link->defrag_hook = NULL; err = bpf_link_prime(&link->link, &link_primer); if (err) { kfree(link); return err; } if (attr->link_create.netfilter.flags & BPF_F_NETFILTER_IP_DEFRAG) { err = bpf_nf_enable_defrag(link); if (err) { bpf_link_cleanup(&link_primer); return err; } } err = nf_register_net_hook(net, &link->hook_ops); if (err) { bpf_nf_disable_defrag(link); bpf_link_cleanup(&link_primer); return err; } get_net_track(net, &link->ns_tracker, GFP_KERNEL); return bpf_link_settle(&link_primer); } const struct bpf_prog_ops netfilter_prog_ops = { .test_run = bpf_prog_test_run_nf, }; static bool nf_ptr_to_btf_id(struct bpf_insn_access_aux *info, const char *name) { struct btf *btf; s32 type_id; btf = bpf_get_btf_vmlinux(); if (IS_ERR_OR_NULL(btf)) return false; type_id = btf_find_by_name_kind(btf, name, BTF_KIND_STRUCT); if (WARN_ON_ONCE(type_id < 0)) return false; info->btf = btf; info->btf_id = type_id; info->reg_type = PTR_TO_BTF_ID | PTR_TRUSTED; return true; } static bool nf_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { if (off < 0 || off >= sizeof(struct bpf_nf_ctx)) return false; if (type == BPF_WRITE) return false; switch (off) { case bpf_ctx_range(struct bpf_nf_ctx, skb): if (size != sizeof_field(struct bpf_nf_ctx, skb)) return false; return nf_ptr_to_btf_id(info, "sk_buff"); case bpf_ctx_range(struct bpf_nf_ctx, state): if (size != sizeof_field(struct bpf_nf_ctx, state)) return false; return nf_ptr_to_btf_id(info, 
"nf_hook_state"); default: return false; } return false; } static const struct bpf_func_proto * bpf_nf_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { return bpf_base_func_proto(func_id, prog); } const struct bpf_verifier_ops netfilter_verifier_ops = { .is_valid_access = nf_is_valid_access, .get_func_proto = bpf_nf_func_proto, }; |
// SPDX-License-Identifier: GPL-2.0-or-later /* * User-space I/O driver support for HID subsystem * Copyright (c) 2012 David Herrmann */ #include <linux/atomic.h>
#include <linux/compat.h> #include <linux/cred.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/hid.h> #include <linux/input.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/uhid.h> #include <linux/wait.h> #define UHID_NAME "uhid" #define UHID_BUFSIZE 32 struct uhid_device { struct mutex devlock; /* This flag tracks whether the HID device is usable for commands from * userspace. The flag is already set before hid_add_device(), which * runs in workqueue context, to allow hid_add_device() to communicate * with userspace. * However, if hid_add_device() fails, the flag is cleared without * holding devlock. * We guarantee that if @running changes from true to false while you're * holding @devlock, it's still fine to access @hid. */ bool running; __u8 *rd_data; uint rd_size; /* When this is NULL, userspace may use UHID_CREATE/UHID_CREATE2. */ struct hid_device *hid; struct uhid_event input_buf; wait_queue_head_t waitq; spinlock_t qlock; __u8 head; __u8 tail; struct uhid_event *outq[UHID_BUFSIZE]; /* blocking GET_REPORT support; state changes protected by qlock */ struct mutex report_lock; wait_queue_head_t report_wait; bool report_running; u32 report_id; u32 report_type; struct uhid_event report_buf; struct work_struct worker; }; static struct miscdevice uhid_misc; static void uhid_device_add_worker(struct work_struct *work) { struct uhid_device *uhid = container_of(work, struct uhid_device, worker); int ret; ret = hid_add_device(uhid->hid); if (ret) { hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret); /* We used to call hid_destroy_device() here, but that's really * messy to get right because we have to coordinate with * concurrent writes from userspace that might be in the middle * of using uhid->hid. * Just leave uhid->hid as-is for now, and clean it up when * userspace tries to close or reinitialize the uhid instance. * * However, we do have to clear the ->running flag and do a * wakeup to make sure userspace knows that the device is gone. 
*/ WRITE_ONCE(uhid->running, false); wake_up_interruptible(&uhid->report_wait); } } static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev) { __u8 newhead; newhead = (uhid->head + 1) % UHID_BUFSIZE; if (newhead != uhid->tail) { uhid->outq[uhid->head] = ev; uhid->head = newhead; wake_up_interruptible(&uhid->waitq); } else { hid_warn(uhid->hid, "Output queue is full\n"); kfree(ev); } } static int uhid_queue_event(struct uhid_device *uhid, __u32 event) { unsigned long flags; struct uhid_event *ev; ev = kzalloc(sizeof(*ev), GFP_KERNEL); if (!ev) return -ENOMEM; ev->type = event; spin_lock_irqsave(&uhid->qlock, flags); uhid_queue(uhid, ev); spin_unlock_irqrestore(&uhid->qlock, flags); return 0; } static int uhid_hid_start(struct hid_device *hid) { struct uhid_device *uhid = hid->driver_data; struct uhid_event *ev; unsigned long flags; ev = kzalloc(sizeof(*ev), GFP_KERNEL); if (!ev) return -ENOMEM; ev->type = UHID_START; if (hid->report_enum[HID_FEATURE_REPORT].numbered) ev->u.start.dev_flags |= UHID_DEV_NUMBERED_FEATURE_REPORTS; if (hid->report_enum[HID_OUTPUT_REPORT].numbered) ev->u.start.dev_flags |= UHID_DEV_NUMBERED_OUTPUT_REPORTS; if (hid->report_enum[HID_INPUT_REPORT].numbered) ev->u.start.dev_flags |= UHID_DEV_NUMBERED_INPUT_REPORTS; spin_lock_irqsave(&uhid->qlock, flags); uhid_queue(uhid, ev); spin_unlock_irqrestore(&uhid->qlock, flags); return 0; } static void uhid_hid_stop(struct hid_device *hid) { struct uhid_device *uhid = hid->driver_data; hid->claimed = 0; uhid_queue_event(uhid, UHID_STOP); } static int uhid_hid_open(struct hid_device *hid) { struct uhid_device *uhid = hid->driver_data; return uhid_queue_event(uhid, UHID_OPEN); } static void uhid_hid_close(struct hid_device *hid) { struct uhid_device *uhid = hid->driver_data; uhid_queue_event(uhid, UHID_CLOSE); } static int uhid_hid_parse(struct hid_device *hid) { struct uhid_device *uhid = hid->driver_data; return hid_parse_report(hid, uhid->rd_data, uhid->rd_size); } /* must be called with report_lock held */ static int __uhid_report_queue_and_wait(struct uhid_device *uhid, struct uhid_event *ev, __u32 *report_id) { unsigned long flags; int ret; spin_lock_irqsave(&uhid->qlock, flags); *report_id = ++uhid->report_id; uhid->report_type = ev->type + 1; uhid->report_running = true; uhid_queue(uhid, ev); spin_unlock_irqrestore(&uhid->qlock, flags); ret = wait_event_interruptible_timeout(uhid->report_wait, !uhid->report_running || !READ_ONCE(uhid->running), 5 * HZ); if (!ret || !READ_ONCE(uhid->running) || uhid->report_running) ret = -EIO; else if (ret < 0) ret = -ERESTARTSYS; else ret = 0; uhid->report_running = false; return ret; } static void uhid_report_wake_up(struct uhid_device *uhid, u32 id, const struct uhid_event *ev) { unsigned long flags; spin_lock_irqsave(&uhid->qlock, flags); /* id for old report; drop it silently */ if (uhid->report_type != ev->type || uhid->report_id != id) goto unlock; if (!uhid->report_running) goto unlock; memcpy(&uhid->report_buf, ev, sizeof(*ev)); uhid->report_running = false; wake_up_interruptible(&uhid->report_wait); unlock: spin_unlock_irqrestore(&uhid->qlock, flags); } static int uhid_hid_get_report(struct hid_device *hid, unsigned char rnum, u8 *buf, size_t count, u8 rtype) { struct uhid_device *uhid = hid->driver_data; struct uhid_get_report_reply_req *req; struct uhid_event *ev; int ret; if (!READ_ONCE(uhid->running)) return -EIO; ev = kzalloc(sizeof(*ev), GFP_KERNEL); if (!ev) return -ENOMEM; ev->type = UHID_GET_REPORT; ev->u.get_report.rnum = rnum; ev->u.get_report.rtype = 
rtype; ret = mutex_lock_interruptible(&uhid->report_lock); if (ret) { kfree(ev); return ret; } /* this _always_ takes ownership of @ev */ ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.get_report.id); if (ret) goto unlock; req = &uhid->report_buf.u.get_report_reply; if (req->err) { ret = -EIO; } else { ret = min3(count, (size_t)req->size, (size_t)UHID_DATA_MAX); memcpy(buf, req->data, ret); } unlock: mutex_unlock(&uhid->report_lock); return ret; } static int uhid_hid_set_report(struct hid_device *hid, unsigned char rnum, const u8 *buf, size_t count, u8 rtype) { struct uhid_device *uhid = hid->driver_data; struct uhid_event *ev; int ret; if (!READ_ONCE(uhid->running) || count > UHID_DATA_MAX) return -EIO; ev = kzalloc(sizeof(*ev), GFP_KERNEL); if (!ev) return -ENOMEM; ev->type = UHID_SET_REPORT; ev->u.set_report.rnum = rnum; ev->u.set_report.rtype = rtype; ev->u.set_report.size = count; memcpy(ev->u.set_report.data, buf, count); ret = mutex_lock_interruptible(&uhid->report_lock); if (ret) { kfree(ev); return ret; } /* this _always_ takes ownership of @ev */ ret = __uhid_report_queue_and_wait(uhid, ev, &ev->u.set_report.id); if (ret) goto unlock; if (uhid->report_buf.u.set_report_reply.err) ret = -EIO; else ret = count; unlock: mutex_unlock(&uhid->report_lock); return ret; } static int uhid_hid_raw_request(struct hid_device *hid, unsigned char reportnum, __u8 *buf, size_t len, unsigned char rtype, int reqtype) { u8 u_rtype; switch (rtype) { case HID_FEATURE_REPORT: u_rtype = UHID_FEATURE_REPORT; break; case HID_OUTPUT_REPORT: u_rtype = UHID_OUTPUT_REPORT; break; case HID_INPUT_REPORT: u_rtype = UHID_INPUT_REPORT; break; default: return -EINVAL; } switch (reqtype) { case HID_REQ_GET_REPORT: return uhid_hid_get_report(hid, reportnum, buf, len, u_rtype); case HID_REQ_SET_REPORT: return uhid_hid_set_report(hid, reportnum, buf, len, u_rtype); default: return -EIO; } } static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count, unsigned char report_type) { struct uhid_device *uhid = hid->driver_data; __u8 rtype; unsigned long flags; struct uhid_event *ev; switch (report_type) { case HID_FEATURE_REPORT: rtype = UHID_FEATURE_REPORT; break; case HID_OUTPUT_REPORT: rtype = UHID_OUTPUT_REPORT; break; default: return -EINVAL; } if (count < 1 || count > UHID_DATA_MAX) return -EINVAL; ev = kzalloc(sizeof(*ev), GFP_KERNEL); if (!ev) return -ENOMEM; ev->type = UHID_OUTPUT; ev->u.output.size = count; ev->u.output.rtype = rtype; memcpy(ev->u.output.data, buf, count); spin_lock_irqsave(&uhid->qlock, flags); uhid_queue(uhid, ev); spin_unlock_irqrestore(&uhid->qlock, flags); return count; } static int uhid_hid_output_report(struct hid_device *hid, __u8 *buf, size_t count) { return uhid_hid_output_raw(hid, buf, count, HID_OUTPUT_REPORT); } static const struct hid_ll_driver uhid_hid_driver = { .start = uhid_hid_start, .stop = uhid_hid_stop, .open = uhid_hid_open, .close = uhid_hid_close, .parse = uhid_hid_parse, .raw_request = uhid_hid_raw_request, .output_report = uhid_hid_output_report, .max_buffer_size = UHID_DATA_MAX, }; #ifdef CONFIG_COMPAT /* Apparently we haven't stepped on these rakes enough times yet. 
*/ struct uhid_create_req_compat { __u8 name[128]; __u8 phys[64]; __u8 uniq[64]; compat_uptr_t rd_data; __u16 rd_size; __u16 bus; __u32 vendor; __u32 product; __u32 version; __u32 country; } __attribute__((__packed__)); static int uhid_event_from_user(const char __user *buffer, size_t len, struct uhid_event *event) { if (in_compat_syscall()) { u32 type; if (get_user(type, buffer)) return -EFAULT; if (type == UHID_CREATE) { /* * This is our messed up request with compat pointer. * It is largish (more than 256 bytes) so we better * allocate it from the heap. */ struct uhid_create_req_compat *compat; compat = kzalloc(sizeof(*compat), GFP_KERNEL); if (!compat) return -ENOMEM; buffer += sizeof(type); len -= sizeof(type); if (copy_from_user(compat, buffer, min(len, sizeof(*compat)))) { kfree(compat); return -EFAULT; } /* Shuffle the data over to proper structure */ event->type = type; memcpy(event->u.create.name, compat->name, sizeof(compat->name)); memcpy(event->u.create.phys, compat->phys, sizeof(compat->phys)); memcpy(event->u.create.uniq, compat->uniq, sizeof(compat->uniq)); event->u.create.rd_data = compat_ptr(compat->rd_data); event->u.create.rd_size = compat->rd_size; event->u.create.bus = compat->bus; event->u.create.vendor = compat->vendor; event->u.create.product = compat->product; event->u.create.version = compat->version; event->u.create.country = compat->country; kfree(compat); return 0; } /* All others can be copied directly */ } if (copy_from_user(event, buffer, min(len, sizeof(*event)))) return -EFAULT; return 0; } #else static int uhid_event_from_user(const char __user *buffer, size_t len, struct uhid_event *event) { if (copy_from_user(event, buffer, min(len, sizeof(*event)))) return -EFAULT; return 0; } #endif static int uhid_dev_create2(struct uhid_device *uhid, const struct uhid_event *ev) { struct hid_device *hid; size_t rd_size; void *rd_data; int ret; if (uhid->hid) return -EALREADY; rd_size = ev->u.create2.rd_size; if (rd_size <= 0 || rd_size > HID_MAX_DESCRIPTOR_SIZE) return -EINVAL; rd_data = kmemdup(ev->u.create2.rd_data, rd_size, GFP_KERNEL); if (!rd_data) return -ENOMEM; uhid->rd_size = rd_size; uhid->rd_data = rd_data; hid = hid_allocate_device(); if (IS_ERR(hid)) { ret = PTR_ERR(hid); goto err_free; } BUILD_BUG_ON(sizeof(hid->name) != sizeof(ev->u.create2.name)); strscpy(hid->name, ev->u.create2.name, sizeof(hid->name)); BUILD_BUG_ON(sizeof(hid->phys) != sizeof(ev->u.create2.phys)); strscpy(hid->phys, ev->u.create2.phys, sizeof(hid->phys)); BUILD_BUG_ON(sizeof(hid->uniq) != sizeof(ev->u.create2.uniq)); strscpy(hid->uniq, ev->u.create2.uniq, sizeof(hid->uniq)); hid->ll_driver = &uhid_hid_driver; hid->bus = ev->u.create2.bus; hid->vendor = ev->u.create2.vendor; hid->product = ev->u.create2.product; hid->version = ev->u.create2.version; hid->country = ev->u.create2.country; hid->driver_data = uhid; hid->dev.parent = uhid_misc.this_device; uhid->hid = hid; uhid->running = true; /* Adding of a HID device is done through a worker, to allow HID drivers * which use feature requests during .probe to work, without they would * be blocked on devlock, which is held by uhid_char_write. 
*/ schedule_work(&uhid->worker); return 0; err_free: kfree(uhid->rd_data); uhid->rd_data = NULL; uhid->rd_size = 0; return ret; } static int uhid_dev_create(struct uhid_device *uhid, struct uhid_event *ev) { struct uhid_create_req orig; orig = ev->u.create; if (orig.rd_size <= 0 || orig.rd_size > HID_MAX_DESCRIPTOR_SIZE) return -EINVAL; if (copy_from_user(&ev->u.create2.rd_data, orig.rd_data, orig.rd_size)) return -EFAULT; memcpy(ev->u.create2.name, orig.name, sizeof(orig.name)); memcpy(ev->u.create2.phys, orig.phys, sizeof(orig.phys)); memcpy(ev->u.create2.uniq, orig.uniq, sizeof(orig.uniq)); ev->u.create2.rd_size = orig.rd_size; ev->u.create2.bus = orig.bus; ev->u.create2.vendor = orig.vendor; ev->u.create2.product = orig.product; ev->u.create2.version = orig.version; ev->u.create2.country = orig.country; return uhid_dev_create2(uhid, ev); } static int uhid_dev_destroy(struct uhid_device *uhid) { if (!uhid->hid) return -EINVAL; WRITE_ONCE(uhid->running, false); wake_up_interruptible(&uhid->report_wait); cancel_work_sync(&uhid->worker); hid_destroy_device(uhid->hid); uhid->hid = NULL; kfree(uhid->rd_data); return 0; } static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev) { if (!READ_ONCE(uhid->running)) return -EINVAL; hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input.data, min_t(size_t, ev->u.input.size, UHID_DATA_MAX), 0); return 0; } static int uhid_dev_input2(struct uhid_device *uhid, struct uhid_event *ev) { if (!READ_ONCE(uhid->running)) return -EINVAL; hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input2.data, min_t(size_t, ev->u.input2.size, UHID_DATA_MAX), 0); return 0; } static int uhid_dev_get_report_reply(struct uhid_device *uhid, struct uhid_event *ev) { if (!READ_ONCE(uhid->running)) return -EINVAL; uhid_report_wake_up(uhid, ev->u.get_report_reply.id, ev); return 0; } static int uhid_dev_set_report_reply(struct uhid_device *uhid, struct uhid_event *ev) { if (!READ_ONCE(uhid->running)) return -EINVAL; uhid_report_wake_up(uhid, ev->u.set_report_reply.id, ev); return 0; } static int uhid_char_open(struct inode *inode, struct file *file) { struct uhid_device *uhid; uhid = kzalloc(sizeof(*uhid), GFP_KERNEL); if (!uhid) return -ENOMEM; mutex_init(&uhid->devlock); mutex_init(&uhid->report_lock); spin_lock_init(&uhid->qlock); init_waitqueue_head(&uhid->waitq); init_waitqueue_head(&uhid->report_wait); uhid->running = false; INIT_WORK(&uhid->worker, uhid_device_add_worker); file->private_data = uhid; stream_open(inode, file); return 0; } static int uhid_char_release(struct inode *inode, struct file *file) { struct uhid_device *uhid = file->private_data; unsigned int i; uhid_dev_destroy(uhid); for (i = 0; i < UHID_BUFSIZE; ++i) kfree(uhid->outq[i]); kfree(uhid); return 0; } static ssize_t uhid_char_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct uhid_device *uhid = file->private_data; int ret; unsigned long flags; size_t len; /* they need at least the "type" member of uhid_event */ if (count < sizeof(__u32)) return -EINVAL; try_again: if (file->f_flags & O_NONBLOCK) { if (uhid->head == uhid->tail) return -EAGAIN; } else { ret = wait_event_interruptible(uhid->waitq, uhid->head != uhid->tail); if (ret) return ret; } ret = mutex_lock_interruptible(&uhid->devlock); if (ret) return ret; if (uhid->head == uhid->tail) { mutex_unlock(&uhid->devlock); goto try_again; } else { len = min(count, sizeof(**uhid->outq)); if (copy_to_user(buffer, uhid->outq[uhid->tail], len)) { ret = -EFAULT; } else { kfree(uhid->outq[uhid->tail]); 
uhid->outq[uhid->tail] = NULL; spin_lock_irqsave(&uhid->qlock, flags); uhid->tail = (uhid->tail + 1) % UHID_BUFSIZE; spin_unlock_irqrestore(&uhid->qlock, flags); } } mutex_unlock(&uhid->devlock); return ret ? ret : len; } static ssize_t uhid_char_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) { struct uhid_device *uhid = file->private_data; int ret; size_t len; /* we need at least the "type" member of uhid_event */ if (count < sizeof(__u32)) return -EINVAL; ret = mutex_lock_interruptible(&uhid->devlock); if (ret) return ret; memset(&uhid->input_buf, 0, sizeof(uhid->input_buf)); len = min(count, sizeof(uhid->input_buf)); ret = uhid_event_from_user(buffer, len, &uhid->input_buf); if (ret) goto unlock; switch (uhid->input_buf.type) { case UHID_CREATE: /* * 'struct uhid_create_req' contains a __user pointer which is * copied from, so it's unsafe to allow this with elevated * privileges (e.g. from a setuid binary) or via kernel_write(). */ if (file->f_cred != current_cred()) { pr_err_once("UHID_CREATE from different security context by process %d (%s), this is not allowed.\n", task_tgid_vnr(current), current->comm); ret = -EACCES; goto unlock; } ret = uhid_dev_create(uhid, &uhid->input_buf); break; case UHID_CREATE2: ret = uhid_dev_create2(uhid, &uhid->input_buf); break; case UHID_DESTROY: ret = uhid_dev_destroy(uhid); break; case UHID_INPUT: ret = uhid_dev_input(uhid, &uhid->input_buf); break; case UHID_INPUT2: ret = uhid_dev_input2(uhid, &uhid->input_buf); break; case UHID_GET_REPORT_REPLY: ret = uhid_dev_get_report_reply(uhid, &uhid->input_buf); break; case UHID_SET_REPORT_REPLY: ret = uhid_dev_set_report_reply(uhid, &uhid->input_buf); break; default: ret = -EOPNOTSUPP; } unlock: mutex_unlock(&uhid->devlock); /* return "count" not "len" to not confuse the caller */ return ret ? ret : count; } static __poll_t uhid_char_poll(struct file *file, poll_table *wait) { struct uhid_device *uhid = file->private_data; __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uhid is always writable */ poll_wait(file, &uhid->waitq, wait); if (uhid->head != uhid->tail) mask |= EPOLLIN | EPOLLRDNORM; return mask; } static const struct file_operations uhid_fops = { .owner = THIS_MODULE, .open = uhid_char_open, .release = uhid_char_release, .read = uhid_char_read, .write = uhid_char_write, .poll = uhid_char_poll, }; static struct miscdevice uhid_misc = { .fops = &uhid_fops, .minor = UHID_MINOR, .name = UHID_NAME, }; module_misc_device(uhid_misc); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>"); MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem"); MODULE_ALIAS_MISCDEV(UHID_MINOR); MODULE_ALIAS("devname:" UHID_NAME); |
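/*
 * Editor's illustration, not part of the kernel source above: a minimal
 * userspace sketch of the char-device protocol implemented by this file.
 * It creates a virtual HID device with UHID_CREATE2 and then destroys it.
 * The 8-byte report descriptor is a placeholder (an empty vendor-defined
 * collection), just enough for uhid_hid_parse() to accept; the vendor and
 * product IDs are made up.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/input.h>	/* BUS_USB */
#include <linux/uhid.h>

int main(void)
{
	static const unsigned char rdesc[] = {
		0x06, 0x00, 0xff,	/* Usage Page (Vendor Defined)	*/
		0x09, 0x01,		/* Usage (Vendor Usage 1)	*/
		0xa1, 0x01,		/* Collection (Application)	*/
		0xc0,			/* End Collection		*/
	};
	struct uhid_event ev;
	int fd;

	fd = open("/dev/uhid", O_RDWR | O_CLOEXEC);
	if (fd < 0) {
		perror("open /dev/uhid");
		return 1;
	}

	memset(&ev, 0, sizeof(ev));
	ev.type = UHID_CREATE2;
	strcpy((char *)ev.u.create2.name, "uhid-example-device");
	memcpy(ev.u.create2.rd_data, rdesc, sizeof(rdesc));
	ev.u.create2.rd_size = sizeof(rdesc);
	ev.u.create2.bus = BUS_USB;
	ev.u.create2.vendor = 0x1234;	/* placeholder IDs */
	ev.u.create2.product = 0x5678;

	/* handled by uhid_char_write() -> uhid_dev_create2() above */
	if (write(fd, &ev, sizeof(ev)) < 0) {
		perror("write(UHID_CREATE2)");
		return 1;
	}

	/* a read(fd, &ev, sizeof(ev)) here would deliver UHID_START/UHID_OPEN */

	memset(&ev, 0, sizeof(ev));
	ev.type = UHID_DESTROY;
	write(fd, &ev, sizeof(ev));
	close(fd);
	return 0;
}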
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/stat.c * * Copyright (C) 1991, 1992 Linus Torvalds */ #include <linux/blkdev.h> #include <linux/export.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/file.h> #include <linux/highuid.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/security.h> #include <linux/cred.h> #include <linux/syscalls.h> #include <linux/pagemap.h> #include <linux/compat.h> #include <linux/iversion.h> #include <linux/uaccess.h> #include <asm/unistd.h> #include <trace/events/timestamp.h> #include "internal.h" #include "mount.h" /** * fill_mg_cmtime - Fill in the mtime and ctime and flag ctime as QUERIED * @stat: where to store the resulting values * @request_mask: STATX_* values requested * @inode: inode from which to grab the c/mtime * * Given @inode, grab the ctime and mtime out of it and store the result * in @stat. When fetching the value, flag it as QUERIED (if not already) * so the next write will record a distinct timestamp. * * NB: The QUERIED flag is tracked in the ctime, but we set it there even * if only the mtime was requested, as that ensures that the next mtime * change will be distinct. */ void fill_mg_cmtime(struct kstat *stat, u32 request_mask, struct inode *inode) { atomic_t *pcn = (atomic_t *)&inode->i_ctime_nsec; /* If neither time was requested, then don't report them */ if (!(request_mask & (STATX_CTIME|STATX_MTIME))) { stat->result_mask &= ~(STATX_CTIME|STATX_MTIME); return; } stat->mtime = inode_get_mtime(inode); stat->ctime.tv_sec = inode->i_ctime_sec; stat->ctime.tv_nsec = (u32)atomic_read(pcn); if (!(stat->ctime.tv_nsec & I_CTIME_QUERIED)) stat->ctime.tv_nsec = ((u32)atomic_fetch_or(I_CTIME_QUERIED, pcn)); stat->ctime.tv_nsec &= ~I_CTIME_QUERIED; trace_fill_mg_cmtime(inode, &stat->ctime, &stat->mtime); } EXPORT_SYMBOL(fill_mg_cmtime); /** * generic_fillattr - Fill in the basic attributes from the inode struct * @idmap: idmap of the mount the inode was found from * @request_mask: statx request_mask * @inode: Inode to use as the source * @stat: Where to fill in the attributes * * Fill in the basic attributes in the kstat structure from data that's to be * found on the VFS inode structure. This is the default if no getattr inode * operation is supplied. * * If the inode has been found through an idmapped mount the idmap of * the vfsmount must be passed through @idmap. This function will then * take care to map the inode according to @idmap before filling in the * uid and gid fields. On non-idmapped mounts or if permission checking is to be * performed on the raw inode simply pass @nop_mnt_idmap.
*/ void generic_fillattr(struct mnt_idmap *idmap, u32 request_mask, struct inode *inode, struct kstat *stat) { vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode); vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode); stat->dev = inode->i_sb->s_dev; stat->ino = inode->i_ino; stat->mode = inode->i_mode; stat->nlink = inode->i_nlink; stat->uid = vfsuid_into_kuid(vfsuid); stat->gid = vfsgid_into_kgid(vfsgid); stat->rdev = inode->i_rdev; stat->size = i_size_read(inode); stat->atime = inode_get_atime(inode); if (is_mgtime(inode)) { fill_mg_cmtime(stat, request_mask, inode); } else { stat->ctime = inode_get_ctime(inode); stat->mtime = inode_get_mtime(inode); } stat->blksize = i_blocksize(inode); stat->blocks = inode->i_blocks; if ((request_mask & STATX_CHANGE_COOKIE) && IS_I_VERSION(inode)) { stat->result_mask |= STATX_CHANGE_COOKIE; stat->change_cookie = inode_query_iversion(inode); } } EXPORT_SYMBOL(generic_fillattr); /** * generic_fill_statx_attr - Fill in the statx attributes from the inode flags * @inode: Inode to use as the source * @stat: Where to fill in the attribute flags * * Fill in the STATX_ATTR_* flags in the kstat structure for properties of the * inode that are published on i_flags and enforced by the VFS. */ void generic_fill_statx_attr(struct inode *inode, struct kstat *stat) { if (inode->i_flags & S_IMMUTABLE) stat->attributes |= STATX_ATTR_IMMUTABLE; if (inode->i_flags & S_APPEND) stat->attributes |= STATX_ATTR_APPEND; stat->attributes_mask |= KSTAT_ATTR_VFS_FLAGS; } EXPORT_SYMBOL(generic_fill_statx_attr); /** * generic_fill_statx_atomic_writes - Fill in atomic writes statx attributes * @stat: Where to fill in the attribute flags * @unit_min: Minimum supported atomic write length in bytes * @unit_max: Maximum supported atomic write length in bytes * @unit_max_opt: Optimised maximum supported atomic write length in bytes * * Fill in the STATX{_ATTR}_WRITE_ATOMIC flags in the kstat structure from * atomic write unit_min and unit_max values. */ void generic_fill_statx_atomic_writes(struct kstat *stat, unsigned int unit_min, unsigned int unit_max, unsigned int unit_max_opt) { /* Confirm that the request type is known */ stat->result_mask |= STATX_WRITE_ATOMIC; /* Confirm that the file attribute type is known */ stat->attributes_mask |= STATX_ATTR_WRITE_ATOMIC; if (unit_min) { stat->atomic_write_unit_min = unit_min; stat->atomic_write_unit_max = unit_max; stat->atomic_write_unit_max_opt = unit_max_opt; /* Initially only allow 1x segment */ stat->atomic_write_segments_max = 1; /* Confirm atomic writes are actually supported */ stat->attributes |= STATX_ATTR_WRITE_ATOMIC; } } EXPORT_SYMBOL_GPL(generic_fill_statx_atomic_writes); /** * vfs_getattr_nosec - getattr without security checks * @path: file to get attributes from * @stat: structure to return attributes in * @request_mask: STATX_xxx flags indicating what the caller wants * @query_flags: Query mode (AT_STATX_SYNC_TYPE) * * Get attributes without calling security_inode_getattr. * * Currently the only caller other than vfs_getattr is internal to the * filehandle lookup code, which uses only the inode number and returns no * attributes to any user. Any other code probably wants vfs_getattr. 
*/ int vfs_getattr_nosec(const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { struct mnt_idmap *idmap; struct inode *inode = d_backing_inode(path->dentry); memset(stat, 0, sizeof(*stat)); stat->result_mask |= STATX_BASIC_STATS; query_flags &= AT_STATX_SYNC_TYPE; /* allow the fs to override these if it really wants to */ /* SB_NOATIME means filesystem supplies dummy atime value */ if (inode->i_sb->s_flags & SB_NOATIME) stat->result_mask &= ~STATX_ATIME; /* * Note: If you add another clause to set an attribute flag, please * update attributes_mask below. */ if (IS_AUTOMOUNT(inode)) stat->attributes |= STATX_ATTR_AUTOMOUNT; if (IS_DAX(inode)) stat->attributes |= STATX_ATTR_DAX; stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT | STATX_ATTR_DAX); idmap = mnt_idmap(path->mnt); if (inode->i_op->getattr) { int ret; ret = inode->i_op->getattr(idmap, path, stat, request_mask, query_flags); if (ret) return ret; } else { generic_fillattr(idmap, request_mask, inode, stat); } /* * If this is a block device inode, override the filesystem attributes * with the block device specific parameters that need to be obtained * from the bdev backing inode. */ if (S_ISBLK(stat->mode)) bdev_statx(path, stat, request_mask); return 0; } EXPORT_SYMBOL(vfs_getattr_nosec); /* * vfs_getattr - Get the enhanced basic attributes of a file * @path: The file of interest * @stat: Where to return the statistics * @request_mask: STATX_xxx flags indicating what the caller wants * @query_flags: Query mode (AT_STATX_SYNC_TYPE) * * Ask the filesystem for a file's attributes. The caller must set * request_mask and query_flags to indicate what they want. * * If the file is remote, the filesystem can be forced to update the attributes * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can * suppress the update by passing AT_STATX_DONT_SYNC. * * Bits must have been set in request_mask to indicate which attributes the * caller wants retrieved. Any such attribute not requested may be returned * anyway, but the value may be approximate, and, if remote, may not have been * synchronised with the server. * * 0 will be returned on success, and a -ve error code if unsuccessful. */ int vfs_getattr(const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { int retval; retval = security_inode_getattr(path); if (unlikely(retval)) return retval; return vfs_getattr_nosec(path, stat, request_mask, query_flags); } EXPORT_SYMBOL(vfs_getattr); /** * vfs_fstat - Get the basic attributes by file descriptor * @fd: The file descriptor referring to the file of interest * @stat: The result structure to fill in. * * This function is a wrapper around vfs_getattr(). The main difference is * that it uses a file descriptor to determine the file location. * * 0 will be returned on success, and a -ve error code if unsuccessful.
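 *
 * A minimal in-kernel usage sketch (illustrative only, not a caller in
 * this file):
 *
 *	struct kstat stat;
 *	int err = vfs_fstat(fd, &stat);
 *
 *	if (!err)
 *		pr_info("size=%lld\n", stat.size);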
*/ int vfs_fstat(int fd, struct kstat *stat) { CLASS(fd_raw, f)(fd); if (fd_empty(f)) return -EBADF; return vfs_getattr(&fd_file(f)->f_path, stat, STATX_BASIC_STATS, 0); } static int statx_lookup_flags(int flags) { int lookup_flags = 0; if (!(flags & AT_SYMLINK_NOFOLLOW)) lookup_flags |= LOOKUP_FOLLOW; if (!(flags & AT_NO_AUTOMOUNT)) lookup_flags |= LOOKUP_AUTOMOUNT; return lookup_flags; } static int vfs_statx_path(struct path *path, int flags, struct kstat *stat, u32 request_mask) { int error = vfs_getattr(path, stat, request_mask, flags); if (error) return error; if (request_mask & STATX_MNT_ID_UNIQUE) { stat->mnt_id = real_mount(path->mnt)->mnt_id_unique; stat->result_mask |= STATX_MNT_ID_UNIQUE; } else { stat->mnt_id = real_mount(path->mnt)->mnt_id; stat->result_mask |= STATX_MNT_ID; } if (path_mounted(path)) stat->attributes |= STATX_ATTR_MOUNT_ROOT; stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT; return 0; } static int vfs_statx_fd(int fd, int flags, struct kstat *stat, u32 request_mask) { CLASS(fd_raw, f)(fd); if (fd_empty(f)) return -EBADF; return vfs_statx_path(&fd_file(f)->f_path, flags, stat, request_mask); } /** * vfs_statx - Get basic and extra attributes by filename * @dfd: A file descriptor representing the base dir for a relative filename * @filename: The name of the file of interest * @flags: Flags to control the query * @stat: The result structure to fill in. * @request_mask: STATX_xxx flags indicating what the caller wants * * This function is a wrapper around vfs_getattr(). The main difference is * that it uses a filename and base directory to determine the file location. * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink * at the given name from being referenced. * * 0 will be returned on success, and a -ve error code if unsuccessful. */ static int vfs_statx(int dfd, struct filename *filename, int flags, struct kstat *stat, u32 request_mask) { struct path path; unsigned int lookup_flags = statx_lookup_flags(flags); int error; if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH | AT_STATX_SYNC_TYPE)) return -EINVAL; retry: error = filename_lookup(dfd, filename, lookup_flags, &path, NULL); if (error) return error; error = vfs_statx_path(&path, flags, stat, request_mask); path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } return error; } int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat, int flags) { int ret; int statx_flags = flags | AT_NO_AUTOMOUNT; struct filename *name = getname_maybe_null(filename, flags); if (!name && dfd >= 0) return vfs_fstat(dfd, stat); ret = vfs_statx(dfd, name, statx_flags, stat, STATX_BASIC_STATS); putname(name); return ret; } #ifdef __ARCH_WANT_OLD_STAT /* * For backward compatibility? Maybe this should be moved * into arch/i386 instead? */ static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf) { static int warncount = 5; struct __old_kernel_stat tmp; if (warncount > 0) { warncount--; printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n", current->comm); } else if (warncount < 0) { /* it's laughable, but... 
*/ warncount = 0; } memset(&tmp, 0, sizeof(struct __old_kernel_stat)); tmp.st_dev = old_encode_dev(stat->dev); tmp.st_ino = stat->ino; if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino) return -EOVERFLOW; tmp.st_mode = stat->mode; tmp.st_nlink = stat->nlink; if (tmp.st_nlink != stat->nlink) return -EOVERFLOW; SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid)); SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid)); tmp.st_rdev = old_encode_dev(stat->rdev); #if BITS_PER_LONG == 32 if (stat->size > MAX_NON_LFS) return -EOVERFLOW; #endif tmp.st_size = stat->size; tmp.st_atime = stat->atime.tv_sec; tmp.st_mtime = stat->mtime.tv_sec; tmp.st_ctime = stat->ctime.tv_sec; return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0; } SYSCALL_DEFINE2(stat, const char __user *, filename, struct __old_kernel_stat __user *, statbuf) { struct kstat stat; int error; error = vfs_stat(filename, &stat); if (unlikely(error)) return error; return cp_old_stat(&stat, statbuf); } SYSCALL_DEFINE2(lstat, const char __user *, filename, struct __old_kernel_stat __user *, statbuf) { struct kstat stat; int error; error = vfs_lstat(filename, &stat); if (unlikely(error)) return error; return cp_old_stat(&stat, statbuf); } SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf) { struct kstat stat; int error; error = vfs_fstat(fd, &stat); if (unlikely(error)) return error; return cp_old_stat(&stat, statbuf); } #endif /* __ARCH_WANT_OLD_STAT */ #ifdef __ARCH_WANT_NEW_STAT #ifndef INIT_STRUCT_STAT_PADDING # define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st)) #endif static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf) { struct stat tmp; if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev)) return -EOVERFLOW; if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev)) return -EOVERFLOW; #if BITS_PER_LONG == 32 if (stat->size > MAX_NON_LFS) return -EOVERFLOW; #endif INIT_STRUCT_STAT_PADDING(tmp); tmp.st_dev = new_encode_dev(stat->dev); tmp.st_ino = stat->ino; if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino) return -EOVERFLOW; tmp.st_mode = stat->mode; tmp.st_nlink = stat->nlink; if (tmp.st_nlink != stat->nlink) return -EOVERFLOW; SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid)); SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid)); tmp.st_rdev = new_encode_dev(stat->rdev); tmp.st_size = stat->size; tmp.st_atime = stat->atime.tv_sec; tmp.st_mtime = stat->mtime.tv_sec; tmp.st_ctime = stat->ctime.tv_sec; #ifdef STAT_HAVE_NSEC tmp.st_atime_nsec = stat->atime.tv_nsec; tmp.st_mtime_nsec = stat->mtime.tv_nsec; tmp.st_ctime_nsec = stat->ctime.tv_nsec; #endif tmp.st_blocks = stat->blocks; tmp.st_blksize = stat->blksize; return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? 
-EFAULT : 0; } SYSCALL_DEFINE2(newstat, const char __user *, filename, struct stat __user *, statbuf) { struct kstat stat; int error; error = vfs_stat(filename, &stat); if (unlikely(error)) return error; return cp_new_stat(&stat, statbuf); } SYSCALL_DEFINE2(newlstat, const char __user *, filename, struct stat __user *, statbuf) { struct kstat stat; int error; error = vfs_lstat(filename, &stat); if (unlikely(error)) return error; return cp_new_stat(&stat, statbuf); } #if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT) SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename, struct stat __user *, statbuf, int, flag) { struct kstat stat; int error; error = vfs_fstatat(dfd, filename, &stat, flag); if (unlikely(error)) return error; return cp_new_stat(&stat, statbuf); } #endif SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf) { struct kstat stat; int error; error = vfs_fstat(fd, &stat); if (unlikely(error)) return error; return cp_new_stat(&stat, statbuf); } #endif static int do_readlinkat(int dfd, const char __user *pathname, char __user *buf, int bufsiz) { struct path path; struct filename *name; int error; unsigned int lookup_flags = LOOKUP_EMPTY; if (bufsiz <= 0) return -EINVAL; retry: name = getname_flags(pathname, lookup_flags); error = filename_lookup(dfd, name, lookup_flags, &path, NULL); if (unlikely(error)) { putname(name); return error; } /* * AFS mountpoints allow readlink(2) but are not symlinks */ if (d_is_symlink(path.dentry) || d_backing_inode(path.dentry)->i_op->readlink) { error = security_inode_readlink(path.dentry); if (!error) { touch_atime(&path); error = vfs_readlink(path.dentry, buf, bufsiz); } } else { error = (name->name[0] == '\0') ? -ENOENT : -EINVAL; } path_put(&path); putname(name); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } return error; } SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname, char __user *, buf, int, bufsiz) { return do_readlinkat(dfd, pathname, buf, bufsiz); } SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf, int, bufsiz) { return do_readlinkat(AT_FDCWD, path, buf, bufsiz); } /* ---------- LFS-64 ----------- */ #if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64) #ifndef INIT_STRUCT_STAT64_PADDING # define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st)) #endif static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf) { struct stat64 tmp; INIT_STRUCT_STAT64_PADDING(tmp); #ifdef CONFIG_MIPS /* mips has weird padding, so we don't get 64 bits there */ tmp.st_dev = new_encode_dev(stat->dev); tmp.st_rdev = new_encode_dev(stat->rdev); #else tmp.st_dev = huge_encode_dev(stat->dev); tmp.st_rdev = huge_encode_dev(stat->rdev); #endif tmp.st_ino = stat->ino; if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino) return -EOVERFLOW; #ifdef STAT64_HAS_BROKEN_ST_INO tmp.__st_ino = stat->ino; #endif tmp.st_mode = stat->mode; tmp.st_nlink = stat->nlink; tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid); tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid); tmp.st_atime = stat->atime.tv_sec; tmp.st_atime_nsec = stat->atime.tv_nsec; tmp.st_mtime = stat->mtime.tv_sec; tmp.st_mtime_nsec = stat->mtime.tv_nsec; tmp.st_ctime = stat->ctime.tv_sec; tmp.st_ctime_nsec = stat->ctime.tv_nsec; tmp.st_size = stat->size; tmp.st_blocks = stat->blocks; tmp.st_blksize = stat->blksize; return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? 
-EFAULT : 0; } SYSCALL_DEFINE2(stat64, const char __user *, filename, struct stat64 __user *, statbuf) { struct kstat stat; int error = vfs_stat(filename, &stat); if (!error) error = cp_new_stat64(&stat, statbuf); return error; } SYSCALL_DEFINE2(lstat64, const char __user *, filename, struct stat64 __user *, statbuf) { struct kstat stat; int error = vfs_lstat(filename, &stat); if (!error) error = cp_new_stat64(&stat, statbuf); return error; } SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf) { struct kstat stat; int error = vfs_fstat(fd, &stat); if (!error) error = cp_new_stat64(&stat, statbuf); return error; } SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename, struct stat64 __user *, statbuf, int, flag) { struct kstat stat; int error; error = vfs_fstatat(dfd, filename, &stat, flag); if (error) return error; return cp_new_stat64(&stat, statbuf); } #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */ static noinline_for_stack int cp_statx(const struct kstat *stat, struct statx __user *buffer) { struct statx tmp; memset(&tmp, 0, sizeof(tmp)); /* STATX_CHANGE_COOKIE is kernel-only for now */ tmp.stx_mask = stat->result_mask & ~STATX_CHANGE_COOKIE; tmp.stx_blksize = stat->blksize; /* STATX_ATTR_CHANGE_MONOTONIC is kernel-only for now */ tmp.stx_attributes = stat->attributes & ~STATX_ATTR_CHANGE_MONOTONIC; tmp.stx_nlink = stat->nlink; tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid); tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid); tmp.stx_mode = stat->mode; tmp.stx_ino = stat->ino; tmp.stx_size = stat->size; tmp.stx_blocks = stat->blocks; tmp.stx_attributes_mask = stat->attributes_mask; tmp.stx_atime.tv_sec = stat->atime.tv_sec; tmp.stx_atime.tv_nsec = stat->atime.tv_nsec; tmp.stx_btime.tv_sec = stat->btime.tv_sec; tmp.stx_btime.tv_nsec = stat->btime.tv_nsec; tmp.stx_ctime.tv_sec = stat->ctime.tv_sec; tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec; tmp.stx_mtime.tv_sec = stat->mtime.tv_sec; tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec; tmp.stx_rdev_major = MAJOR(stat->rdev); tmp.stx_rdev_minor = MINOR(stat->rdev); tmp.stx_dev_major = MAJOR(stat->dev); tmp.stx_dev_minor = MINOR(stat->dev); tmp.stx_mnt_id = stat->mnt_id; tmp.stx_dio_mem_align = stat->dio_mem_align; tmp.stx_dio_offset_align = stat->dio_offset_align; tmp.stx_dio_read_offset_align = stat->dio_read_offset_align; tmp.stx_subvol = stat->subvol; tmp.stx_atomic_write_unit_min = stat->atomic_write_unit_min; tmp.stx_atomic_write_unit_max = stat->atomic_write_unit_max; tmp.stx_atomic_write_segments_max = stat->atomic_write_segments_max; tmp.stx_atomic_write_unit_max_opt = stat->atomic_write_unit_max_opt; return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0; } int do_statx(int dfd, struct filename *filename, unsigned int flags, unsigned int mask, struct statx __user *buffer) { struct kstat stat; int error; if (mask & STATX__RESERVED) return -EINVAL; if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE) return -EINVAL; /* * STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests * from userland. */ mask &= ~STATX_CHANGE_COOKIE; error = vfs_statx(dfd, filename, flags, &stat, mask); if (error) return error; return cp_statx(&stat, buffer); } int do_statx_fd(int fd, unsigned int flags, unsigned int mask, struct statx __user *buffer) { struct kstat stat; int error; if (mask & STATX__RESERVED) return -EINVAL; if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE) return -EINVAL; /* * STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests * from userland. 
*/ mask &= ~STATX_CHANGE_COOKIE; error = vfs_statx_fd(fd, flags, &stat, mask); if (error) return error; return cp_statx(&stat, buffer); } /** * sys_statx - System call to get enhanced stats * @dfd: Base directory to pathwalk from *or* fd to stat. * @filename: File to stat or either NULL or "" with AT_EMPTY_PATH * @flags: AT_* flags to control pathwalk. * @mask: Parts of statx struct actually required. * @buffer: Result buffer. * * Note that fstat() can be emulated by setting dfd to the fd of interest, * supplying "" (or preferably NULL) as the filename and setting AT_EMPTY_PATH * in the flags. */ SYSCALL_DEFINE5(statx, int, dfd, const char __user *, filename, unsigned, flags, unsigned int, mask, struct statx __user *, buffer) { int ret; struct filename *name = getname_maybe_null(filename, flags); if (!name && dfd >= 0) return do_statx_fd(dfd, flags & ~AT_NO_AUTOMOUNT, mask, buffer); ret = do_statx(dfd, name, flags, mask, buffer); putname(name); return ret; } #if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_STAT) static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf) { struct compat_stat tmp; if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev)) return -EOVERFLOW; if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev)) return -EOVERFLOW; memset(&tmp, 0, sizeof(tmp)); tmp.st_dev = new_encode_dev(stat->dev); tmp.st_ino = stat->ino; if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino) return -EOVERFLOW; tmp.st_mode = stat->mode; tmp.st_nlink = stat->nlink; if (tmp.st_nlink != stat->nlink) return -EOVERFLOW; SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid)); SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid)); tmp.st_rdev = new_encode_dev(stat->rdev); if ((u64) stat->size > MAX_NON_LFS) return -EOVERFLOW; tmp.st_size = stat->size; tmp.st_atime = stat->atime.tv_sec; tmp.st_atime_nsec = stat->atime.tv_nsec; tmp.st_mtime = stat->mtime.tv_sec; tmp.st_mtime_nsec = stat->mtime.tv_nsec; tmp.st_ctime = stat->ctime.tv_sec; tmp.st_ctime_nsec = stat->ctime.tv_nsec; tmp.st_blocks = stat->blocks; tmp.st_blksize = stat->blksize; return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0; } COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename, struct compat_stat __user *, statbuf) { struct kstat stat; int error; error = vfs_stat(filename, &stat); if (error) return error; return cp_compat_stat(&stat, statbuf); } COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename, struct compat_stat __user *, statbuf) { struct kstat stat; int error; error = vfs_lstat(filename, &stat); if (error) return error; return cp_compat_stat(&stat, statbuf); } #ifndef __ARCH_WANT_STAT64 COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd, const char __user *, filename, struct compat_stat __user *, statbuf, int, flag) { struct kstat stat; int error; error = vfs_fstatat(dfd, filename, &stat, flag); if (error) return error; return cp_compat_stat(&stat, statbuf); } #endif COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct compat_stat __user *, statbuf) { struct kstat stat; int error = vfs_fstat(fd, &stat); if (!error) error = cp_compat_stat(&stat, statbuf); return error; } #endif /* Caller is here responsible for sufficient locking (ie. 
inode->i_lock) */ void __inode_add_bytes(struct inode *inode, loff_t bytes) { inode->i_blocks += bytes >> 9; bytes &= 511; inode->i_bytes += bytes; if (inode->i_bytes >= 512) { inode->i_blocks++; inode->i_bytes -= 512; } } EXPORT_SYMBOL(__inode_add_bytes); void inode_add_bytes(struct inode *inode, loff_t bytes) { spin_lock(&inode->i_lock); __inode_add_bytes(inode, bytes); spin_unlock(&inode->i_lock); } EXPORT_SYMBOL(inode_add_bytes); void __inode_sub_bytes(struct inode *inode, loff_t bytes) { inode->i_blocks -= bytes >> 9; bytes &= 511; if (inode->i_bytes < bytes) { inode->i_blocks--; inode->i_bytes += 512; } inode->i_bytes -= bytes; } EXPORT_SYMBOL(__inode_sub_bytes); void inode_sub_bytes(struct inode *inode, loff_t bytes) { spin_lock(&inode->i_lock); __inode_sub_bytes(inode, bytes); spin_unlock(&inode->i_lock); } EXPORT_SYMBOL(inode_sub_bytes); loff_t inode_get_bytes(struct inode *inode) { loff_t ret; spin_lock(&inode->i_lock); ret = __inode_get_bytes(inode); spin_unlock(&inode->i_lock); return ret; } EXPORT_SYMBOL(inode_get_bytes); void inode_set_bytes(struct inode *inode, loff_t bytes) { /* Caller is here responsible for sufficient locking * (ie. inode->i_lock) */ inode->i_blocks = bytes >> 9; inode->i_bytes = bytes & 511; } EXPORT_SYMBOL(inode_set_bytes);
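/*
 * Illustrative userspace counterpart to the statx() syscall implemented
 * above (a sketch assuming the glibc 2.28+ statx() wrapper; not part of
 * fs/stat.c):
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/stat.h>
 *
 *	int main(void)
 *	{
 *		struct statx stx;
 *
 *		if (statx(AT_FDCWD, "/etc/hostname", AT_STATX_SYNC_AS_STAT,
 *			  STATX_BASIC_STATS | STATX_BTIME, &stx))
 *			return 1;
 *		printf("size=%llu blocks=%llu\n",
 *		       (unsigned long long)stx.stx_size,
 *		       (unsigned long long)stx.stx_blocks);
 *		return 0;
 *	}
 */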
// SPDX-License-Identifier: GPL-2.0-only /* * vivid-kthread-cap.c - video/vbi capture thread support functions. * * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved. */ #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/font.h> #include <linux/mutex.h> #include <linux/videodev2.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <linux/random.h> #include <linux/v4l2-dv-timings.h> #include <linux/jiffies.h> #include <asm/div64.h> #include <media/videobuf2-vmalloc.h> #include <media/v4l2-dv-timings.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-fh.h> #include <media/v4l2-event.h> #include <media/v4l2-rect.h> #include "vivid-core.h" #include "vivid-vid-common.h" #include "vivid-vid-cap.h" #include "vivid-vid-out.h" #include "vivid-radio-common.h" #include "vivid-radio-rx.h" #include "vivid-radio-tx.h" #include "vivid-sdr-cap.h" #include "vivid-vbi-cap.h" #include "vivid-vbi-out.h" #include "vivid-osd.h" #include "vivid-ctrls.h" #include "vivid-kthread-cap.h" #include "vivid-meta-cap.h" static inline v4l2_std_id vivid_get_std_cap(const struct vivid_dev *dev) { if (vivid_is_sdtv_cap(dev)) return dev->std_cap[dev->input]; return 0; } static void copy_pix(struct vivid_dev *dev, int win_y, int win_x, u16 *cap, const u16 *osd) { u16 out; out = *cap; *cap = *osd; if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_CHROMAKEY) && *osd != dev->chromakey_out) return; if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY) && out == dev->chromakey_out) return; if (dev->fmt_cap->alpha_mask) { if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_GLOBAL_ALPHA) && dev->global_alpha_out) return; if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_LOCAL_ALPHA) && *cap & dev->fmt_cap->alpha_mask) return; if ((dev->fbuf_out_flags & V4L2_FBUF_FLAG_LOCAL_INV_ALPHA) && !(*cap & dev->fmt_cap->alpha_mask)) return; } *cap = out; } static void blend_line(struct vivid_dev *dev, unsigned y_offset, unsigned x_offset, u8 *vcapbuf, const u8 *vosdbuf, unsigned width, unsigned pixsize) { unsigned x; for (x = 0; x < width; x++, vcapbuf += pixsize, vosdbuf += pixsize) { copy_pix(dev, y_offset, x_offset + x, (u16 *)vcapbuf, (const u16 *)vosdbuf); } } static void scale_line(const u8 *src, u8 *dst, unsigned srcw, unsigned dstw, unsigned twopixsize) { /* Coarse scaling with Bresenham */ unsigned int_part; unsigned fract_part; unsigned src_x = 0; unsigned error = 0; unsigned x; /* * We always combine two pixels to prevent color bleed in the packed * yuv case. */ srcw /= 2; dstw /= 2; int_part = srcw / dstw; fract_part = srcw % dstw; for (x = 0; x < dstw; x++, dst += twopixsize) { memcpy(dst, src + src_x * twopixsize, twopixsize); src_x += int_part; error += fract_part; if (error >= dstw) { error -= dstw; src_x++; } } } /* * Precalculate the rectangles needed to perform video looping: * * The nominal pipeline is that the video output buffer is cropped by * crop_out, scaled to compose_out, overlaid with the output overlay, * cropped on the capture side by crop_cap and scaled again to the video * capture buffer using compose_cap.
* * To keep things efficient we calculate the intersection of compose_out * and crop_cap (since that's the only part of the video that will * actually end up in the capture buffer), determine which part of the * video output buffer that is and which part of the video capture buffer * so we can scale the video straight from the output buffer to the capture * buffer without any intermediate steps. * * If we need to deal with an output overlay, then there is no choice and * that intermediate step still has to be taken. For the output overlay * support we calculate the intersection of the framebuffer and the overlay * window (which may be partially or wholly outside of the framebuffer * itself) and the intersection of that with loop_vid_copy (i.e. the part of * the actual looped video that will be overlaid). The result is calculated * both in framebuffer coordinates (loop_fb_copy) and compose_out coordinates * (loop_vid_overlay). Finally calculate the part of the capture buffer that * will receive that overlaid video. */ static void vivid_precalc_copy_rects(struct vivid_dev *dev, struct vivid_dev *out_dev) { /* Framebuffer rectangle */ struct v4l2_rect r_fb = { 0, 0, dev->display_width, dev->display_height }; /* Overlay window rectangle in framebuffer coordinates */ struct v4l2_rect r_overlay = { out_dev->overlay_out_left, out_dev->overlay_out_top, out_dev->compose_out.width, out_dev->compose_out.height }; v4l2_rect_intersect(&dev->loop_vid_copy, &dev->crop_cap, &out_dev->compose_out); dev->loop_vid_out = dev->loop_vid_copy; v4l2_rect_scale(&dev->loop_vid_out, &out_dev->compose_out, &out_dev->crop_out); dev->loop_vid_out.left += out_dev->crop_out.left; dev->loop_vid_out.top += out_dev->crop_out.top; dev->loop_vid_cap = dev->loop_vid_copy; v4l2_rect_scale(&dev->loop_vid_cap, &dev->crop_cap, &dev->compose_cap); dprintk(dev, 1, "loop_vid_copy: (%d,%d)/%ux%u loop_vid_out: (%d,%d)/%ux%u loop_vid_cap: (%d,%d)/%ux%u\n", dev->loop_vid_copy.left, dev->loop_vid_copy.top, dev->loop_vid_copy.width, dev->loop_vid_copy.height, dev->loop_vid_out.left, dev->loop_vid_out.top, dev->loop_vid_out.width, dev->loop_vid_out.height, dev->loop_vid_cap.left, dev->loop_vid_cap.top, dev->loop_vid_cap.width, dev->loop_vid_cap.height); v4l2_rect_intersect(&r_overlay, &r_fb, &r_overlay); /* shift r_overlay to the same origin as compose_out */ r_overlay.left += out_dev->compose_out.left - out_dev->overlay_out_left; r_overlay.top += out_dev->compose_out.top - out_dev->overlay_out_top; v4l2_rect_intersect(&dev->loop_vid_overlay, &r_overlay, &dev->loop_vid_copy); dev->loop_fb_copy = dev->loop_vid_overlay; /* shift dev->loop_fb_copy back again to the fb origin */ dev->loop_fb_copy.left -= out_dev->compose_out.left - out_dev->overlay_out_left; dev->loop_fb_copy.top -= out_dev->compose_out.top - out_dev->overlay_out_top; dev->loop_vid_overlay_cap = dev->loop_vid_overlay; v4l2_rect_scale(&dev->loop_vid_overlay_cap, &dev->crop_cap, &dev->compose_cap); dprintk(dev, 1, "loop_fb_copy: (%d,%d)/%ux%u loop_vid_overlay: (%d,%d)/%ux%u loop_vid_overlay_cap: (%d,%d)/%ux%u\n", dev->loop_fb_copy.left, dev->loop_fb_copy.top, dev->loop_fb_copy.width, dev->loop_fb_copy.height, dev->loop_vid_overlay.left, dev->loop_vid_overlay.top, dev->loop_vid_overlay.width, dev->loop_vid_overlay.height, dev->loop_vid_overlay_cap.left, dev->loop_vid_overlay_cap.top, dev->loop_vid_overlay_cap.width, dev->loop_vid_overlay_cap.height); } static void *plane_vaddr(struct tpg_data *tpg, struct vivid_buffer *buf, unsigned p, unsigned bpl[TPG_MAX_PLANES], unsigned h) { 
unsigned i; void *vbuf; if (p == 0 || tpg_g_buffers(tpg) > 1) return vb2_plane_vaddr(&buf->vb.vb2_buf, p); vbuf = vb2_plane_vaddr(&buf->vb.vb2_buf, 0); for (i = 0; i < p; i++) vbuf += bpl[i] * h / tpg->vdownsampling[i]; return vbuf; } static noinline_for_stack int vivid_copy_buffer(struct vivid_dev *dev, struct vivid_dev *out_dev, unsigned p, u8 *vcapbuf, struct vivid_buffer *vid_cap_buf) { bool blank = dev->must_blank[vid_cap_buf->vb.vb2_buf.index]; struct tpg_data *tpg = &dev->tpg; struct vivid_buffer *vid_out_buf = NULL; unsigned vdiv = out_dev->fmt_out->vdownsampling[p]; unsigned twopixsize = tpg_g_twopixelsize(tpg, p); unsigned img_width = tpg_hdiv(tpg, p, dev->compose_cap.width); unsigned img_height = dev->compose_cap.height; unsigned stride_cap = tpg->bytesperline[p]; unsigned stride_out = out_dev->bytesperline_out[p]; unsigned stride_osd = dev->display_byte_stride; unsigned hmax = (img_height * tpg->perc_fill) / 100; u8 *voutbuf; u8 *vosdbuf = NULL; unsigned y; bool blend = out_dev->fbuf_out_flags; /* Coarse scaling with Bresenham */ unsigned vid_out_int_part; unsigned vid_out_fract_part; unsigned vid_out_y = 0; unsigned vid_out_error = 0; unsigned vid_overlay_int_part = 0; unsigned vid_overlay_fract_part = 0; unsigned vid_overlay_y = 0; unsigned vid_overlay_error = 0; unsigned vid_cap_left = tpg_hdiv(tpg, p, dev->loop_vid_cap.left); unsigned vid_cap_right; bool quick; vid_out_int_part = dev->loop_vid_out.height / dev->loop_vid_cap.height; vid_out_fract_part = dev->loop_vid_out.height % dev->loop_vid_cap.height; if (!list_empty(&out_dev->vid_out_active)) vid_out_buf = list_entry(out_dev->vid_out_active.next, struct vivid_buffer, list); if (vid_out_buf == NULL) return -ENODATA; vid_cap_buf->vb.field = vid_out_buf->vb.field; voutbuf = plane_vaddr(tpg, vid_out_buf, p, out_dev->bytesperline_out, out_dev->fmt_out_rect.height); if (p < out_dev->fmt_out->buffers) voutbuf += vid_out_buf->vb.vb2_buf.planes[p].data_offset; voutbuf += tpg_hdiv(tpg, p, dev->loop_vid_out.left) + (dev->loop_vid_out.top / vdiv) * stride_out; vcapbuf += tpg_hdiv(tpg, p, dev->compose_cap.left) + (dev->compose_cap.top / vdiv) * stride_cap; if (dev->loop_vid_copy.width == 0 || dev->loop_vid_copy.height == 0) { /* * If there is nothing to copy, then just fill the capture window * with black. */ for (y = 0; y < hmax / vdiv; y++, vcapbuf += stride_cap) memcpy(vcapbuf, tpg->black_line[p], img_width); return 0; } if (out_dev->overlay_out_enabled && dev->loop_vid_overlay.width && dev->loop_vid_overlay.height) { vosdbuf = dev->video_vbase; vosdbuf += (dev->loop_fb_copy.left * twopixsize) / 2 + dev->loop_fb_copy.top * stride_osd; vid_overlay_int_part = dev->loop_vid_overlay.height / dev->loop_vid_overlay_cap.height; vid_overlay_fract_part = dev->loop_vid_overlay.height % dev->loop_vid_overlay_cap.height; } vid_cap_right = tpg_hdiv(tpg, p, dev->loop_vid_cap.left + dev->loop_vid_cap.width); /* quick is true if no video scaling is needed */ quick = dev->loop_vid_out.width == dev->loop_vid_cap.width; dev->cur_scaled_line = dev->loop_vid_out.height; for (y = 0; y < hmax; y += vdiv, vcapbuf += stride_cap) { /* osdline is true if this line requires overlay blending */ bool osdline = vosdbuf && y >= dev->loop_vid_overlay_cap.top && y < dev->loop_vid_overlay_cap.top + dev->loop_vid_overlay_cap.height; /* * If this line of the capture buffer doesn't get any video, then * just fill with black. 
*/ if (y < dev->loop_vid_cap.top || y >= dev->loop_vid_cap.top + dev->loop_vid_cap.height) { memcpy(vcapbuf, tpg->black_line[p], img_width); continue; } /* fill the left border with black */ if (dev->loop_vid_cap.left) memcpy(vcapbuf, tpg->black_line[p], vid_cap_left); /* fill the right border with black */ if (vid_cap_right < img_width) memcpy(vcapbuf + vid_cap_right, tpg->black_line[p], img_width - vid_cap_right); if (quick && !osdline) { memcpy(vcapbuf + vid_cap_left, voutbuf + vid_out_y * stride_out, tpg_hdiv(tpg, p, dev->loop_vid_cap.width)); goto update_vid_out_y; } if (dev->cur_scaled_line == vid_out_y) { memcpy(vcapbuf + vid_cap_left, dev->scaled_line, tpg_hdiv(tpg, p, dev->loop_vid_cap.width)); goto update_vid_out_y; } if (!osdline) { scale_line(voutbuf + vid_out_y * stride_out, dev->scaled_line, tpg_hdiv(tpg, p, dev->loop_vid_out.width), tpg_hdiv(tpg, p, dev->loop_vid_cap.width), tpg_g_twopixelsize(tpg, p)); } else { /* * Offset in bytes within loop_vid_copy to the start of the * loop_vid_overlay rectangle. */ unsigned offset = ((dev->loop_vid_overlay.left - dev->loop_vid_copy.left) * twopixsize) / 2; u8 *osd = vosdbuf + vid_overlay_y * stride_osd; scale_line(voutbuf + vid_out_y * stride_out, dev->blended_line, dev->loop_vid_out.width, dev->loop_vid_copy.width, tpg_g_twopixelsize(tpg, p)); if (blend) blend_line(dev, vid_overlay_y + dev->loop_vid_overlay.top, dev->loop_vid_overlay.left, dev->blended_line + offset, osd, dev->loop_vid_overlay.width, twopixsize / 2); else memcpy(dev->blended_line + offset, osd, (dev->loop_vid_overlay.width * twopixsize) / 2); scale_line(dev->blended_line, dev->scaled_line, dev->loop_vid_copy.width, dev->loop_vid_cap.width, tpg_g_twopixelsize(tpg, p)); } dev->cur_scaled_line = vid_out_y; memcpy(vcapbuf + vid_cap_left, dev->scaled_line, tpg_hdiv(tpg, p, dev->loop_vid_cap.width)); update_vid_out_y: if (osdline) { vid_overlay_y += vid_overlay_int_part; vid_overlay_error += vid_overlay_fract_part; if (vid_overlay_error >= dev->loop_vid_overlay_cap.height) { vid_overlay_error -= dev->loop_vid_overlay_cap.height; vid_overlay_y++; } } vid_out_y += vid_out_int_part; vid_out_error += vid_out_fract_part; if (vid_out_error >= dev->loop_vid_cap.height / vdiv) { vid_out_error -= dev->loop_vid_cap.height / vdiv; vid_out_y++; } } if (!blank) return 0; for (; y < img_height; y += vdiv, vcapbuf += stride_cap) memcpy(vcapbuf, tpg->contrast_line[p], img_width); return 0; } static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf) { struct vivid_dev *out_dev = NULL; struct tpg_data *tpg = &dev->tpg; unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1; unsigned line_height = 16 / factor; bool is_tv = vivid_is_sdtv_cap(dev); bool is_60hz = is_tv && (dev->std_cap[dev->input] & V4L2_STD_525_60); unsigned p; int line = 1; u8 *basep[TPG_MAX_PLANES][2]; unsigned ms; char str[100]; s32 gain; buf->vb.sequence = dev->vid_cap_seq_count; v4l2_ctrl_s_ctrl(dev->ro_int32, buf->vb.sequence & 0xff); if (dev->field_cap == V4L2_FIELD_ALTERNATE) { /* * 60 Hz standards start with the bottom field, 50 Hz standards * with the top field. So if the 0-based seq_count is even, * then the field is TOP for 50 Hz and BOTTOM for 60 Hz * standards. */ buf->vb.field = ((dev->vid_cap_seq_count & 1) ^ is_60hz) ? V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP; /* * The sequence counter counts frames, not fields. So divide * by two. 
*/ buf->vb.sequence /= 2; } else { buf->vb.field = dev->field_cap; } tpg_s_field(tpg, buf->vb.field, dev->field_cap == V4L2_FIELD_ALTERNATE); tpg_s_perc_fill_blank(tpg, dev->must_blank[buf->vb.vb2_buf.index]); if (vivid_vid_can_loop(dev) && ((vivid_is_svid_cap(dev) && !VIVID_INVALID_SIGNAL(dev->std_signal_mode[dev->input])) || (vivid_is_hdmi_cap(dev) && !VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode[dev->input])))) { out_dev = vivid_input_is_connected_to(dev); /* * If the vivid instance of the output device is different * from the vivid instance of this input device, then we * must take care to properly serialize the output device to * prevent that the buffer we are copying from is being freed. * * If the output device is part of the same instance, then the * lock is already taken and there is no need to take the mutex. * * The problem with taking the mutex is that you can get * deadlocked if instance A locks instance B and vice versa. * It is not really worth trying to be very smart about this, * so just try to take the lock, and if you can't, then just * set out_dev to NULL and you will end up with a single frame * of Noise (the default test pattern in this case). */ if (out_dev && dev != out_dev && !mutex_trylock(&out_dev->mutex)) out_dev = NULL; } if (out_dev) vivid_precalc_copy_rects(dev, out_dev); for (p = 0; p < tpg_g_planes(tpg); p++) { void *vbuf = plane_vaddr(tpg, buf, p, tpg->bytesperline, tpg->buf_height); /* * The first plane of a multiplanar format has a non-zero * data_offset. This helps testing whether the application * correctly supports non-zero data offsets. */ if (p < tpg_g_buffers(tpg) && dev->fmt_cap->data_offset[p]) { memset(vbuf, dev->fmt_cap->data_offset[p] & 0xff, dev->fmt_cap->data_offset[p]); vbuf += dev->fmt_cap->data_offset[p]; } tpg_calc_text_basep(tpg, basep, p, vbuf); if (!out_dev || vivid_copy_buffer(dev, out_dev, p, vbuf, buf)) tpg_fill_plane_buffer(tpg, vivid_get_std_cap(dev), p, vbuf); } if (out_dev && dev != out_dev) mutex_unlock(&out_dev->mutex); dev->must_blank[buf->vb.vb2_buf.index] = false; /* Updates stream time, only update at the start of a new frame. */ if (dev->field_cap != V4L2_FIELD_ALTERNATE || (dev->vid_cap_seq_count & 1) == 0) dev->ms_vid_cap = jiffies_to_msecs(jiffies - dev->jiffies_vid_cap); ms = dev->ms_vid_cap; if (dev->osd_mode <= 1) { snprintf(str, sizeof(str), " %02d:%02d:%02d:%03d %u%s", (ms / (60 * 60 * 1000)) % 24, (ms / (60 * 1000)) % 60, (ms / 1000) % 60, ms % 1000, buf->vb.sequence, (dev->field_cap == V4L2_FIELD_ALTERNATE) ? (buf->vb.field == V4L2_FIELD_TOP ? 
" top" : " bottom") : ""); tpg_gen_text(tpg, basep, line++ * line_height, 16, str); } if (dev->osd_mode == 0) { snprintf(str, sizeof(str), " %dx%d, input %d ", dev->src_rect.width, dev->src_rect.height, dev->input); tpg_gen_text(tpg, basep, line++ * line_height, 16, str); gain = v4l2_ctrl_g_ctrl(dev->gain); mutex_lock(dev->ctrl_hdl_user_vid.lock); snprintf(str, sizeof(str), " brightness %3d, contrast %3d, saturation %3d, hue %d ", dev->brightness->cur.val, dev->contrast->cur.val, dev->saturation->cur.val, dev->hue->cur.val); tpg_gen_text(tpg, basep, line++ * line_height, 16, str); snprintf(str, sizeof(str), " autogain %d, gain %3d, alpha 0x%02x ", dev->autogain->cur.val, gain, dev->alpha->cur.val); mutex_unlock(dev->ctrl_hdl_user_vid.lock); tpg_gen_text(tpg, basep, line++ * line_height, 16, str); mutex_lock(dev->ctrl_hdl_user_aud.lock); snprintf(str, sizeof(str), " volume %3d, mute %d ", dev->volume->cur.val, dev->mute->cur.val); mutex_unlock(dev->ctrl_hdl_user_aud.lock); tpg_gen_text(tpg, basep, line++ * line_height, 16, str); mutex_lock(dev->ctrl_hdl_user_gen.lock); snprintf(str, sizeof(str), " int32 %d, ro_int32 %d, int64 %lld, bitmask %08x ", dev->int32->cur.val, dev->ro_int32->cur.val, *dev->int64->p_cur.p_s64, dev->bitmask->cur.val); tpg_gen_text(tpg, basep, line++ * line_height, 16, str); snprintf(str, sizeof(str), " boolean %d, menu %s, string \"%s\" ", dev->boolean->cur.val, dev->menu->qmenu[dev->menu->cur.val], dev->string->p_cur.p_char); tpg_gen_text(tpg, basep, line++ * line_height, 16, str); snprintf(str, sizeof(str), " integer_menu %lld, value %d ", dev->int_menu->qmenu_int[dev->int_menu->cur.val], dev->int_menu->cur.val); mutex_unlock(dev->ctrl_hdl_user_gen.lock); tpg_gen_text(tpg, basep, line++ * line_height, 16, str); if (dev->button_pressed) { dev->button_pressed--; snprintf(str, sizeof(str), " button pressed!"); tpg_gen_text(tpg, basep, line++ * line_height, 16, str); } if (dev->osd[0]) { if (vivid_is_hdmi_cap(dev)) { snprintf(str, sizeof(str), " OSD \"%s\"", dev->osd); tpg_gen_text(tpg, basep, line++ * line_height, 16, str); } if (dev->osd_jiffies && time_is_before_jiffies(dev->osd_jiffies + 5 * HZ)) { dev->osd[0] = 0; dev->osd_jiffies = 0; } } } } static void vivid_cap_update_frame_period(struct vivid_dev *dev) { u64 f_period; f_period = (u64)dev->timeperframe_vid_cap.numerator * 1000000000; if (WARN_ON(dev->timeperframe_vid_cap.denominator == 0)) dev->timeperframe_vid_cap.denominator = 1; do_div(f_period, dev->timeperframe_vid_cap.denominator); if (dev->field_cap == V4L2_FIELD_ALTERNATE) f_period >>= 1; /* * If "End of Frame", then offset the exposure time by 0.9 * of the frame period. */ dev->cap_frame_eof_offset = f_period * 9; do_div(dev->cap_frame_eof_offset, 10); dev->cap_frame_period = f_period; } static noinline_for_stack void vivid_thread_vid_cap_tick(struct vivid_dev *dev, int dropped_bufs) { struct vivid_buffer *vid_cap_buf = NULL; struct vivid_buffer *vbi_cap_buf = NULL; struct vivid_buffer *meta_cap_buf = NULL; u64 f_time = 0; dprintk(dev, 1, "Video Capture Thread Tick\n"); while (dropped_bufs-- > 1) tpg_update_mv_count(&dev->tpg, dev->field_cap == V4L2_FIELD_NONE || dev->field_cap == V4L2_FIELD_ALTERNATE); /* Drop a certain percentage of buffers. 
*/ if (dev->perc_dropped_buffers && get_random_u32_below(100) < dev->perc_dropped_buffers) goto update_mv; spin_lock(&dev->slock); if (!list_empty(&dev->vid_cap_active)) { vid_cap_buf = list_entry(dev->vid_cap_active.next, struct vivid_buffer, list); list_del(&vid_cap_buf->list); } if (!list_empty(&dev->vbi_cap_active)) { if (dev->field_cap != V4L2_FIELD_ALTERNATE || (dev->vbi_cap_seq_count & 1)) { vbi_cap_buf = list_entry(dev->vbi_cap_active.next, struct vivid_buffer, list); list_del(&vbi_cap_buf->list); } } if (!list_empty(&dev->meta_cap_active)) { meta_cap_buf = list_entry(dev->meta_cap_active.next, struct vivid_buffer, list); list_del(&meta_cap_buf->list); } spin_unlock(&dev->slock); if (!vid_cap_buf && !vbi_cap_buf && !meta_cap_buf) goto update_mv; f_time = ktime_get_ns() + dev->time_wrap_offset; if (vid_cap_buf) { v4l2_ctrl_request_setup(vid_cap_buf->vb.vb2_buf.req_obj.req, &dev->ctrl_hdl_vid_cap); /* Fill buffer */ vivid_fillbuff(dev, vid_cap_buf); dprintk(dev, 1, "filled buffer %d\n", vid_cap_buf->vb.vb2_buf.index); v4l2_ctrl_request_complete(vid_cap_buf->vb.vb2_buf.req_obj.req, &dev->ctrl_hdl_vid_cap); vb2_buffer_done(&vid_cap_buf->vb.vb2_buf, dev->dqbuf_error ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); dprintk(dev, 2, "vid_cap buffer %d done\n", vid_cap_buf->vb.vb2_buf.index); vid_cap_buf->vb.vb2_buf.timestamp = f_time; if (!dev->tstamp_src_is_soe) vid_cap_buf->vb.vb2_buf.timestamp += dev->cap_frame_eof_offset; } if (vbi_cap_buf) { u64 vbi_period; v4l2_ctrl_request_setup(vbi_cap_buf->vb.vb2_buf.req_obj.req, &dev->ctrl_hdl_vbi_cap); if (vbi_cap_buf->vb.vb2_buf.type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE) vivid_sliced_vbi_cap_process(dev, vbi_cap_buf); else vivid_raw_vbi_cap_process(dev, vbi_cap_buf); v4l2_ctrl_request_complete(vbi_cap_buf->vb.vb2_buf.req_obj.req, &dev->ctrl_hdl_vbi_cap); vb2_buffer_done(&vbi_cap_buf->vb.vb2_buf, dev->dqbuf_error ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); dprintk(dev, 2, "vbi_cap %d done\n", vbi_cap_buf->vb.vb2_buf.index); /* If capturing a VBI, offset by 0.05 */ vbi_period = dev->cap_frame_period * 5; do_div(vbi_period, 100); vbi_cap_buf->vb.vb2_buf.timestamp = f_time + dev->cap_frame_eof_offset + vbi_period; } if (meta_cap_buf) { v4l2_ctrl_request_setup(meta_cap_buf->vb.vb2_buf.req_obj.req, &dev->ctrl_hdl_meta_cap); vivid_meta_cap_fillbuff(dev, meta_cap_buf, f_time); v4l2_ctrl_request_complete(meta_cap_buf->vb.vb2_buf.req_obj.req, &dev->ctrl_hdl_meta_cap); vb2_buffer_done(&meta_cap_buf->vb.vb2_buf, dev->dqbuf_error ? 
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); dprintk(dev, 2, "meta_cap %d done\n", meta_cap_buf->vb.vb2_buf.index); meta_cap_buf->vb.vb2_buf.timestamp = f_time + dev->cap_frame_eof_offset; } dev->dqbuf_error = false; update_mv: /* Update the test pattern movement counters */ tpg_update_mv_count(&dev->tpg, dev->field_cap == V4L2_FIELD_NONE || dev->field_cap == V4L2_FIELD_ALTERNATE); } static int vivid_thread_vid_cap(void *data) { struct vivid_dev *dev = data; u64 numerators_since_start; u64 buffers_since_start; u64 next_jiffies_since_start; unsigned long jiffies_since_start; unsigned long cur_jiffies; unsigned wait_jiffies; unsigned numerator; unsigned denominator; int dropped_bufs; dprintk(dev, 1, "Video Capture Thread Start\n"); set_freezable(); /* Resets frame counters */ dev->cap_seq_offset = 0; dev->cap_seq_count = 0; dev->cap_seq_resync = false; dev->jiffies_vid_cap = jiffies; dev->cap_stream_start = ktime_get_ns(); if (dev->time_wrap) dev->time_wrap_offset = dev->time_wrap - dev->cap_stream_start; else dev->time_wrap_offset = 0; vivid_cap_update_frame_period(dev); for (;;) { try_to_freeze(); if (kthread_should_stop()) break; if (!mutex_trylock(&dev->mutex)) { schedule(); continue; } cur_jiffies = jiffies; if (dev->cap_seq_resync) { dev->jiffies_vid_cap = cur_jiffies; dev->cap_seq_offset = dev->cap_seq_count + 1; dev->cap_seq_count = 0; dev->cap_stream_start += dev->cap_frame_period * dev->cap_seq_offset; vivid_cap_update_frame_period(dev); dev->cap_seq_resync = false; } numerator = dev->timeperframe_vid_cap.numerator; denominator = dev->timeperframe_vid_cap.denominator; if (dev->field_cap == V4L2_FIELD_ALTERNATE) denominator *= 2; /* Calculate the number of jiffies since we started streaming */ jiffies_since_start = cur_jiffies - dev->jiffies_vid_cap; /* Get the number of buffers streamed since the start */ buffers_since_start = (u64)jiffies_since_start * denominator + (HZ * numerator) / 2; do_div(buffers_since_start, HZ * numerator); /* * After more than 0xf0000000 (rounded down to a multiple of * 'jiffies-per-day' to ease jiffies_to_msecs calculation) * jiffies have passed since we started streaming reset the * counters and keep track of the sequence offset. */ if (jiffies_since_start > JIFFIES_RESYNC) { dev->jiffies_vid_cap = cur_jiffies; dev->cap_seq_offset = buffers_since_start; buffers_since_start = 0; } dropped_bufs = buffers_since_start + dev->cap_seq_offset - dev->cap_seq_count; dev->cap_seq_count = buffers_since_start + dev->cap_seq_offset; dev->vid_cap_seq_count = dev->cap_seq_count - dev->vid_cap_seq_start; dev->vbi_cap_seq_count = dev->cap_seq_count - dev->vbi_cap_seq_start; dev->meta_cap_seq_count = dev->cap_seq_count - dev->meta_cap_seq_start; vivid_thread_vid_cap_tick(dev, dropped_bufs); /* * Calculate the number of 'numerators' streamed since we started, * including the current buffer. */ numerators_since_start = ++buffers_since_start * numerator; /* And the number of jiffies since we started */ jiffies_since_start = jiffies - dev->jiffies_vid_cap; mutex_unlock(&dev->mutex); /* * Calculate when that next buffer is supposed to start * in jiffies since we started streaming. 
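 *
 * For example (illustrative numbers only): with timeperframe = 1/30 and
 * HZ = 100, buffer N is due at roughly N * 1 * 100 / 30 jiffies after the
 * stream started, so buffer 30 is due 100 jiffies (one second) in.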
*/ next_jiffies_since_start = numerators_since_start * HZ + denominator / 2; do_div(next_jiffies_since_start, denominator); /* If it is in the past, then just schedule asap */ if (next_jiffies_since_start < jiffies_since_start) next_jiffies_since_start = jiffies_since_start; wait_jiffies = next_jiffies_since_start - jiffies_since_start; if (!time_is_after_jiffies(cur_jiffies + wait_jiffies)) continue; wait_queue_head_t wait; init_waitqueue_head(&wait); wait_event_interruptible_timeout(wait, kthread_should_stop(), cur_jiffies + wait_jiffies - jiffies); } dprintk(dev, 1, "Video Capture Thread End\n"); return 0; } static void vivid_grab_controls(struct vivid_dev *dev, bool grab) { v4l2_ctrl_grab(dev->ctrl_has_crop_cap, grab); v4l2_ctrl_grab(dev->ctrl_has_compose_cap, grab); v4l2_ctrl_grab(dev->ctrl_has_scaler_cap, grab); } int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming) { dprintk(dev, 1, "%s\n", __func__); if (dev->kthread_vid_cap) { u32 seq_count = dev->cap_seq_count + dev->seq_wrap * 128; if (pstreaming == &dev->vid_cap_streaming) dev->vid_cap_seq_start = seq_count; else if (pstreaming == &dev->vbi_cap_streaming) dev->vbi_cap_seq_start = seq_count; else dev->meta_cap_seq_start = seq_count; *pstreaming = true; return 0; } /* Resets frame counters */ tpg_init_mv_count(&dev->tpg); dev->vid_cap_seq_start = dev->seq_wrap * 128; dev->vbi_cap_seq_start = dev->seq_wrap * 128; dev->meta_cap_seq_start = dev->seq_wrap * 128; dev->kthread_vid_cap = kthread_run(vivid_thread_vid_cap, dev, "%s-vid-cap", dev->v4l2_dev.name); if (IS_ERR(dev->kthread_vid_cap)) { int err = PTR_ERR(dev->kthread_vid_cap); dev->kthread_vid_cap = NULL; v4l2_err(&dev->v4l2_dev, "kthread_run() failed\n"); return err; } *pstreaming = true; vivid_grab_controls(dev, true); dprintk(dev, 1, "returning from %s\n", __func__); return 0; } void vivid_stop_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming) { dprintk(dev, 1, "%s\n", __func__); if (dev->kthread_vid_cap == NULL) return; *pstreaming = false; if (pstreaming == &dev->vid_cap_streaming) { /* Release all active buffers */ while (!list_empty(&dev->vid_cap_active)) { struct vivid_buffer *buf; buf = list_entry(dev->vid_cap_active.next, struct vivid_buffer, list); list_del(&buf->list); v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, &dev->ctrl_hdl_vid_cap); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); dprintk(dev, 2, "vid_cap buffer %d done\n", buf->vb.vb2_buf.index); } } if (pstreaming == &dev->vbi_cap_streaming) { while (!list_empty(&dev->vbi_cap_active)) { struct vivid_buffer *buf; buf = list_entry(dev->vbi_cap_active.next, struct vivid_buffer, list); list_del(&buf->list); v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, &dev->ctrl_hdl_vbi_cap); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); dprintk(dev, 2, "vbi_cap buffer %d done\n", buf->vb.vb2_buf.index); } } if (pstreaming == &dev->meta_cap_streaming) { while (!list_empty(&dev->meta_cap_active)) { struct vivid_buffer *buf; buf = list_entry(dev->meta_cap_active.next, struct vivid_buffer, list); list_del(&buf->list); v4l2_ctrl_request_complete(buf->vb.vb2_buf.req_obj.req, &dev->ctrl_hdl_meta_cap); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); dprintk(dev, 2, "meta_cap buffer %d done\n", buf->vb.vb2_buf.index); } } if (dev->vid_cap_streaming || dev->vbi_cap_streaming || dev->meta_cap_streaming) return; /* shutdown control thread */ vivid_grab_controls(dev, false); kthread_stop(dev->kthread_vid_cap); dev->kthread_vid_cap = NULL; }
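/*
 * A worked example of the Bresenham stepping used by scale_line() above
 * (numbers chosen for illustration, not taken from the driver): scaling
 * srcw = 12 down to dstw = 8 pixel pairs gives int_part = 12 / 8 = 1 and
 * fract_part = 12 % 8 = 4. src_x then advances by 1 per output pair, plus
 * 1 extra whenever the accumulated error reaches 8, so the source pairs
 * copied are 0, 1, 3, 4, 6, 7, 9, 10.
 */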
/* SPDX-License-Identifier: GPL-2.0 * * page_pool/helpers.h * Author: Jesper Dangaard Brouer <netoptimizer@brouer.com> * Copyright (C) 2016 Red Hat, Inc. */ /** * DOC: page_pool allocator * * The page_pool allocator is optimized for recycling pages or page fragments used * by skb packets and xdp frames. * * Basic use involves replacing any alloc_pages() calls with page_pool_alloc(), * which allocates memory with or without page splitting depending on the * requested memory size. * * If the driver knows that it always requires full pages or its allocations are * always smaller than half a page, it can use one of the more specific API * calls: * * 1. page_pool_alloc_pages(): allocate memory without page splitting when the * driver knows that the memory it needs is always bigger than half of the page * allocated from the page pool. There is no cache line dirtying for 'struct page' * when a page is recycled back to the page pool. * * 2. page_pool_alloc_frag(): allocate memory with page splitting when the driver * knows that the memory it needs is always smaller than or equal to half of the * page allocated from the page pool. Page splitting enables memory saving and thus * avoids TLB/cache misses for data access, but there is also some cost to * implementing page splitting, mainly some cache line dirtying/bouncing for * 'struct page' and atomic operations for page->pp_ref_count.
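 *
 * A minimal sketch of the basic receive-path pattern (illustrative only;
 * pool creation, DMA flags and error handling are omitted, and rx_buf_len
 * and rx_desc are made-up driver names):
 *
 *	unsigned int offset, size = rx_buf_len;
 *	struct page *page = page_pool_dev_alloc(pool, &offset, &size);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	rx_desc->addr = page_pool_get_dma_addr(page) + offset;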
* * The API keeps track of in-flight pages in order to let API users know when * it is safe to free a page_pool object. To keep that accounting correct, API * users must release every page with page_pool_put_page() or * page_pool_free_va(), or attach the page to a page_pool-aware object like an * skb marked with skb_mark_for_recycle(). * * page_pool_put_page() may be called multiple times on the same page if a page * is split into multiple fragments. For the last fragment, it will either * recycle the page, or in case of page->_refcount > 1, it will release the DMA * mapping and in-flight state accounting. * * dma_sync_single_range_for_device() is only called for the last fragment when * page_pool is created with the PP_FLAG_DMA_SYNC_DEV flag, so it depends on the * last freed fragment to do the sync_for_device operation for all fragments in * the same page when a page is split. The API user must set up pool->p.max_len * and pool->p.offset correctly and ensure that page_pool_put_page() is called * with dma_sync_size being -1 for the fragment API. */ #ifndef _NET_PAGE_POOL_HELPERS_H #define _NET_PAGE_POOL_HELPERS_H #include <linux/dma-mapping.h> #include <net/page_pool/types.h> #include <net/net_debug.h> #include <net/netmem.h> #ifdef CONFIG_PAGE_POOL_STATS /* Deprecated driver-facing API, use netlink instead */ int page_pool_ethtool_stats_get_count(void); u8 *page_pool_ethtool_stats_get_strings(u8 *data); u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats); bool page_pool_get_stats(const struct page_pool *pool, struct page_pool_stats *stats); #else static inline int page_pool_ethtool_stats_get_count(void) { return 0; } static inline u8 *page_pool_ethtool_stats_get_strings(u8 *data) { return data; } static inline u64 *page_pool_ethtool_stats_get(u64 *data, const void *stats) { return data; } #endif /** * page_pool_dev_alloc_pages() - allocate a page. * @pool: pool from which to allocate * * Get a page from the page allocator or page_pool caches. */ static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool) { gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); return page_pool_alloc_pages(pool, gfp); } /** * page_pool_dev_alloc_frag() - allocate a page fragment. * @pool: pool from which to allocate * @offset: offset to the allocated page * @size: requested size * * Get a page fragment from the page allocator or page_pool caches. * * Return: allocated page fragment, otherwise return NULL. */ static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool, unsigned int *offset, unsigned int size) { gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); return page_pool_alloc_frag(pool, offset, size, gfp); } static inline netmem_ref page_pool_alloc_netmem(struct page_pool *pool, unsigned int *offset, unsigned int *size, gfp_t gfp) { unsigned int max_size = PAGE_SIZE << pool->p.order; netmem_ref netmem; if ((*size << 1) > max_size) { *size = max_size; *offset = 0; return page_pool_alloc_netmems(pool, gfp); } netmem = page_pool_alloc_frag_netmem(pool, offset, *size, gfp); if (unlikely(!netmem)) return 0; /* There is very likely not enough space for another fragment, so append * the remaining size to the current fragment to avoid the truesize * underestimation problem.
*/ if (pool->frag_offset + *size > max_size) { *size = max_size - *offset; pool->frag_offset = max_size; } return netmem; } static inline netmem_ref page_pool_dev_alloc_netmem(struct page_pool *pool, unsigned int *offset, unsigned int *size) { gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN; return page_pool_alloc_netmem(pool, offset, size, gfp); } static inline struct page *page_pool_alloc(struct page_pool *pool, unsigned int *offset, unsigned int *size, gfp_t gfp) { return netmem_to_page(page_pool_alloc_netmem(pool, offset, size, gfp)); } /** * page_pool_dev_alloc() - allocate a page or a page fragment. * @pool: pool from which to allocate * @offset: offset to the allocated page * @size: in as the requested size, out as the allocated size * * Get a page or a page fragment from the page allocator or page_pool caches, * depending on the requested size, in order to allocate memory with the least * memory utilization and performance penalty. * * Return: allocated page or page fragment, otherwise return NULL. */ static inline struct page *page_pool_dev_alloc(struct page_pool *pool, unsigned int *offset, unsigned int *size) { gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); return page_pool_alloc(pool, offset, size, gfp); } static inline void *page_pool_alloc_va(struct page_pool *pool, unsigned int *size, gfp_t gfp) { unsigned int offset; struct page *page; /* Mask off __GFP_HIGHMEM to ensure we can use page_address() */ page = page_pool_alloc(pool, &offset, size, gfp & ~__GFP_HIGHMEM); if (unlikely(!page)) return NULL; return page_address(page) + offset; } /** * page_pool_dev_alloc_va() - allocate a page or a page fragment and return its * va. * @pool: pool from which to allocate * @size: in as the requested size, out as the allocated size * * This is just a thin wrapper around the page_pool_alloc() API, and it returns * the va of the allocated page or page fragment. * * Return: the va for the allocated page or page fragment, otherwise return NULL. */ static inline void *page_pool_dev_alloc_va(struct page_pool *pool, unsigned int *size) { gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN); return page_pool_alloc_va(pool, size, gfp); } /** * page_pool_get_dma_dir() - Retrieve the stored DMA direction. * @pool: pool from which page was allocated * * Get the stored dma direction. A driver might decide to store this locally * and avoid the extra cache line from page_pool to determine the direction. */ static inline enum dma_data_direction page_pool_get_dma_dir(const struct page_pool *pool) { return pool->p.dma_dir; } static inline void page_pool_fragment_netmem(netmem_ref netmem, long nr) { atomic_long_set(netmem_get_pp_ref_count_ref(netmem), nr); } /** * page_pool_fragment_page() - split a fresh page into fragments * @page: page to split * @nr: references to set * * pp_ref_count represents the number of outstanding references to the page, * which will be freed using page_pool APIs (rather than page allocator APIs * like put_page()). Such references are usually held by page_pool-aware * objects like skbs marked for page pool recycling. * * This helper allows the caller to take (set) multiple references to a * freshly allocated page. The page must be freshly allocated (have a * pp_ref_count of 1). This is commonly done by drivers and * "fragment allocators" to save atomic operations - either when they know * upfront how many references they will need; or to take MAX references and * return the unused ones with a single atomic dec(), instead of performing * multiple atomic inc() operations.
*/ static inline void page_pool_fragment_page(struct page *page, long nr) { page_pool_fragment_netmem(page_to_netmem(page), nr); } static inline long page_pool_unref_netmem(netmem_ref netmem, long nr) { atomic_long_t *pp_ref_count = netmem_get_pp_ref_count_ref(netmem); long ret; /* If nr == pp_ref_count then we have cleared all remaining * references to the page: * 1. 'n == 1': no need to actually overwrite it. * 2. 'n != 1': overwrite it with one, which is the rare case * for pp_ref_count draining. * * The main advantage to doing this is that not only do we avoid an * atomic update, as an atomic_read is generally a much cheaper operation * than an atomic update, especially when dealing with a page that may be * referenced by only 2 or 3 users; it also unifies the pp_ref_count * handling by ensuring all pages have been partitioned into only 1 piece * initially, and only overwriting it when the page is partitioned into * more than one piece. */ if (atomic_long_read(pp_ref_count) == nr) { /* As we have ensured nr is always one for the constant case using * the BUILD_BUG_ON(), we only need to handle the non-constant case * here for pp_ref_count draining, which is a rare case. */ BUILD_BUG_ON(__builtin_constant_p(nr) && nr != 1); if (!__builtin_constant_p(nr)) atomic_long_set(pp_ref_count, 1); return 0; } ret = atomic_long_sub_return(nr, pp_ref_count); WARN_ON(ret < 0); /* We are the last user here too, reset pp_ref_count back to 1 to * ensure all pages have been partitioned into 1 piece initially; * this should be the rare case when the last two fragment users call * page_pool_unref_page() concurrently. */ if (unlikely(!ret)) atomic_long_set(pp_ref_count, 1); return ret; } static inline long page_pool_unref_page(struct page *page, long nr) { return page_pool_unref_netmem(page_to_netmem(page), nr); } static inline void page_pool_ref_netmem(netmem_ref netmem) { atomic_long_inc(netmem_get_pp_ref_count_ref(netmem)); } static inline void page_pool_ref_page(struct page *page) { page_pool_ref_netmem(page_to_netmem(page)); } static inline bool page_pool_unref_and_test(netmem_ref netmem) { /* If page_pool_unref_page() returns 0, we were the last user */ return page_pool_unref_netmem(netmem, 1) == 0; } static inline void page_pool_put_netmem(struct page_pool *pool, netmem_ref netmem, unsigned int dma_sync_size, bool allow_direct) { /* When page_pool isn't compiled-in, net/core/xdp.c doesn't * allow registering MEM_TYPE_PAGE_POOL, but shield linker. */ #ifdef CONFIG_PAGE_POOL if (!page_pool_unref_and_test(netmem)) return; page_pool_put_unrefed_netmem(pool, netmem, dma_sync_size, allow_direct); #endif } /** * page_pool_put_page() - release a reference to a page pool page * @pool: pool from which page was allocated * @page: page to release a reference on * @dma_sync_size: how much of the page may have been touched by the device * @allow_direct: released by the consumer, allow lockless caching * * The outcome of this depends on the page refcnt. If the driver bumps * the refcnt > 1 this will unmap the page. If the page refcnt is 1 * the allocator owns the page and will try to recycle it in one of the pool * caches. If PP_FLAG_DMA_SYNC_DEV is set, the page will be synced for_device * using dma_sync_single_range_for_device().
*/ static inline void page_pool_put_page(struct page_pool *pool, struct page *page, unsigned int dma_sync_size, bool allow_direct) { page_pool_put_netmem(pool, page_to_netmem(page), dma_sync_size, allow_direct); } static inline void page_pool_put_full_netmem(struct page_pool *pool, netmem_ref netmem, bool allow_direct) { page_pool_put_netmem(pool, netmem, -1, allow_direct); } /** * page_pool_put_full_page() - release a reference on a page pool page * @pool: pool from which page was allocated * @page: page to release a reference on * @allow_direct: released by the consumer, allow lockless caching * * Similar to page_pool_put_page(), but will DMA sync the entire memory area * as configured in &page_pool_params.max_len. */ static inline void page_pool_put_full_page(struct page_pool *pool, struct page *page, bool allow_direct) { page_pool_put_netmem(pool, page_to_netmem(page), -1, allow_direct); } /** * page_pool_recycle_direct() - release a reference on a page pool page * @pool: pool from which page was allocated * @page: page to release a reference on * * Similar to page_pool_put_full_page() but caller must guarantee safe context * (e.g NAPI), since it will recycle the page directly into the pool fast cache. */ static inline void page_pool_recycle_direct(struct page_pool *pool, struct page *page) { page_pool_put_full_page(pool, page, true); } static inline void page_pool_recycle_direct_netmem(struct page_pool *pool, netmem_ref netmem) { page_pool_put_full_netmem(pool, netmem, true); } #define PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA \ (sizeof(dma_addr_t) > sizeof(unsigned long)) /** * page_pool_free_va() - free a va into the page_pool * @pool: pool from which va was allocated * @va: va to be freed * @allow_direct: freed by the consumer, allow lockless caching * * Free a va allocated from page_pool_alloc_va(). */ static inline void page_pool_free_va(struct page_pool *pool, void *va, bool allow_direct) { page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct); } static inline dma_addr_t page_pool_get_dma_addr_netmem(netmem_ref netmem) { dma_addr_t ret = netmem_get_dma_addr(netmem); if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) ret <<= PAGE_SHIFT; return ret; } /** * page_pool_get_dma_addr() - Retrieve the stored DMA address. * @page: page allocated from a page pool * * Fetch the DMA address of the page. The page pool to which the page belongs * must have been created with PP_FLAG_DMA_MAP. */ static inline dma_addr_t page_pool_get_dma_addr(const struct page *page) { dma_addr_t ret = page->dma_addr; if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) ret <<= PAGE_SHIFT; return ret; } static inline void __page_pool_dma_sync_for_cpu(const struct page_pool *pool, const dma_addr_t dma_addr, u32 offset, u32 dma_sync_size) { dma_sync_single_range_for_cpu(pool->p.dev, dma_addr, offset + pool->p.offset, dma_sync_size, page_pool_get_dma_dir(pool)); } /** * page_pool_dma_sync_for_cpu - sync Rx page for CPU after it's written by HW * @pool: &page_pool the @page belongs to * @page: page to sync * @offset: offset from page start to "hard" start if using PP frags * @dma_sync_size: size of the data written to the page * * Can be used as a shorthand to sync Rx pages before accessing them in the * driver. Caller must ensure the pool was created with ``PP_FLAG_DMA_MAP``. * Note that this version performs DMA sync unconditionally, even if the * associated PP doesn't perform sync-for-device.
*/ static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool, const struct page *page, u32 offset, u32 dma_sync_size) { __page_pool_dma_sync_for_cpu(pool, page_pool_get_dma_addr(page), offset, dma_sync_size); } static inline void page_pool_dma_sync_netmem_for_cpu(const struct page_pool *pool, const netmem_ref netmem, u32 offset, u32 dma_sync_size) { if (!pool->dma_sync_for_cpu) return; __page_pool_dma_sync_for_cpu(pool, page_pool_get_dma_addr_netmem(netmem), offset, dma_sync_size); } static inline bool page_pool_put(struct page_pool *pool) { return refcount_dec_and_test(&pool->user_cnt); } static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid) { if (unlikely(pool->p.nid != new_nid)) page_pool_update_nid(pool, new_nid); } static inline bool page_pool_is_unreadable(struct page_pool *pool) { return !!pool->mp_ops; } #endif /* _NET_PAGE_POOL_HELPERS_H */
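To show how the helpers above fit together, here is a hedged sketch of a driver Rx buffer path. page_pool_create(), page_pool_destroy() and struct page_pool_params come from <net/page_pool/types.h>; the demo_* names and the pool sizing are invented for the example.

#include <linux/err.h>
#include <linux/numa.h>
#include <net/page_pool/helpers.h>

struct demo_rx_ring {
	struct page_pool *pool;
};

static int demo_rx_ring_init(struct demo_rx_ring *ring, struct device *dev)
{
	struct page_pool_params pp = {
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order		= 0,
		.pool_size	= 256,			/* ring depth, made up */
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,		/* sync whole page on recycle */
		.offset		= 0,
	};

	ring->pool = page_pool_create(&pp);
	if (IS_ERR(ring->pool))
		return PTR_ERR(ring->pool);
	return 0;
}

static dma_addr_t demo_rx_refill_one(struct demo_rx_ring *ring)
{
	struct page *page = page_pool_dev_alloc_pages(ring->pool);

	if (!page)
		return 0;	/* 0 stands in for "failed" in this sketch */
	/* Pool was created with PP_FLAG_DMA_MAP, so the mapping is stored. */
	return page_pool_get_dma_addr(page);
}

static void demo_rx_drop_one(struct demo_rx_ring *ring, struct page *page)
{
	/* Syncs max_len bytes for the device; false: not in NAPI context. */
	page_pool_put_full_page(ring->pool, page, false);
}

static void demo_rx_ring_free(struct demo_rx_ring *ring)
{
	/* Safe only once all in-flight pages have been returned, as the
	 * DOC comment above requires. */
	page_pool_destroy(ring->pool);
}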
#ifndef __NET_NSH_H #define __NET_NSH_H 1 #include <linux/skbuff.h> /* * Network Service Header: * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |Ver|O|U| TTL | Length |U|U|U|U|MD Type| Next Protocol | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Service Path Identifier (SPI) | Service Index | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | | * ~ Mandatory/Optional Context Headers ~ * | | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Version: The version field is used to ensure backward compatibility * going forward with future NSH specification updates. It MUST be set * to 0x0 by the sender, in this first revision of NSH. Given the * widespread implementation of existing hardware that uses the first * nibble after an MPLS label stack for ECMP decision processing, this * document reserves version 01b and this value MUST NOT be used in * future versions of the protocol. Please see [RFC7325] for further * discussion of MPLS-related forwarding requirements. * * O bit: Setting this bit indicates an Operations, Administration, and * Maintenance (OAM) packet. The actual format and processing of SFC * OAM packets is outside the scope of this specification (see for * example [I-D.ietf-sfc-oam-framework] for one approach). * * The O bit MUST be set for OAM packets and MUST NOT be set for non-OAM * packets. The O bit MUST NOT be modified along the SFP. * * SF/SFF/SFC Proxy/Classifier implementations that do not support SFC * OAM procedures SHOULD discard packets with O bit set, but MAY support * a configurable parameter to enable forwarding received SFC OAM * packets unmodified to the next element in the chain. Forwarding OAM * packets unmodified by SFC elements that do not support SFC OAM * procedures may be acceptable for a subset of OAM functions, but can * result in unexpected outcomes for others, thus it is recommended to * analyze the impact of forwarding an OAM packet for all OAM functions * prior to enabling this behavior. The configurable parameter MUST be * disabled by default. * * TTL: Indicates the maximum SFF hops for an SFP. This field is used * for service plane loop detection.
The initial TTL value SHOULD be * configurable via the control plane; the configured initial value can * be specific to one or more SFPs. If no initial value is explicitly * provided, the default initial TTL value of 63 MUST be used. Each SFF * involved in forwarding an NSH packet MUST decrement the TTL value by * 1 prior to NSH forwarding lookup. Decrementing by 1 from an incoming * value of 0 shall result in a TTL value of 63. The packet MUST NOT be * forwarded if TTL is, after decrement, 0. * * All other flag fields, marked U, are unassigned and available for * future use, see Section 11.2.1. Unassigned bits MUST be set to zero * upon origination, and MUST be ignored and preserved unmodified by * other NSH supporting elements. Elements which do not understand the * meaning of any of these bits MUST NOT modify their actions based on * those unknown bits. * * Length: The total length, in 4-byte words, of NSH including the Base * Header, the Service Path Header, the Fixed Length Context Header or * Variable Length Context Header(s). The length MUST be 0x6 for MD * Type equal to 0x1, and MUST be 0x2 or greater for MD Type equal to * 0x2. The length of the NSH header MUST be an integer multiple of 4 * bytes, thus variable length metadata is always padded out to a * multiple of 4 bytes. * * MD Type: Indicates the format of NSH beyond the mandatory Base Header * and the Service Path Header. MD Type defines the format of the * metadata being carried. * * 0x0 - This is a reserved value. Implementations SHOULD silently * discard packets with MD Type 0x0. * * 0x1 - This indicates that the format of the header includes a fixed * length Context Header (see Figure 4 below). * * 0x2 - This does not mandate any headers beyond the Base Header and * Service Path Header, but may contain optional variable length Context * Header(s). The semantics of the variable length Context Header(s) * are not defined in this document. The format of the optional * variable length Context Headers is provided in Section 2.5.1. * * 0xF - This value is reserved for experimentation and testing, as per * [RFC3692]. Implementations not explicitly configured to be part of * an experiment SHOULD silently discard packets with MD Type 0xF. * * Next Protocol: indicates the protocol type of the encapsulated data. * NSH does not alter the inner payload, and the semantics on the inner * protocol remain unchanged due to NSH service function chaining. * Please see the IANA Considerations section below, Section 11.2.5. * * This document defines the following Next Protocol values: * * 0x1: IPv4 * 0x2: IPv6 * 0x3: Ethernet * 0x4: NSH * 0x5: MPLS * 0xFE: Experiment 1 * 0xFF: Experiment 2 * * Packets with Next Protocol values not supported SHOULD be silently * dropped by default, although an implementation MAY provide a * configuration parameter to forward them. Additionally, an * implementation not explicitly configured for a specific experiment * [RFC3692] SHOULD silently drop packets with Next Protocol values 0xFE * and 0xFF. * * Service Path Identifier (SPI): Identifies a service path. * Participating nodes MUST use this identifier for Service Function * Path selection. The initial classifier MUST set the appropriate SPI * for a given classification result. * * Service Index (SI): Provides location within the SFP. The initial * classifier for a given SFP SHOULD set the SI to 255, however the * control plane MAY configure the initial value of SI as appropriate * (i.e., taking into account the length of the service function path). 
* The Service Index MUST be decremented by a value of 1 by Service * Functions or by SFC Proxy nodes after performing required services * and the new decremented SI value MUST be used in the egress packet's * NSH. The initial Classifier MUST send the packet to the first SFF in * the identified SFP for forwarding along an SFP. If re-classification * occurs, and that re-classification results in a new SPI, the * (re)classifier is, in effect, the initial classifier for the * resultant SPI. * * The SI is used in conjunction with the Service Path Identifier for * Service Function Path Selection and for determining the next SFF/SF * in the path. The SI is also valuable when troubleshooting or * reporting service paths. Additionally, while the TTL field is the * main mechanism for service plane loop detection, the SI can also be * used for detecting service plane loops. * * When the Base Header specifies MD Type = 0x1, a Fixed Length Context * Header (16-bytes) MUST be present immediately following the Service * Path Header. The value of a Fixed Length Context * Header that carries no metadata MUST be set to zero. * * When the base header specifies MD Type = 0x2, zero or more Variable * Length Context Headers MAY be added, immediately following the * Service Path Header (see Figure 5). Therefore, Length = 0x2 indicates * that only the Base Header followed by the Service Path * Header are present. The optional Variable Length Context Headers * MUST be of an integer number of 4-bytes. The base header Length * field MUST be used to determine the offset to locate the original * packet or frame for SFC nodes that require access to that * information. * * The format of the optional variable length Context Headers * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Metadata Class | Type |U| Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Variable Metadata | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * Metadata Class (MD Class): Defines the scope of the 'Type' field to * provide a hierarchical namespace. The IANA Considerations * Section 11.2.4 defines how the MD Class values can be allocated to * standards bodies, vendors, and others. * * Type: Indicates the explicit type of metadata being carried. The * definition of the Type is the responsibility of the MD Class owner. * * Unassigned bit: One unassigned bit is available for future use. This * bit MUST NOT be set, and MUST be ignored on receipt. * * Length: Indicates the length of the variable metadata, in bytes. In * case the metadata length is not an integer number of 4-byte words, * the sender MUST add pad bytes immediately following the last metadata * byte to extend the metadata to an integer number of 4-byte words. * The receiver MUST round up the length field to the nearest 4-byte * word boundary, to locate and process the next field in the packet. * The receiver MUST access only those bytes in the metadata indicated * by the length field (i.e., actual number of bytes) and MUST ignore * the remaining bytes up to the nearest 4-byte word boundary. The * Length may be 0 or greater. * * A value of 0 denotes a Context Header without a Variable Metadata * field. * * [0] https://datatracker.ietf.org/doc/draft-ietf-sfc-nsh/ */ /** * struct nsh_md1_ctx - Keeps track of NSH context data * @context: NSH Contexts.
*/ struct nsh_md1_ctx { __be32 context[4]; }; struct nsh_md2_tlv { __be16 md_class; u8 type; u8 length; u8 md_value[]; }; struct nshhdr { __be16 ver_flags_ttl_len; u8 mdtype; u8 np; __be32 path_hdr; union { struct nsh_md1_ctx md1; struct nsh_md2_tlv md2; }; }; /* Masking NSH header fields. */ #define NSH_VER_MASK 0xc000 #define NSH_VER_SHIFT 14 #define NSH_FLAGS_MASK 0x3000 #define NSH_FLAGS_SHIFT 12 #define NSH_TTL_MASK 0x0fc0 #define NSH_TTL_SHIFT 6 #define NSH_LEN_MASK 0x003f #define NSH_LEN_SHIFT 0 #define NSH_MDTYPE_MASK 0x0f #define NSH_MDTYPE_SHIFT 0 #define NSH_SPI_MASK 0xffffff00 #define NSH_SPI_SHIFT 8 #define NSH_SI_MASK 0x000000ff #define NSH_SI_SHIFT 0 /* MD Type Registry. */ #define NSH_M_TYPE1 0x01 #define NSH_M_TYPE2 0x02 #define NSH_M_EXP1 0xFE #define NSH_M_EXP2 0xFF /* NSH Base Header Length */ #define NSH_BASE_HDR_LEN 8 /* NSH MD Type 1 header Length. */ #define NSH_M_TYPE1_LEN 24 /* NSH header maximum Length. */ #define NSH_HDR_MAX_LEN 256 /* NSH context headers maximum Length. */ #define NSH_CTX_HDRS_MAX_LEN 248 static inline struct nshhdr *nsh_hdr(struct sk_buff *skb) { return (struct nshhdr *)skb_network_header(skb); } static inline u16 nsh_hdr_len(const struct nshhdr *nsh) { return ((ntohs(nsh->ver_flags_ttl_len) & NSH_LEN_MASK) >> NSH_LEN_SHIFT) << 2; } static inline u8 nsh_get_ver(const struct nshhdr *nsh) { return (ntohs(nsh->ver_flags_ttl_len) & NSH_VER_MASK) >> NSH_VER_SHIFT; } static inline u8 nsh_get_flags(const struct nshhdr *nsh) { return (ntohs(nsh->ver_flags_ttl_len) & NSH_FLAGS_MASK) >> NSH_FLAGS_SHIFT; } static inline u8 nsh_get_ttl(const struct nshhdr *nsh) { return (ntohs(nsh->ver_flags_ttl_len) & NSH_TTL_MASK) >> NSH_TTL_SHIFT; } static inline void __nsh_set_xflag(struct nshhdr *nsh, u16 xflag, u16 xmask) { nsh->ver_flags_ttl_len = (nsh->ver_flags_ttl_len & ~htons(xmask)) | htons(xflag); } static inline void nsh_set_flags_and_ttl(struct nshhdr *nsh, u8 flags, u8 ttl) { __nsh_set_xflag(nsh, ((flags << NSH_FLAGS_SHIFT) & NSH_FLAGS_MASK) | ((ttl << NSH_TTL_SHIFT) & NSH_TTL_MASK), NSH_FLAGS_MASK | NSH_TTL_MASK); } static inline void nsh_set_flags_ttl_len(struct nshhdr *nsh, u8 flags, u8 ttl, u8 len) { len = len >> 2; __nsh_set_xflag(nsh, ((flags << NSH_FLAGS_SHIFT) & NSH_FLAGS_MASK) | ((ttl << NSH_TTL_SHIFT) & NSH_TTL_MASK) | ((len << NSH_LEN_SHIFT) & NSH_LEN_MASK), NSH_FLAGS_MASK | NSH_TTL_MASK | NSH_LEN_MASK); } int nsh_push(struct sk_buff *skb, const struct nshhdr *pushed_nh); int nsh_pop(struct sk_buff *skb); #endif /* __NET_NSH_H */
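Tying the masks and setters together, here is a short sketch of filling in a minimal MD Type 1 header. demo_nsh_init() is illustrative only, not a kernel API; the SPI and SI values come from the caller.

#include <linux/string.h>
#include <net/nsh.h>

static void demo_nsh_init(struct nshhdr *nsh, u32 spi, u8 si)
{
	/* Version 0, no flags, default initial TTL 63, length 24 bytes
	 * (0x6 words, as required for MD Type 0x1). */
	nsh->ver_flags_ttl_len = 0;
	nsh_set_flags_ttl_len(nsh, 0, 63, NSH_M_TYPE1_LEN);
	nsh->mdtype = NSH_M_TYPE1;
	nsh->np = 0x3;		/* Next Protocol: Ethernet */
	/* Pack SPI and SI into the 32-bit Service Path Header. */
	nsh->path_hdr = htonl(((spi << NSH_SPI_SHIFT) & NSH_SPI_MASK) |
			      ((si << NSH_SI_SHIFT) & NSH_SI_MASK));
	/* MD Type 1: a Fixed Length Context Header carrying no metadata
	 * MUST be set to zero. */
	memset(nsh->md1.context, 0, sizeof(nsh->md1.context));
}

A caller could fill a stack-local struct nshhdr this way and then hand it to nsh_push() to prepend it to an skb.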
// SPDX-License-Identifier: GPL-2.0-only /* * Hauppauge HD PVR USB driver * * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2008 Janne Grunau (j@jannau.net) * Copyright (C) 2008 John Poet */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/uaccess.h> #include <linux/atomic.h> #include <linux/usb.h> #include <linux/mutex.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/v4l2-dev.h> #include <media/v4l2-common.h> #include "hdpvr.h" static int video_nr[HDPVR_MAX] = {[0 ...
(HDPVR_MAX - 1)] = UNSET}; module_param_array(video_nr, int, NULL, 0); MODULE_PARM_DESC(video_nr, "video device number (-1=Auto)"); /* holds the number of currently registered devices */ static atomic_t dev_nr = ATOMIC_INIT(-1); int hdpvr_debug; module_param(hdpvr_debug, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(hdpvr_debug, "enable debugging output"); static uint default_video_input = HDPVR_VIDEO_INPUTS; module_param(default_video_input, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(default_video_input, "default video input: 0=Component / 1=S-Video / 2=Composite"); static uint default_audio_input = HDPVR_AUDIO_INPUTS; module_param(default_audio_input, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(default_audio_input, "default audio input: 0=RCA back / 1=RCA front / 2=S/PDIF"); static bool boost_audio; module_param(boost_audio, bool, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(boost_audio, "boost the audio signal"); /* table of devices that work with this driver */ static const struct usb_device_id hdpvr_table[] = { { USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID) }, { USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID1) }, { USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID2) }, { USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID3) }, { USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID4) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, hdpvr_table); void hdpvr_delete(struct hdpvr_device *dev) { hdpvr_free_buffers(dev); usb_put_dev(dev->udev); } static void challenge(u8 *bytes) { __le64 *i64P; u64 tmp64; uint i, idx; for (idx = 0; idx < 32; ++idx) { if (idx & 0x3) bytes[(idx >> 3) + 3] = bytes[(idx >> 2) & 0x3]; switch (idx & 0x3) { case 0x3: bytes[2] += bytes[3] * 4 + bytes[4] + bytes[5]; bytes[4] += bytes[(idx & 0x1) * 2] * 9 + 9; break; case 0x1: bytes[0] *= 8; bytes[0] += 7*idx + 4; bytes[6] += bytes[3] * 3; break; case 0x0: bytes[3 - (idx >> 3)] = bytes[idx >> 2]; bytes[5] += bytes[6] * 3; for (i = 0; i < 3; i++) bytes[3] *= bytes[3] + 1; break; case 0x2: for (i = 0; i < 3; i++) bytes[1] *= bytes[6] + 1; for (i = 0; i < 3; i++) { i64P = (__le64 *)bytes; tmp64 = le64_to_cpup(i64P); tmp64 = tmp64 + (tmp64 << (bytes[7] & 0x0f)); *i64P = cpu_to_le64(tmp64); } break; } } } /* try to init the device like the windows driver */ static int device_authorization(struct hdpvr_device *dev) { int ret, retval = -ENOMEM; char request_type = 0x38, rcv_request = 0x81; char *response; mutex_lock(&dev->usbc_mutex); ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), rcv_request, 0x80 | request_type, 0x0400, 0x0003, dev->usbc_buf, 46, 10000); if (ret != 46) { v4l2_err(&dev->v4l2_dev, "unexpected answer of status request, len %d\n", ret); goto unlock; } #ifdef HDPVR_DEBUG else { v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "Status request returned, len %d: %46ph\n", ret, dev->usbc_buf); } #endif dev->fw_ver = dev->usbc_buf[1]; dev->usbc_buf[46] = '\0'; v4l2_info(&dev->v4l2_dev, "firmware version 0x%x dated %s\n", dev->fw_ver, &dev->usbc_buf[2]); if (dev->fw_ver > 0x15) { dev->options.brightness = 0x80; dev->options.contrast = 0x40; dev->options.hue = 0xf; dev->options.saturation = 0x40; dev->options.sharpness = 0x80; } switch (dev->fw_ver) { case HDPVR_FIRMWARE_VERSION: dev->flags &= ~HDPVR_FLAG_AC3_CAP; break; case HDPVR_FIRMWARE_VERSION_AC3: case HDPVR_FIRMWARE_VERSION_0X12: case HDPVR_FIRMWARE_VERSION_0X15: case HDPVR_FIRMWARE_VERSION_0X1E: dev->flags |= HDPVR_FLAG_AC3_CAP; break; default: v4l2_info(&dev->v4l2_dev, "untested firmware, the driver might not work.\n"); if (dev->fw_ver >= 
HDPVR_FIRMWARE_VERSION_AC3) dev->flags |= HDPVR_FLAG_AC3_CAP; else dev->flags &= ~HDPVR_FLAG_AC3_CAP; } response = dev->usbc_buf+38; #ifdef HDPVR_DEBUG v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "challenge: %8ph\n", response); #endif challenge(response); #ifdef HDPVR_DEBUG v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, " response: %8ph\n", response); #endif msleep(100); ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 0xd1, 0x00 | request_type, 0x0000, 0x0000, response, 8, 10000); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "magic request returned %d\n", ret); retval = ret != 8; unlock: mutex_unlock(&dev->usbc_mutex); return retval; } static int hdpvr_device_init(struct hdpvr_device *dev) { int ret; u8 *buf; if (device_authorization(dev)) return -EACCES; /* default options for init */ hdpvr_set_options(dev); /* set filter options */ mutex_lock(&dev->usbc_mutex); buf = dev->usbc_buf; buf[0] = 0x03; buf[1] = 0x03; buf[2] = 0x00; buf[3] = 0x00; ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 0x01, 0x38, CTRL_LOW_PASS_FILTER_VALUE, CTRL_DEFAULT_INDEX, buf, 4, 1000); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "control request returned %d\n", ret); mutex_unlock(&dev->usbc_mutex); /* enable fan and bling leds */ mutex_lock(&dev->usbc_mutex); buf[0] = 0x1; ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 0xd4, 0x38, 0, 0, buf, 1, 1000); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "control request returned %d\n", ret); /* boost analog audio */ buf[0] = boost_audio; ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 0xd5, 0x38, 0, 0, buf, 1, 1000); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "control request returned %d\n", ret); mutex_unlock(&dev->usbc_mutex); dev->status = STATUS_IDLE; return 0; } static const struct hdpvr_options hdpvr_default_options = { .video_std = HDPVR_60HZ, .video_input = HDPVR_COMPONENT, .audio_input = HDPVR_RCA_BACK, .bitrate = 65, /* 6 mbps */ .peak_bitrate = 90, /* 9 mbps */ .bitrate_mode = HDPVR_CONSTANT, .gop_mode = HDPVR_SIMPLE_IDR_GOP, .audio_codec = V4L2_MPEG_AUDIO_ENCODING_AAC, /* original picture controls for firmware version <= 0x15 */ /* updated in device_authorization() for newer firmware */ .brightness = 0x86, .contrast = 0x80, .hue = 0x80, .saturation = 0x80, .sharpness = 0x80, }; static int hdpvr_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct hdpvr_device *dev; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; #if IS_ENABLED(CONFIG_I2C) struct i2c_client *client; #endif size_t buffer_size; int i; int dev_num; int retval = -ENOMEM; /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { dev_err(&interface->dev, "Out of memory\n"); goto error; } /* init video transfer queues first of all */ /* to prevent oops in hdpvr_delete() on error paths */ INIT_LIST_HEAD(&dev->free_buff_list); INIT_LIST_HEAD(&dev->rec_buff_list); /* register v4l2_device early so it can be used for printks */ if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) { dev_err(&interface->dev, "v4l2_device_register failed\n"); goto error_free_dev; } mutex_init(&dev->io_mutex); mutex_init(&dev->i2c_mutex); mutex_init(&dev->usbc_mutex); dev->usbc_buf = kmalloc(64, GFP_KERNEL); if (!dev->usbc_buf) { v4l2_err(&dev->v4l2_dev, "Out of memory\n"); goto error_v4l2_unregister; } init_waitqueue_head(&dev->wait_buffer); init_waitqueue_head(&dev->wait_data); dev->options = hdpvr_default_options; if 
(default_video_input < HDPVR_VIDEO_INPUTS) dev->options.video_input = default_video_input; if (default_audio_input < HDPVR_AUDIO_INPUTS) { dev->options.audio_input = default_audio_input; if (default_audio_input == HDPVR_SPDIF) dev->options.audio_codec = V4L2_MPEG_AUDIO_ENCODING_AC3; } dev->udev = usb_get_dev(interface_to_usbdev(interface)); /* set up the endpoint information */ /* use only the first bulk-in and bulk-out endpoints */ iface_desc = interface->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (!dev->bulk_in_endpointAddr && usb_endpoint_is_bulk_in(endpoint)) { /* USB interface description is buggy, reported max * packet size is 512 bytes, windows driver uses 8192 */ buffer_size = 8192; dev->bulk_in_size = buffer_size; dev->bulk_in_endpointAddr = endpoint->bEndpointAddress; } } if (!dev->bulk_in_endpointAddr) { v4l2_err(&dev->v4l2_dev, "Could not find bulk-in endpoint\n"); goto error_put_usb; } /* init the device */ if (hdpvr_device_init(dev)) { v4l2_err(&dev->v4l2_dev, "device init failed\n"); goto error_put_usb; } mutex_lock(&dev->io_mutex); if (hdpvr_alloc_buffers(dev, NUM_BUFFERS)) { mutex_unlock(&dev->io_mutex); v4l2_err(&dev->v4l2_dev, "allocating transfer buffers failed\n"); goto error_put_usb; } mutex_unlock(&dev->io_mutex); #if IS_ENABLED(CONFIG_I2C) retval = hdpvr_register_i2c_adapter(dev); if (retval < 0) { v4l2_err(&dev->v4l2_dev, "i2c adapter register failed\n"); goto error_free_buffers; } client = hdpvr_register_ir_i2c(dev); if (IS_ERR(client)) { v4l2_err(&dev->v4l2_dev, "i2c IR device register failed\n"); retval = PTR_ERR(client); goto reg_fail; } #endif dev_num = atomic_inc_return(&dev_nr); if (dev_num >= HDPVR_MAX) { v4l2_err(&dev->v4l2_dev, "max device number reached, device register failed\n"); atomic_dec(&dev_nr); retval = -ENODEV; goto reg_fail; } retval = hdpvr_register_videodev(dev, &interface->dev, video_nr[dev_num]); if (retval < 0) { v4l2_err(&dev->v4l2_dev, "registering videodev failed\n"); goto reg_fail; } /* let the user know what node this device is now attached to */ v4l2_info(&dev->v4l2_dev, "device now attached to %s\n", video_device_node_name(&dev->video_dev)); return 0; reg_fail: #if IS_ENABLED(CONFIG_I2C) i2c_del_adapter(&dev->i2c_adapter); error_free_buffers: #endif hdpvr_free_buffers(dev); error_put_usb: usb_put_dev(dev->udev); kfree(dev->usbc_buf); error_v4l2_unregister: v4l2_device_unregister(&dev->v4l2_dev); error_free_dev: kfree(dev); error: return retval; } static void hdpvr_disconnect(struct usb_interface *interface) { struct hdpvr_device *dev = to_hdpvr_dev(usb_get_intfdata(interface)); v4l2_info(&dev->v4l2_dev, "device %s disconnected\n", video_device_node_name(&dev->video_dev)); /* prevent more I/O from starting and stop any ongoing */ mutex_lock(&dev->io_mutex); dev->status = STATUS_DISCONNECTED; wake_up_interruptible(&dev->wait_data); wake_up_interruptible(&dev->wait_buffer); mutex_unlock(&dev->io_mutex); v4l2_device_disconnect(&dev->v4l2_dev); msleep(100); flush_work(&dev->worker); mutex_lock(&dev->io_mutex); hdpvr_cancel_queue(dev); mutex_unlock(&dev->io_mutex); #if IS_ENABLED(CONFIG_I2C) i2c_del_adapter(&dev->i2c_adapter); #endif video_unregister_device(&dev->video_dev); atomic_dec(&dev_nr); } static struct usb_driver hdpvr_usb_driver = { .name = "hdpvr", .probe = hdpvr_probe, .disconnect = hdpvr_disconnect, .id_table = hdpvr_table, }; module_usb_driver(hdpvr_usb_driver); MODULE_LICENSE("GPL"); MODULE_VERSION("0.2.1"); MODULE_AUTHOR("Janne Grunau"); 
MODULE_DESCRIPTION("Hauppauge HD PVR driver"); |
// SPDX-License-Identifier: GPL-2.0-only /* * Memory merging support. * * This code enables dynamic sharing of identical pages found in different * memory areas, even if they are not shared by fork() * * Copyright (C) 2008-2009 Red Hat, Inc. * Authors: * Izik Eidus * Andrea Arcangeli * Chris Wright * Hugh Dickins */ #include <linux/errno.h> #include <linux/mm.h> #include <linux/mm_inline.h> #include <linux/fs.h> #include <linux/mman.h> #include <linux/sched.h> #include <linux/sched/mm.h> #include <linux/sched/cputime.h> #include <linux/rwsem.h> #include <linux/pagemap.h> #include <linux/rmap.h> #include <linux/spinlock.h> #include <linux/xxhash.h> #include <linux/delay.h> #include <linux/kthread.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/rbtree.h> #include <linux/memory.h> #include <linux/mmu_notifier.h> #include <linux/swap.h> #include <linux/ksm.h> #include <linux/hashtable.h> #include <linux/freezer.h> #include <linux/oom.h> #include <linux/numa.h> #include <linux/pagewalk.h> #include <asm/tlbflush.h> #include "internal.h" #include "mm_slot.h" #define CREATE_TRACE_POINTS #include <trace/events/ksm.h> #ifdef CONFIG_NUMA #define NUMA(x) (x) #define DO_NUMA(x) do { (x); } while (0) #else #define NUMA(x) (0) #define DO_NUMA(x) do { } while (0) #endif typedef u8 rmap_age_t; /** * DOC: Overview * * A few notes about the KSM scanning process, * to make it easier to understand the data structures below: * * In order to reduce excessive scanning, KSM sorts the memory pages by their * contents into a data structure that holds pointers to the pages' locations. * * Since the contents of the pages may change at any moment, KSM cannot just * insert the pages into a normal sorted tree and expect it to find anything. * Therefore KSM uses two data structures - the stable and the unstable tree. * * The stable tree holds pointers to all the merged pages (ksm pages), sorted * by their contents. Because each such page is write-protected, searching on * this tree is fully assured to be working (except when pages are unmapped), * and therefore this tree is called the stable tree. * * The stable tree node includes information required for reverse * mapping from a KSM page to virtual addresses that map this page.
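* * As a rough illustration (a simplified sketch of the walk in stable_tree_search() below, not the exact code), a lookup descends the rbtree comparing raw page contents: new = &root->rb_node; while (*new) { stable_node = rb_entry(*new, struct ksm_stable_node, node); ret = memcmp_pages(page, &tree_folio->page); if (ret < 0) new = &(*new)->rb_left; else if (ret > 0) new = &(*new)->rb_right; else break; } where breaking out of the loop means a page of identical content was found.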
* * In order to avoid large latencies of the rmap walks on KSM pages, * KSM maintains two types of nodes in the stable tree: * * * the regular nodes that keep the reverse mapping structures in a * linked list * * the "chains" that link nodes ("dups") that represent the same * write protected memory content, but each "dup" corresponds to a * different KSM page copy of that content * * Internally, the regular nodes, "dups" and "chains" are represented * using the same struct ksm_stable_node structure. * * In addition to the stable tree, KSM uses a second data structure called the * unstable tree: this tree holds pointers to pages which have been found to * be "unchanged for a period of time". The unstable tree sorts these pages * by their contents, but since they are not write-protected, KSM cannot rely * upon the unstable tree to work correctly - the unstable tree is liable to * be corrupted as its contents are modified, and so it is called unstable. * * KSM solves this problem by several techniques: * * 1) The unstable tree is flushed every time KSM completes scanning all * memory areas, and then the tree is rebuilt again from the beginning. * 2) KSM will only insert into the unstable tree pages whose hash value * has not changed since the previous scan of all memory areas. * 3) The unstable tree is a red-black tree - so its balancing is based on the * colors of the nodes and not on their contents, assuring that even when * the tree gets "corrupted" it won't get out of balance, so scanning time * remains the same (also, searching and inserting nodes in an rbtree use * the same algorithm, so we have no overhead when we flush and rebuild). * 4) KSM never flushes the stable tree, which means that even if it were to * take 10 attempts to find a page in the unstable tree, once it is found, * it is secured in the stable tree. (When we scan a new page, we first * compare it against the stable tree, and then against the unstable tree.) * * If the merge_across_nodes tunable is unset, then KSM maintains multiple * stable trees and multiple unstable trees: one of each for each NUMA node. */ /** * struct ksm_mm_slot - ksm information per mm that is being scanned * @slot: hash lookup from mm to mm_slot * @rmap_list: head for this mm_slot's singly-linked list of rmap_items */ struct ksm_mm_slot { struct mm_slot slot; struct ksm_rmap_item *rmap_list; }; /** * struct ksm_scan - cursor for scanning * @mm_slot: the current mm_slot we are scanning * @address: the next address inside that to be scanned * @rmap_list: link to the next rmap to be scanned in the rmap_list * @seqnr: count of completed full scans (needed when removing unstable node) * * There is only the one ksm_scan instance of this cursor structure.
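* * For example, once a full pass over all mm_slots completes, seqnr is incremented and the next pass rebuilds the unstable tree from scratch (see the overview above); the low bits of seqnr are also stashed in each unstable rmap_item's address via SEQNR_MASK, which is how remove_rmap_item_from_tree() below tells a fresh insertion from a stale one.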
*/ struct ksm_scan { struct ksm_mm_slot *mm_slot; unsigned long address; struct ksm_rmap_item **rmap_list; unsigned long seqnr; }; /** * struct ksm_stable_node - node of the stable rbtree * @node: rb node of this ksm page in the stable tree * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list * @hlist_dup: linked into the stable_node->hlist with a stable_node chain * @list: linked into migrate_nodes, pending placement in the proper node tree * @hlist: hlist head of rmap_items using this ksm page * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid) * @chain_prune_time: time of the last full garbage collection * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN * @nid: NUMA node id of stable tree in which linked (may not match kpfn) */ struct ksm_stable_node { union { struct rb_node node; /* when node of stable tree */ struct { /* when listed for migration */ struct list_head *head; struct { struct hlist_node hlist_dup; struct list_head list; }; }; }; struct hlist_head hlist; union { unsigned long kpfn; unsigned long chain_prune_time; }; /* * STABLE_NODE_CHAIN can be any negative number in * rmap_hlist_len negative range, but better not -1 to be able * to reliably detect underflows. */ #define STABLE_NODE_CHAIN -1024 int rmap_hlist_len; #ifdef CONFIG_NUMA int nid; #endif }; /** * struct ksm_rmap_item - reverse mapping item for virtual addresses * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree * @nid: NUMA node id of unstable tree in which linked (may not match page) * @mm: the memory structure this rmap_item is pointing into * @address: the virtual address this rmap_item tracks (+ flags in low bits) * @oldchecksum: previous checksum of the page at that virtual address * @node: rb node of this rmap_item in the unstable tree * @head: pointer to stable_node heading this list in the stable tree * @hlist: link into hlist of rmap_items hanging off that stable_node * @age: number of scan iterations since creation * @remaining_skips: how many scans to skip */ struct ksm_rmap_item { struct ksm_rmap_item *rmap_list; union { struct anon_vma *anon_vma; /* when stable */ #ifdef CONFIG_NUMA int nid; /* when node of unstable tree */ #endif }; struct mm_struct *mm; unsigned long address; /* + low bits used for flags below */ unsigned int oldchecksum; /* when unstable */ rmap_age_t age; rmap_age_t remaining_skips; union { struct rb_node node; /* when node of unstable tree */ struct { /* when listed from stable tree */ struct ksm_stable_node *head; struct hlist_node hlist; }; }; }; #define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */ #define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */ #define STABLE_FLAG 0x200 /* is listed from the stable tree */ /* The stable and unstable tree heads */ static struct rb_root one_stable_tree[1] = { RB_ROOT }; static struct rb_root one_unstable_tree[1] = { RB_ROOT }; static struct rb_root *root_stable_tree = one_stable_tree; static struct rb_root *root_unstable_tree = one_unstable_tree; /* Recently migrated nodes of stable tree, pending proper placement */ static LIST_HEAD(migrate_nodes); #define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev) #define MM_SLOTS_HASH_BITS 10 static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS); static struct ksm_mm_slot ksm_mm_head = { .slot.mm_node = LIST_HEAD_INIT(ksm_mm_head.slot.mm_node), }; static struct ksm_scan ksm_scan = { 
.mm_slot = &ksm_mm_head, }; static struct kmem_cache *rmap_item_cache; static struct kmem_cache *stable_node_cache; static struct kmem_cache *mm_slot_cache; /* Default number of pages to scan per batch */ #define DEFAULT_PAGES_TO_SCAN 100 /* The number of pages scanned */ static unsigned long ksm_pages_scanned; /* The number of nodes in the stable tree */ static unsigned long ksm_pages_shared; /* The number of page slots additionally sharing those nodes */ static unsigned long ksm_pages_sharing; /* The number of nodes in the unstable tree */ static unsigned long ksm_pages_unshared; /* The number of rmap_items in use: to calculate pages_volatile */ static unsigned long ksm_rmap_items; /* The number of stable_node chains */ static unsigned long ksm_stable_node_chains; /* The number of stable_node dups linked to the stable_node chains */ static unsigned long ksm_stable_node_dups; /* Delay in pruning stale stable_node_dups in the stable_node_chains */ static unsigned int ksm_stable_node_chains_prune_millisecs = 2000; /* Maximum number of page slots sharing a stable node */ static int ksm_max_page_sharing = 256; /* Number of pages ksmd should scan in one batch */ static unsigned int ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN; /* Milliseconds ksmd should sleep between batches */ static unsigned int ksm_thread_sleep_millisecs = 20; /* Checksum of an empty (zeroed) page */ static unsigned int zero_checksum __read_mostly; /* Whether to merge empty (zeroed) pages with actual zero pages */ static bool ksm_use_zero_pages __read_mostly; /* Skip pages that couldn't be de-duplicated previously */ /* Default to true at least temporarily, for testing */ static bool ksm_smart_scan = true; /* The number of zero pages placed by KSM */ atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0); /* The number of pages that have been skipped due to "smart scanning" */ static unsigned long ksm_pages_skipped; /* Don't scan more than max pages per batch. */ static unsigned long ksm_advisor_max_pages_to_scan = 30000; /* Min CPU percent for scanning pages per scan */ #define KSM_ADVISOR_MIN_CPU 10 /* Max CPU percent for scanning pages per scan */ static unsigned int ksm_advisor_max_cpu = 70; /* Target scan time in seconds to analyze all KSM candidate pages. */ static unsigned long ksm_advisor_target_scan_time = 200; /* Exponentially weighted moving average. */ #define EWMA_WEIGHT 30 /** * struct advisor_ctx - metadata for KSM advisor * @start_scan: start time of the current scan * @scan_time: scan time of previous scan * @change: change in percent to pages_to_scan parameter * @cpu_time: cpu time consumed by the ksmd thread in the previous scan */ struct advisor_ctx { ktime_t start_scan; unsigned long scan_time; unsigned long change; unsigned long long cpu_time; }; static struct advisor_ctx advisor_ctx; /* Define the different advisors */ enum ksm_advisor_type { KSM_ADVISOR_NONE, KSM_ADVISOR_SCAN_TIME, }; static enum ksm_advisor_type ksm_advisor; #ifdef CONFIG_SYSFS /* * Only called through the sysfs control interface: */ /* At least scan this many pages per batch.
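* This is also the starting value that set_advisor_defaults() assigns to ksm_thread_pages_to_scan when the scan-time advisor is enabled.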
*/ static unsigned long ksm_advisor_min_pages_to_scan = 500; static void set_advisor_defaults(void) { if (ksm_advisor == KSM_ADVISOR_NONE) { ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN; } else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) { advisor_ctx = (const struct advisor_ctx){ 0 }; ksm_thread_pages_to_scan = ksm_advisor_min_pages_to_scan; } } #endif /* CONFIG_SYSFS */ static inline void advisor_start_scan(void) { if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) advisor_ctx.start_scan = ktime_get(); } /* * Use previous scan time if available, otherwise use current scan time as an * approximation for the previous scan time. */ static inline unsigned long prev_scan_time(struct advisor_ctx *ctx, unsigned long scan_time) { return ctx->scan_time ? ctx->scan_time : scan_time; } /* Calculate exponential weighted moving average */ static unsigned long ewma(unsigned long prev, unsigned long curr) { return ((100 - EWMA_WEIGHT) * prev + EWMA_WEIGHT * curr) / 100; } /* * The scan time advisor is based on the current scan rate and the target * scan rate. * * new_pages_to_scan = pages_to_scan * (scan_time / target_scan_time) * * To avoid perturbations, it also calculates a change factor from previous * changes. A new change factor is calculated for each iteration and it uses an * exponentially weighted moving average. The new pages_to_scan value is * multiplied with that change factor: * * new_pages_to_scan *= change factor * * The new_pages_to_scan value is limited by the cpu min and max values. It * calculates the cpu percent for the last scan and calculates the new * estimated cpu percent cost for the next scan. That value is capped by the * cpu min and max setting. * * In addition, the new pages_to_scan value is capped by the max and min * limits. */ static void scan_time_advisor(void) { unsigned int cpu_percent; unsigned long cpu_time; unsigned long cpu_time_diff; unsigned long cpu_time_diff_ms; unsigned long pages; unsigned long per_page_cost; unsigned long factor; unsigned long change; unsigned long last_scan_time; unsigned long scan_time; /* Convert scan time to seconds */ scan_time = div_s64(ktime_ms_delta(ktime_get(), advisor_ctx.start_scan), MSEC_PER_SEC); scan_time = scan_time ? scan_time : 1; /* Calculate CPU consumption of ksmd background thread */ cpu_time = task_sched_runtime(current); cpu_time_diff = cpu_time - advisor_ctx.cpu_time; cpu_time_diff_ms = cpu_time_diff / 1000 / 1000; cpu_percent = (cpu_time_diff_ms * 100) / (scan_time * 1000); cpu_percent = cpu_percent ? cpu_percent : 1; last_scan_time = prev_scan_time(&advisor_ctx, scan_time); /* Calculate scan time as percentage of target scan time */ factor = ksm_advisor_target_scan_time * 100 / scan_time; factor = factor ? factor : 1; /* * Calculate scan time as percentage of last scan time and use * exponentially weighted average to smooth it */ change = scan_time * 100 / last_scan_time; change = change ? change : 1; change = ewma(advisor_ctx.change, change); /* Calculate new scan rate based on target scan rate. */ pages = ksm_thread_pages_to_scan * 100 / factor; /* Update pages_to_scan by weighted change percentage. */ pages = pages * change / 100; /* Cap new pages_to_scan value */ per_page_cost = ksm_thread_pages_to_scan / cpu_percent; per_page_cost = per_page_cost ?
per_page_cost : 1; pages = min(pages, per_page_cost * ksm_advisor_max_cpu); pages = max(pages, per_page_cost * KSM_ADVISOR_MIN_CPU); pages = min(pages, ksm_advisor_max_pages_to_scan); /* Update advisor context */ advisor_ctx.change = change; advisor_ctx.scan_time = scan_time; advisor_ctx.cpu_time = cpu_time; ksm_thread_pages_to_scan = pages; trace_ksm_advisor(scan_time, pages, cpu_percent); } static void advisor_stop_scan(void) { if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) scan_time_advisor(); } #ifdef CONFIG_NUMA /* Zeroed when merging across nodes is not allowed */ static unsigned int ksm_merge_across_nodes = 1; static int ksm_nr_node_ids = 1; #else #define ksm_merge_across_nodes 1U #define ksm_nr_node_ids 1 #endif #define KSM_RUN_STOP 0 #define KSM_RUN_MERGE 1 #define KSM_RUN_UNMERGE 2 #define KSM_RUN_OFFLINE 4 static unsigned long ksm_run = KSM_RUN_STOP; static void wait_while_offlining(void); static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait); static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait); static DEFINE_MUTEX(ksm_thread_mutex); static DEFINE_SPINLOCK(ksm_mmlist_lock); static int __init ksm_slab_init(void) { rmap_item_cache = KMEM_CACHE(ksm_rmap_item, 0); if (!rmap_item_cache) goto out; stable_node_cache = KMEM_CACHE(ksm_stable_node, 0); if (!stable_node_cache) goto out_free1; mm_slot_cache = KMEM_CACHE(ksm_mm_slot, 0); if (!mm_slot_cache) goto out_free2; return 0; out_free2: kmem_cache_destroy(stable_node_cache); out_free1: kmem_cache_destroy(rmap_item_cache); out: return -ENOMEM; } static void __init ksm_slab_free(void) { kmem_cache_destroy(mm_slot_cache); kmem_cache_destroy(stable_node_cache); kmem_cache_destroy(rmap_item_cache); mm_slot_cache = NULL; } static __always_inline bool is_stable_node_chain(struct ksm_stable_node *chain) { return chain->rmap_hlist_len == STABLE_NODE_CHAIN; } static __always_inline bool is_stable_node_dup(struct ksm_stable_node *dup) { return dup->head == STABLE_NODE_DUP_HEAD; } static inline void stable_node_chain_add_dup(struct ksm_stable_node *dup, struct ksm_stable_node *chain) { VM_BUG_ON(is_stable_node_dup(dup)); dup->head = STABLE_NODE_DUP_HEAD; VM_BUG_ON(!is_stable_node_chain(chain)); hlist_add_head(&dup->hlist_dup, &chain->hlist); ksm_stable_node_dups++; } static inline void __stable_node_dup_del(struct ksm_stable_node *dup) { VM_BUG_ON(!is_stable_node_dup(dup)); hlist_del(&dup->hlist_dup); ksm_stable_node_dups--; } static inline void stable_node_dup_del(struct ksm_stable_node *dup) { VM_BUG_ON(is_stable_node_chain(dup)); if (is_stable_node_dup(dup)) __stable_node_dup_del(dup); else rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid)); #ifdef CONFIG_DEBUG_VM dup->head = NULL; #endif } static inline struct ksm_rmap_item *alloc_rmap_item(void) { struct ksm_rmap_item *rmap_item; rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); if (rmap_item) ksm_rmap_items++; return rmap_item; } static inline void free_rmap_item(struct ksm_rmap_item *rmap_item) { ksm_rmap_items--; rmap_item->mm->ksm_rmap_items--; rmap_item->mm = NULL; /* debug safety */ kmem_cache_free(rmap_item_cache, rmap_item); } static inline struct ksm_stable_node *alloc_stable_node(void) { /* * The allocation can take too long with GFP_KERNEL when memory is under * pressure, which may lead to hung task warnings. Adding __GFP_HIGH * grants access to memory reserves, helping to avoid this problem. 
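* (GFP_KERNEL still allows reclaim; __GFP_HIGH only marks the request as high priority so it may dip into the reserves instead of stalling.)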
*/ return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH); } static inline void free_stable_node(struct ksm_stable_node *stable_node) { VM_BUG_ON(stable_node->rmap_hlist_len && !is_stable_node_chain(stable_node)); kmem_cache_free(stable_node_cache, stable_node); } /* * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's * page tables after it has passed through ksm_exit() - which, if necessary, * takes mmap_lock briefly to serialize against them. ksm_exit() does not set * a special flag: they can just back out as soon as mm_users goes to zero. * ksm_test_exit() is used throughout to make this test for exit: in some * places for correctness, in some places just to avoid unnecessary work. */ static inline bool ksm_test_exit(struct mm_struct *mm) { return atomic_read(&mm->mm_users) == 0; } /* * We use break_ksm to break COW on a ksm page by triggering unsharing, * such that the ksm page will get replaced by an exclusive anonymous page. * * We take great care only to touch a ksm page, in a VM_MERGEABLE vma, * in case the application has unmapped and remapped mm,addr meanwhile. * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP * mmap of /dev/mem, where we would not want to touch it. * * FAULT_FLAG_REMOTE/FOLL_REMOTE are because we do this outside the context * of the process that owns 'vma'. We also do not want to enforce * protection keys here anyway. */ static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma) { vm_fault_t ret = 0; if (lock_vma) vma_start_write(vma); do { bool ksm_page = false; struct folio_walk fw; struct folio *folio; cond_resched(); folio = folio_walk_start(&fw, vma, addr, FW_MIGRATION | FW_ZEROPAGE); if (folio) { /* Small folio implies FW_LEVEL_PTE. */ if (!folio_test_large(folio) && (folio_test_ksm(folio) || is_ksm_zero_pte(fw.pte))) ksm_page = true; folio_walk_end(&fw, vma); } if (!ksm_page) return 0; ret = handle_mm_fault(vma, addr, FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE, NULL); } while (!(ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM))); /* * We must loop until we no longer find a KSM page because * handle_mm_fault() may back out if there's any difficulty e.g. if * pte accessed bit gets updated concurrently. * * VM_FAULT_SIGBUS could occur if we race with truncation of the * backing file, which also invalidates anonymous pages: that's * okay, that truncation will have unmapped the KSM page for us. * * VM_FAULT_OOM: at the time of writing (late July 2009), setting * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the * current task has TIF_MEMDIE set, and will be OOM killed on return * to user; and ksmd, having no mm, would never be chosen for that. * * But if the mm is in a limited mem_cgroup, then the fault may fail * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and * even ksmd can fail in this way - though it's usually breaking ksm * just to undo a merge it made a moment before, so unlikely to oom. * * That's a pity: we might therefore have more kernel pages allocated * than we're counting as nodes in the stable tree; but ksm_do_scan * will retry to break_cow on each pass, so should recover the page * in due course. The important thing is to not let VM_MERGEABLE * be cleared while any such pages might remain in the area. */ return (ret & VM_FAULT_OOM) ? 
-ENOMEM : 0; } static bool vma_ksm_compatible(struct vm_area_struct *vma) { if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE | VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_HUGETLB | VM_MIXEDMAP | VM_DROPPABLE)) return false; /* just ignore the advice */ if (vma_is_dax(vma)) return false; #ifdef VM_SAO if (vma->vm_flags & VM_SAO) return false; #endif #ifdef VM_SPARC_ADI if (vma->vm_flags & VM_SPARC_ADI) return false; #endif return true; } static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma; if (ksm_test_exit(mm)) return NULL; vma = vma_lookup(mm, addr); if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) return NULL; return vma; } static void break_cow(struct ksm_rmap_item *rmap_item) { struct mm_struct *mm = rmap_item->mm; unsigned long addr = rmap_item->address; struct vm_area_struct *vma; /* * It is not an accident that whenever we want to break COW * to undo, we also need to drop a reference to the anon_vma. */ put_anon_vma(rmap_item->anon_vma); mmap_read_lock(mm); vma = find_mergeable_vma(mm, addr); if (vma) break_ksm(vma, addr, false); mmap_read_unlock(mm); } static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item) { struct mm_struct *mm = rmap_item->mm; unsigned long addr = rmap_item->address; struct vm_area_struct *vma; struct page *page = NULL; struct folio_walk fw; struct folio *folio; mmap_read_lock(mm); vma = find_mergeable_vma(mm, addr); if (!vma) goto out; folio = folio_walk_start(&fw, vma, addr, 0); if (folio) { if (!folio_is_zone_device(folio) && folio_test_anon(folio)) { folio_get(folio); page = fw.page; } folio_walk_end(&fw, vma); } out: if (page) { flush_anon_page(vma, page, addr); flush_dcache_page(page); } mmap_read_unlock(mm); return page; } /* * This helper is used for getting the right index into the array of tree * roots. When the merge_across_nodes knob is set to 1, there are only two * rb-trees for stable and unstable pages from all nodes with roots in index 0. * Otherwise, every node has its own stable and unstable tree. */ static inline int get_kpfn_nid(unsigned long kpfn) { return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn)); } static struct ksm_stable_node *alloc_stable_node_chain(struct ksm_stable_node *dup, struct rb_root *root) { struct ksm_stable_node *chain = alloc_stable_node(); VM_BUG_ON(is_stable_node_chain(dup)); if (likely(chain)) { INIT_HLIST_HEAD(&chain->hlist); chain->chain_prune_time = jiffies; chain->rmap_hlist_len = STABLE_NODE_CHAIN; #if defined(CONFIG_DEBUG_VM) && defined(CONFIG_NUMA) chain->nid = NUMA_NO_NODE; /* debug */ #endif ksm_stable_node_chains++; /* * Put the stable node chain in the first dimension of * the stable tree and at the same time remove the old * stable node. */ rb_replace_node(&dup->node, &chain->node, root); /* * Move the old stable node to the second dimension * queued in the hlist_dup. The invariant is that all * dup stable_nodes in the chain->hlist point to pages * that are write protected and have the exact same * content.
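* * An illustrative layout after this call (a sketch, not literal kernel data): the chain carries rmap_hlist_len == STABLE_NODE_CHAIN and its ->hlist strings together dup0 -> dup1 -> ..., each dup being a distinct write-protected KSM page with identical content and its own rmap_items.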
*/ stable_node_chain_add_dup(dup, chain); } return chain; } static inline void free_stable_node_chain(struct ksm_stable_node *chain, struct rb_root *root) { rb_erase(&chain->node, root); free_stable_node(chain); ksm_stable_node_chains--; } static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node) { struct ksm_rmap_item *rmap_item; /* check it's not STABLE_NODE_CHAIN or negative */ BUG_ON(stable_node->rmap_hlist_len < 0); hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { if (rmap_item->hlist.next) { ksm_pages_sharing--; trace_ksm_remove_rmap_item(stable_node->kpfn, rmap_item, rmap_item->mm); } else { ksm_pages_shared--; } rmap_item->mm->ksm_merging_pages--; VM_BUG_ON(stable_node->rmap_hlist_len <= 0); stable_node->rmap_hlist_len--; put_anon_vma(rmap_item->anon_vma); rmap_item->address &= PAGE_MASK; cond_resched(); } /* * We need the second aligned pointer of the migrate_nodes * list_head to stay clear from the rb_parent_color union * (aligned and different than any node) and also different * from &migrate_nodes. This will verify that future list.h changes * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it. */ BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes); BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1); trace_ksm_remove_ksm_page(stable_node->kpfn); if (stable_node->head == &migrate_nodes) list_del(&stable_node->list); else stable_node_dup_del(stable_node); free_stable_node(stable_node); } enum ksm_get_folio_flags { KSM_GET_FOLIO_NOLOCK, KSM_GET_FOLIO_LOCK, KSM_GET_FOLIO_TRYLOCK }; /* * ksm_get_folio: checks if the page indicated by the stable node * is still its ksm page, despite having held no reference to it. * If it still is, we can trust the content of the page, and it * returns the gotten page; but if the page has now been zapped, * remove the stale node from the stable tree and return NULL. * But beware, the stable node's page might be being migrated. * * You would expect the stable_node to hold a reference to the ksm page. * But if it increments the page's count, swapping out has to wait for * ksmd to come around again before it can free the page, which may take * seconds or even minutes: much too unresponsive. So instead we use a * "keyhole reference": access to the ksm page from the stable node peeps * out through its keyhole to see if that page still holds the right key, * pointing back to this stable node. This relies on freeing a PageAnon * page to reset its page->mapping to NULL, and relies on no other use of * a page to put something that might look like our key in page->mapping. * The mapping may briefly look stale while a page is on its way to being * freed; but it is an anomaly to bear in mind. */ static struct folio *ksm_get_folio(struct ksm_stable_node *stable_node, enum ksm_get_folio_flags flags) { struct folio *folio; void *expected_mapping; unsigned long kpfn; expected_mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM); again: kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */ folio = pfn_folio(kpfn); if (READ_ONCE(folio->mapping) != expected_mapping) goto stale; /* * We cannot do anything with the page while its refcount is 0. * Usually 0 means free, or tail of a higher-order page: in which * case this node is no longer referenced, and should be freed; * however, it might mean that the page is under page_ref_freeze().
* The __remove_mapping() case is easy, again the node is now stale; * the same holds in the reuse_ksm_page() case; but if the page is swapcache * in folio_migrate_mapping(), it might still be our page, * in which case it's essential to keep the node. */ while (!folio_try_get(folio)) { /* * Another check for folio->mapping != expected_mapping * would work here too. We have chosen to test the * swapcache flag to optimize the common case, when the * folio is or is about to be freed: the swapcache flag * is cleared (under spin_lock_irq) in the ref_freeze * section of __remove_mapping(); but anon folio->mapping * is reset to NULL later, in free_pages_prepare(). */ if (!folio_test_swapcache(folio)) goto stale; cpu_relax(); } if (READ_ONCE(folio->mapping) != expected_mapping) { folio_put(folio); goto stale; } if (flags == KSM_GET_FOLIO_TRYLOCK) { if (!folio_trylock(folio)) { folio_put(folio); return ERR_PTR(-EBUSY); } } else if (flags == KSM_GET_FOLIO_LOCK) folio_lock(folio); if (flags != KSM_GET_FOLIO_NOLOCK) { if (READ_ONCE(folio->mapping) != expected_mapping) { folio_unlock(folio); folio_put(folio); goto stale; } } return folio; stale: /* * We come here from above when folio->mapping or the swapcache flag * suggests that the node is stale; but it might be under migration. * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(), * before checking whether node->kpfn has been changed. */ smp_rmb(); if (READ_ONCE(stable_node->kpfn) != kpfn) goto again; remove_node_from_stable_tree(stable_node); return NULL; } /* * Removing rmap_item from stable or unstable tree. * This function will clean the information from the stable/unstable tree. */ static void remove_rmap_item_from_tree(struct ksm_rmap_item *rmap_item) { if (rmap_item->address & STABLE_FLAG) { struct ksm_stable_node *stable_node; struct folio *folio; stable_node = rmap_item->head; folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK); if (!folio) goto out; hlist_del(&rmap_item->hlist); folio_unlock(folio); folio_put(folio); if (!hlist_empty(&stable_node->hlist)) ksm_pages_sharing--; else ksm_pages_shared--; rmap_item->mm->ksm_merging_pages--; VM_BUG_ON(stable_node->rmap_hlist_len <= 0); stable_node->rmap_hlist_len--; put_anon_vma(rmap_item->anon_vma); rmap_item->head = NULL; rmap_item->address &= PAGE_MASK; } else if (rmap_item->address & UNSTABLE_FLAG) { unsigned char age; /* * Usually ksmd can and must skip the rb_erase, because * root_unstable_tree was already reset to RB_ROOT. * But be careful when an mm is exiting: do the rb_erase * if this rmap_item was inserted by this scan, rather * than left over from before. */ age = (unsigned char)(ksm_scan.seqnr - rmap_item->address); BUG_ON(age > 1); if (!age) rb_erase(&rmap_item->node, root_unstable_tree + NUMA(rmap_item->nid)); ksm_pages_unshared--; rmap_item->address &= PAGE_MASK; } out: cond_resched(); /* we're called from many long loops */ } static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list) { while (*rmap_list) { struct ksm_rmap_item *rmap_item = *rmap_list; *rmap_list = rmap_item->rmap_list; remove_rmap_item_from_tree(rmap_item); free_rmap_item(rmap_item); } } /* * Though it's very tempting to unmerge rmap_items from the stable tree rather * than check every pte of a given vma, the locking doesn't quite work for * that - an rmap_item is assigned to the stable tree after inserting ksm * page and upping mmap_lock.
Nor does it fit with the way we skip dup'ing * rmap_items from parent to child at fork time (so as not to waste time * if exit comes before the next scan reaches it). * * Similarly, although we'd like to remove rmap_items (so updating counts * and freeing memory) when unmerging an area, it's easier to leave that * to the next pass of ksmd - consider, for example, how ksmd might be * in cmp_and_merge_page on one of the rmap_items we would be removing. */ static int unmerge_ksm_pages(struct vm_area_struct *vma, unsigned long start, unsigned long end, bool lock_vma) { unsigned long addr; int err = 0; for (addr = start; addr < end && !err; addr += PAGE_SIZE) { if (ksm_test_exit(vma->vm_mm)) break; if (signal_pending(current)) err = -ERESTARTSYS; else err = break_ksm(vma, addr, lock_vma); } return err; } static inline struct ksm_stable_node *folio_stable_node(const struct folio *folio) { return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL; } static inline struct ksm_stable_node *page_stable_node(struct page *page) { return folio_stable_node(page_folio(page)); } static inline void folio_set_stable_node(struct folio *folio, struct ksm_stable_node *stable_node) { VM_WARN_ON_FOLIO(folio_test_anon(folio) && PageAnonExclusive(&folio->page), folio); folio->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM); } #ifdef CONFIG_SYSFS /* * Only called through the sysfs control interface: */ static int remove_stable_node(struct ksm_stable_node *stable_node) { struct folio *folio; int err; folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK); if (!folio) { /* * ksm_get_folio did remove_node_from_stable_tree itself. */ return 0; } /* * The page could still be mapped if this races with __mmput() running * in between ksm_exit() and exit_mmap(). Just refuse to let * merge_across_nodes/max_page_sharing be switched. */ err = -EBUSY; if (!folio_mapped(folio)) { /* * The stable node did not yet appear stale to ksm_get_folio(), * since that allows for an unmapped ksm folio to be recognized * right up until it is freed; but the node is safe to remove. * This folio might be in an LRU cache waiting to be freed, * or it might be in the swapcache (perhaps under writeback), * or it might have been removed from swapcache a moment ago.
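* Either way, folio_set_stable_node(folio, NULL) below detaches the folio from the stable tree so that the node can be removed; the folio itself is freed later through the normal paths.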
*/ folio_set_stable_node(folio, NULL); remove_node_from_stable_tree(stable_node); err = 0; } folio_unlock(folio); folio_put(folio); return err; } static int remove_stable_node_chain(struct ksm_stable_node *stable_node, struct rb_root *root) { struct ksm_stable_node *dup; struct hlist_node *hlist_safe; if (!is_stable_node_chain(stable_node)) { VM_BUG_ON(is_stable_node_dup(stable_node)); if (remove_stable_node(stable_node)) return true; else return false; } hlist_for_each_entry_safe(dup, hlist_safe, &stable_node->hlist, hlist_dup) { VM_BUG_ON(!is_stable_node_dup(dup)); if (remove_stable_node(dup)) return true; } BUG_ON(!hlist_empty(&stable_node->hlist)); free_stable_node_chain(stable_node, root); return false; } static int remove_all_stable_nodes(void) { struct ksm_stable_node *stable_node, *next; int nid; int err = 0; for (nid = 0; nid < ksm_nr_node_ids; nid++) { while (root_stable_tree[nid].rb_node) { stable_node = rb_entry(root_stable_tree[nid].rb_node, struct ksm_stable_node, node); if (remove_stable_node_chain(stable_node, root_stable_tree + nid)) { err = -EBUSY; break; /* proceed to next nid */ } cond_resched(); } } list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { if (remove_stable_node(stable_node)) err = -EBUSY; cond_resched(); } return err; } static int unmerge_and_remove_all_rmap_items(void) { struct ksm_mm_slot *mm_slot; struct mm_slot *slot; struct mm_struct *mm; struct vm_area_struct *vma; int err = 0; spin_lock(&ksm_mmlist_lock); slot = list_entry(ksm_mm_head.slot.mm_node.next, struct mm_slot, mm_node); ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); spin_unlock(&ksm_mmlist_lock); for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) { VMA_ITERATOR(vmi, mm_slot->slot.mm, 0); mm = mm_slot->slot.mm; mmap_read_lock(mm); /* * Exit right away if mm is exiting to avoid lockdep issue in * the maple tree */ if (ksm_test_exit(mm)) goto mm_exiting; for_each_vma(vmi, vma) { if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) continue; err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, false); if (err) goto error; } mm_exiting: remove_trailing_rmap_items(&mm_slot->rmap_list); mmap_read_unlock(mm); spin_lock(&ksm_mmlist_lock); slot = list_entry(mm_slot->slot.mm_node.next, struct mm_slot, mm_node); ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); if (ksm_test_exit(mm)) { hash_del(&mm_slot->slot.hash); list_del(&mm_slot->slot.mm_node); spin_unlock(&ksm_mmlist_lock); mm_slot_free(mm_slot_cache, mm_slot); clear_bit(MMF_VM_MERGEABLE, &mm->flags); clear_bit(MMF_VM_MERGE_ANY, &mm->flags); mmdrop(mm); } else spin_unlock(&ksm_mmlist_lock); } /* Clean up stable nodes, but don't worry if some are still busy */ remove_all_stable_nodes(); ksm_scan.seqnr = 0; return 0; error: mmap_read_unlock(mm); spin_lock(&ksm_mmlist_lock); ksm_scan.mm_slot = &ksm_mm_head; spin_unlock(&ksm_mmlist_lock); return err; } #endif /* CONFIG_SYSFS */ static u32 calc_checksum(struct page *page) { u32 checksum; void *addr = kmap_local_page(page); checksum = xxhash(addr, PAGE_SIZE, 0); kunmap_local(addr); return checksum; } static int write_protect_page(struct vm_area_struct *vma, struct folio *folio, pte_t *orig_pte) { struct mm_struct *mm = vma->vm_mm; DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, 0, 0); int swapped; int err = -EFAULT; struct mmu_notifier_range range; bool anon_exclusive; pte_t entry; if (WARN_ON_ONCE(folio_test_large(folio))) return err; pvmw.address = page_address_in_vma(folio, folio_page(folio, 0), vma); if (pvmw.address 
== -EFAULT) goto out; mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address, pvmw.address + PAGE_SIZE); mmu_notifier_invalidate_range_start(&range); if (!page_vma_mapped_walk(&pvmw)) goto out_mn; if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?")) goto out_unlock; entry = ptep_get(pvmw.pte); /* * Handle PFN swap PTEs, such as device-exclusive ones, that actually * map pages: give up just like the next folio_walk would. */ if (unlikely(!pte_present(entry))) goto out_unlock; anon_exclusive = PageAnonExclusive(&folio->page); if (pte_write(entry) || pte_dirty(entry) || anon_exclusive || mm_tlb_flush_pending(mm)) { swapped = folio_test_swapcache(folio); flush_cache_page(vma, pvmw.address, folio_pfn(folio)); /* * Ok this is tricky, when get_user_pages_fast() runs it doesn't * take any lock, therefore the check that we are going to make * with the pagecount against the mapcount is racy and * O_DIRECT can happen right after the check. * So we clear the pte and flush the tlb before the check; * this assures us that no O_DIRECT can happen after the check * or in the middle of the check. * * No need to notify as we are downgrading the page table to read * only, not changing it to point to a new page. * * See Documentation/mm/mmu_notifier.rst */ entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte); /* * Check that no O_DIRECT or similar I/O is in progress on the * page */ if (folio_mapcount(folio) + 1 + swapped != folio_ref_count(folio)) { set_pte_at(mm, pvmw.address, pvmw.pte, entry); goto out_unlock; } /* See folio_try_share_anon_rmap_pte(): clear PTE first. */ if (anon_exclusive && folio_try_share_anon_rmap_pte(folio, &folio->page)) { set_pte_at(mm, pvmw.address, pvmw.pte, entry); goto out_unlock; } if (pte_dirty(entry)) folio_mark_dirty(folio); entry = pte_mkclean(entry); if (pte_write(entry)) entry = pte_wrprotect(entry); set_pte_at(mm, pvmw.address, pvmw.pte, entry); } *orig_pte = entry; err = 0; out_unlock: page_vma_mapped_walk_done(&pvmw); out_mn: mmu_notifier_invalidate_range_end(&range); out: return err; } /** * replace_page - replace page in vma by new ksm page * @vma: vma that holds the pte pointing to page * @page: the page we are replacing by kpage * @kpage: the ksm page we replace page by * @orig_pte: the original value of the pte * * Returns 0 on success, -EFAULT on failure. */ static int replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage, pte_t orig_pte) { struct folio *kfolio = page_folio(kpage); struct mm_struct *mm = vma->vm_mm; struct folio *folio = page_folio(page); pmd_t *pmd; pmd_t pmde; pte_t *ptep; pte_t newpte; spinlock_t *ptl; unsigned long addr; int err = -EFAULT; struct mmu_notifier_range range; addr = page_address_in_vma(folio, page, vma); if (addr == -EFAULT) goto out; pmd = mm_find_pmd(mm, addr); if (!pmd) goto out; /* * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at() * without holding the anon_vma lock for write. So when looking for a * genuine pmde (in which to find the pte), test present and !THP together.
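* pmdp_get_lockless() takes a consistent snapshot of the pmd without holding the page table lock; the pte lock taken further down revalidates the entry via pte_same() before anything is changed.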
*/ pmde = pmdp_get_lockless(pmd); if (!pmd_present(pmde) || pmd_trans_huge(pmde)) goto out; mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr, addr + PAGE_SIZE); mmu_notifier_invalidate_range_start(&range); ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); if (!ptep) goto out_mn; if (!pte_same(ptep_get(ptep), orig_pte)) { pte_unmap_unlock(ptep, ptl); goto out_mn; } VM_BUG_ON_PAGE(PageAnonExclusive(page), page); VM_BUG_ON_FOLIO(folio_test_anon(kfolio) && PageAnonExclusive(kpage), kfolio); /* * No need to check ksm_use_zero_pages here: we can only have a * zero_page here if ksm_use_zero_pages was enabled already. */ if (!is_zero_pfn(page_to_pfn(kpage))) { folio_get(kfolio); folio_add_anon_rmap_pte(kfolio, kpage, vma, addr, RMAP_NONE); newpte = mk_pte(kpage, vma->vm_page_prot); } else { /* * Use pte_mkdirty to mark the zero page mapped by KSM, and then * we can easily track all KSM-placed zero pages by checking if * the dirty bit in zero page's PTE is set. */ newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot))); ksm_map_zero_page(mm); /* * We're replacing an anonymous page with a zero page, which is * not anonymous. We need to do proper accounting otherwise we * will get wrong values in /proc, and a BUG message in dmesg * when tearing down the mm. */ dec_mm_counter(mm, MM_ANONPAGES); } flush_cache_page(vma, addr, pte_pfn(ptep_get(ptep))); /* * No need to notify as we are replacing a read only page with another * read only page with the same content. * * See Documentation/mm/mmu_notifier.rst */ ptep_clear_flush(vma, addr, ptep); set_pte_at(mm, addr, ptep, newpte); folio_remove_rmap_pte(folio, page, vma); if (!folio_mapped(folio)) folio_free_swap(folio); folio_put(folio); pte_unmap_unlock(ptep, ptl); err = 0; out_mn: mmu_notifier_invalidate_range_end(&range); out: return err; } /* * try_to_merge_one_page - take two pages and merge them into one * @vma: the vma that holds the pte pointing to page * @page: the PageAnon page that we want to replace with kpage * @kpage: the KSM page that we want to map instead of page, * or NULL the first time when we want to use page as kpage. * * This function returns 0 if the pages were merged, -EFAULT otherwise. */ static int try_to_merge_one_page(struct vm_area_struct *vma, struct page *page, struct page *kpage) { struct folio *folio = page_folio(page); pte_t orig_pte = __pte(0); int err = -EFAULT; if (page == kpage) /* ksm page forked */ return 0; if (!folio_test_anon(folio)) goto out; /* * We need the folio lock to read a stable swapcache flag in * write_protect_page(). We trylock because we don't want to wait * here - we prefer to continue scanning and merging different * pages, then come back to this page when it is unlocked. */ if (!folio_trylock(folio)) goto out; if (folio_test_large(folio)) { if (split_huge_page(page)) goto out_unlock; folio = page_folio(page); } /* * If this anonymous page is mapped only here, its pte may need * to be write-protected. If it's mapped elsewhere, all of its * ptes are necessarily already write-protected. But in either * case, we need to lock and check page_count is not raised. */ if (write_protect_page(vma, folio, &orig_pte) == 0) { if (!kpage) { /* * While we hold folio lock, upgrade folio from * anon to a NULL stable_node with the KSM flag set: * stable_tree_insert() will update stable_node. */ folio_set_stable_node(folio, NULL); folio_mark_accessed(folio); /* * Page reclaim just frees a clean folio with no dirty * ptes: make sure that the ksm page would be swapped. 
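* (Hence folio_mark_dirty() below: with all of its ptes clean, an otherwise clean KSM folio could be discarded by reclaim rather than written to swap.)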
if (!folio_test_dirty(folio)) folio_mark_dirty(folio); err = 0; } else if (pages_identical(page, kpage)) err = replace_page(vma, page, kpage, orig_pte); } out_unlock: folio_unlock(folio); out: return err; } /* * This function returns 0 if the pages were merged or if they are * no longer merging candidates (e.g., VMA stale), -EFAULT otherwise. */ static int try_to_merge_with_zero_page(struct ksm_rmap_item *rmap_item, struct page *page) { struct mm_struct *mm = rmap_item->mm; int err = -EFAULT; /* * Same checksum as an empty page. We attempt to merge it with the * appropriate zero page if the user enabled this via sysfs. */ if (ksm_use_zero_pages && (rmap_item->oldchecksum == zero_checksum)) { struct vm_area_struct *vma; mmap_read_lock(mm); vma = find_mergeable_vma(mm, rmap_item->address); if (vma) { err = try_to_merge_one_page(vma, page, ZERO_PAGE(rmap_item->address)); trace_ksm_merge_one_page( page_to_pfn(ZERO_PAGE(rmap_item->address)), rmap_item, mm, err); } else { /* * If the vma is out of date, we do not need to * continue. */ err = 0; } mmap_read_unlock(mm); } return err; } /* * try_to_merge_with_ksm_page - like try_to_merge_two_pages, * but no new kernel page is allocated: kpage must already be a ksm page. * * This function returns 0 if the pages were merged, -EFAULT otherwise. */ static int try_to_merge_with_ksm_page(struct ksm_rmap_item *rmap_item, struct page *page, struct page *kpage) { struct mm_struct *mm = rmap_item->mm; struct vm_area_struct *vma; int err = -EFAULT; mmap_read_lock(mm); vma = find_mergeable_vma(mm, rmap_item->address); if (!vma) goto out; err = try_to_merge_one_page(vma, page, kpage); if (err) goto out; /* Unstable nid is in union with stable anon_vma: remove first */ remove_rmap_item_from_tree(rmap_item); /* Must get reference to anon_vma while still holding mmap_lock */ rmap_item->anon_vma = vma->anon_vma; get_anon_vma(vma->anon_vma); out: mmap_read_unlock(mm); trace_ksm_merge_with_ksm_page(kpage, page_to_pfn(kpage ? kpage : page), rmap_item, mm, err); return err; } /* * try_to_merge_two_pages - take two identical pages and prepare them * to be merged into one page. * * This function returns the kpage if we successfully merged two identical * pages into one ksm page, NULL otherwise. * * Note that this function upgrades page to ksm page: if one of the pages * is already a ksm page, try_to_merge_with_ksm_page should be used. */ static struct folio *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item, struct page *page, struct ksm_rmap_item *tree_rmap_item, struct page *tree_page) { int err; err = try_to_merge_with_ksm_page(rmap_item, page, NULL); if (!err) { err = try_to_merge_with_ksm_page(tree_rmap_item, tree_page, page); /* * If that fails, we have a ksm page with only one pte * pointing to it: so break it. */ if (err) break_cow(rmap_item); } return err ? NULL : page_folio(page); } static __always_inline bool __is_page_sharing_candidate(struct ksm_stable_node *stable_node, int offset) { VM_BUG_ON(stable_node->rmap_hlist_len < 0); /* * Check that at least one mapping still exists, otherwise * there's not much point in merging and sharing with this * stable_node, as the underlying tree_page of the other * sharer is going to be freed soon.
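* * A worked example (illustrative numbers): with the default ksm_max_page_sharing of 256, a dup whose rmap_hlist_len is 255 still qualifies at offset 0 (255 + 0 < 256) but not at offset 1; callers pass offset == 1 when they need room for one more mapping beyond a merge already underway.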
*/ return stable_node->rmap_hlist_len && stable_node->rmap_hlist_len + offset < ksm_max_page_sharing; } static __always_inline bool is_page_sharing_candidate(struct ksm_stable_node *stable_node) { return __is_page_sharing_candidate(stable_node, 0); } static struct folio *stable_node_dup(struct ksm_stable_node **_stable_node_dup, struct ksm_stable_node **_stable_node, struct rb_root *root, bool prune_stale_stable_nodes) { struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node; struct hlist_node *hlist_safe; struct folio *folio, *tree_folio = NULL; int found_rmap_hlist_len; if (!prune_stale_stable_nodes || time_before(jiffies, stable_node->chain_prune_time + msecs_to_jiffies( ksm_stable_node_chains_prune_millisecs))) prune_stale_stable_nodes = false; else stable_node->chain_prune_time = jiffies; hlist_for_each_entry_safe(dup, hlist_safe, &stable_node->hlist, hlist_dup) { cond_resched(); /* * We must walk all stable_node_dup to prune the stale * stable nodes during lookup. * * ksm_get_folio can drop the nodes from the * stable_node->hlist if they point to freed pages * (that's why we do a _safe walk). The "dup" * stable_node parameter itself will be freed from * under us if it returns NULL. */ folio = ksm_get_folio(dup, KSM_GET_FOLIO_NOLOCK); if (!folio) continue; /* Pick the best candidate if possible. */ if (!found || (is_page_sharing_candidate(dup) && (!is_page_sharing_candidate(found) || dup->rmap_hlist_len > found_rmap_hlist_len))) { if (found) folio_put(tree_folio); found = dup; found_rmap_hlist_len = found->rmap_hlist_len; tree_folio = folio; /* skip put_page for found candidate */ if (!prune_stale_stable_nodes && is_page_sharing_candidate(found)) break; continue; } folio_put(folio); } if (found) { if (hlist_is_singular_node(&found->hlist_dup, &stable_node->hlist)) { /* * If there's not just one entry it would * corrupt memory, better BUG_ON. In KSM * context with no lock held it's not even * fatal. */ BUG_ON(stable_node->hlist.first->next); /* * There's just one entry and it is below the * deduplication limit so drop the chain. */ rb_replace_node(&stable_node->node, &found->node, root); free_stable_node(stable_node); ksm_stable_node_chains--; ksm_stable_node_dups--; /* * NOTE: the caller depends on the stable_node * to be equal to stable_node_dup if the chain * was collapsed. */ *_stable_node = found; /* * Just for robustness, as stable_node is * otherwise left as a stable pointer, the * compiler shall optimize it away at build * time. */ stable_node = NULL; } else if (stable_node->hlist.first != &found->hlist_dup && __is_page_sharing_candidate(found, 1)) { /* * If the found stable_node dup can accept one * more future merge (in addition to the one * that is underway) and is not at the head of * the chain, put it there so next search will * be quicker in the !prune_stale_stable_nodes * case. * * NOTE: it would be inaccurate to use nr > 1 * instead of checking the hlist.first pointer * directly, because in the * prune_stale_stable_nodes case "nr" isn't * the position of the found dup in the chain, * but the total number of dups in the chain. */ hlist_del(&found->hlist_dup); hlist_add_head(&found->hlist_dup, &stable_node->hlist); } } else { /* Its hlist must be empty if no one found. */ free_stable_node_chain(stable_node, root); } *_stable_node_dup = found; return tree_folio; } /* * Like for ksm_get_folio, this function can free the *_stable_node and * *_stable_node_dup if the returned tree_page is NULL. 
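* * (chain() and chain_prune() below are thin wrappers around this function, differing only in whether stale dups may be pruned during the walk.)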
* * It can also free and overwrite *_stable_node with the found * stable_node_dup if the chain is collapsed (in which case * *_stable_node will be equal to *_stable_node_dup as if the chain * never existed). It's up to the caller to verify tree_page is not * NULL before dereferencing *_stable_node or *_stable_node_dup. * * *_stable_node_dup is really a second output parameter of this * function and will be overwritten in all cases; the caller doesn't * need to initialize it. */ static struct folio *__stable_node_chain(struct ksm_stable_node **_stable_node_dup, struct ksm_stable_node **_stable_node, struct rb_root *root, bool prune_stale_stable_nodes) { struct ksm_stable_node *stable_node = *_stable_node; if (!is_stable_node_chain(stable_node)) { *_stable_node_dup = stable_node; return ksm_get_folio(stable_node, KSM_GET_FOLIO_NOLOCK); } return stable_node_dup(_stable_node_dup, _stable_node, root, prune_stale_stable_nodes); } static __always_inline struct folio *chain_prune(struct ksm_stable_node **s_n_d, struct ksm_stable_node **s_n, struct rb_root *root) { return __stable_node_chain(s_n_d, s_n, root, true); } static __always_inline struct folio *chain(struct ksm_stable_node **s_n_d, struct ksm_stable_node **s_n, struct rb_root *root) { return __stable_node_chain(s_n_d, s_n, root, false); } /* * stable_tree_search - search for page inside the stable tree * * This function checks if there is a page inside the stable tree * with identical content to the page that we are scanning right now. * * This function returns the folio holding that identical content if one is * found (with its refcount elevated), ERR_PTR(-EBUSY) if the stable node's * page is being migrated, NULL otherwise. */ static struct folio *stable_tree_search(struct page *page) { int nid; struct rb_root *root; struct rb_node **new; struct rb_node *parent; struct ksm_stable_node *stable_node, *stable_node_dup; struct ksm_stable_node *page_node; struct folio *folio; folio = page_folio(page); page_node = folio_stable_node(folio); if (page_node && page_node->head != &migrate_nodes) { /* ksm page forked */ folio_get(folio); return folio; } nid = get_kpfn_nid(folio_pfn(folio)); root = root_stable_tree + nid; again: new = &root->rb_node; parent = NULL; while (*new) { struct folio *tree_folio; int ret; cond_resched(); stable_node = rb_entry(*new, struct ksm_stable_node, node); tree_folio = chain_prune(&stable_node_dup, &stable_node, root); if (!tree_folio) { /* * If we walked over a stale stable_node, * ksm_get_folio() will call rb_erase() and it * may rebalance the tree from under us. So * restart the search from scratch. Returning * NULL would be safe too, but we'd generate * false negative insertions just because some * stable_node was stale. */ goto again; } ret = memcmp_pages(page, &tree_folio->page); folio_put(tree_folio); parent = *new; if (ret < 0) new = &parent->rb_left; else if (ret > 0) new = &parent->rb_right; else { if (page_node) { VM_BUG_ON(page_node->head != &migrate_nodes); /* * If the mapcount of our migrated KSM folio is * at most 1, we can merge it with another * KSM folio where we know that we have space * for one more mapping without exceeding the * ksm_max_page_sharing limit: see * chain_prune(). This way, we can avoid adding * this stable node to the chain.
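* (Conversely, a mapcount above 1 sends the folio down the chain_append path instead.)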
*/ if (folio_mapcount(folio) > 1) goto chain_append; } if (!is_page_sharing_candidate(stable_node_dup)) { /* * If the stable_node is a chain and * we got a payload match in memcmp * but we cannot merge the scanned * page in any of the existing * stable_node dups because they're * all full, we need to wait the * scanned page to find itself a match * in the unstable tree to create a * brand new KSM page to add later to * the dups of this stable_node. */ return NULL; } /* * Lock and unlock the stable_node's page (which * might already have been migrated) so that page * migration is sure to notice its raised count. * It would be more elegant to return stable_node * than kpage, but that involves more changes. */ tree_folio = ksm_get_folio(stable_node_dup, KSM_GET_FOLIO_TRYLOCK); if (PTR_ERR(tree_folio) == -EBUSY) return ERR_PTR(-EBUSY); if (unlikely(!tree_folio)) /* * The tree may have been rebalanced, * so re-evaluate parent and new. */ goto again; folio_unlock(tree_folio); if (get_kpfn_nid(stable_node_dup->kpfn) != NUMA(stable_node_dup->nid)) { folio_put(tree_folio); goto replace; } return tree_folio; } } if (!page_node) return NULL; list_del(&page_node->list); DO_NUMA(page_node->nid = nid); rb_link_node(&page_node->node, parent, new); rb_insert_color(&page_node->node, root); out: if (is_page_sharing_candidate(page_node)) { folio_get(folio); return folio; } else return NULL; replace: /* * If stable_node was a chain and chain_prune collapsed it, * stable_node has been updated to be the new regular * stable_node. A collapse of the chain is indistinguishable * from the case there was no chain in the stable * rbtree. Otherwise stable_node is the chain and * stable_node_dup is the dup to replace. */ if (stable_node_dup == stable_node) { VM_BUG_ON(is_stable_node_chain(stable_node_dup)); VM_BUG_ON(is_stable_node_dup(stable_node_dup)); /* there is no chain */ if (page_node) { VM_BUG_ON(page_node->head != &migrate_nodes); list_del(&page_node->list); DO_NUMA(page_node->nid = nid); rb_replace_node(&stable_node_dup->node, &page_node->node, root); if (is_page_sharing_candidate(page_node)) folio_get(folio); else folio = NULL; } else { rb_erase(&stable_node_dup->node, root); folio = NULL; } } else { VM_BUG_ON(!is_stable_node_chain(stable_node)); __stable_node_dup_del(stable_node_dup); if (page_node) { VM_BUG_ON(page_node->head != &migrate_nodes); list_del(&page_node->list); DO_NUMA(page_node->nid = nid); stable_node_chain_add_dup(page_node, stable_node); if (is_page_sharing_candidate(page_node)) folio_get(folio); else folio = NULL; } else { folio = NULL; } } stable_node_dup->head = &migrate_nodes; list_add(&stable_node_dup->list, stable_node_dup->head); return folio; chain_append: /* * If stable_node was a chain and chain_prune collapsed it, * stable_node has been updated to be the new regular * stable_node. A collapse of the chain is indistinguishable * from the case there was no chain in the stable * rbtree. Otherwise stable_node is the chain and * stable_node_dup is the dup to replace. */ if (stable_node_dup == stable_node) { VM_BUG_ON(is_stable_node_dup(stable_node_dup)); /* chain is missing so create it */ stable_node = alloc_stable_node_chain(stable_node_dup, root); if (!stable_node) return NULL; } /* * Add this stable_node dup that was * migrated to the stable_node chain * of the current nid for this page * content. 
*/ VM_BUG_ON(!is_stable_node_dup(stable_node_dup)); VM_BUG_ON(page_node->head != &migrate_nodes); list_del(&page_node->list); DO_NUMA(page_node->nid = nid); stable_node_chain_add_dup(page_node, stable_node); goto out; } /* * stable_tree_insert - insert stable tree node pointing to new ksm page * into the stable tree. * * This function returns the stable tree node just allocated on success, * NULL otherwise. */ static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio) { int nid; unsigned long kpfn; struct rb_root *root; struct rb_node **new; struct rb_node *parent; struct ksm_stable_node *stable_node, *stable_node_dup; bool need_chain = false; kpfn = folio_pfn(kfolio); nid = get_kpfn_nid(kpfn); root = root_stable_tree + nid; again: parent = NULL; new = &root->rb_node; while (*new) { struct folio *tree_folio; int ret; cond_resched(); stable_node = rb_entry(*new, struct ksm_stable_node, node); tree_folio = chain(&stable_node_dup, &stable_node, root); if (!tree_folio) { /* * If we walked over a stale stable_node, * ksm_get_folio() will call rb_erase() and it * may rebalance the tree from under us. So * restart the search from scratch. Returning * NULL would be safe too, but we'd generate * false negative insertions just because some * stable_node was stale. */ goto again; } ret = memcmp_pages(&kfolio->page, &tree_folio->page); folio_put(tree_folio); parent = *new; if (ret < 0) new = &parent->rb_left; else if (ret > 0) new = &parent->rb_right; else { need_chain = true; break; } } stable_node_dup = alloc_stable_node(); if (!stable_node_dup) return NULL; INIT_HLIST_HEAD(&stable_node_dup->hlist); stable_node_dup->kpfn = kpfn; stable_node_dup->rmap_hlist_len = 0; DO_NUMA(stable_node_dup->nid = nid); if (!need_chain) { rb_link_node(&stable_node_dup->node, parent, new); rb_insert_color(&stable_node_dup->node, root); } else { if (!is_stable_node_chain(stable_node)) { struct ksm_stable_node *orig = stable_node; /* chain is missing so create it */ stable_node = alloc_stable_node_chain(orig, root); if (!stable_node) { free_stable_node(stable_node_dup); return NULL; } } stable_node_chain_add_dup(stable_node_dup, stable_node); } folio_set_stable_node(kfolio, stable_node_dup); return stable_node_dup; } /* * unstable_tree_search_insert - search for identical page, * else insert rmap_item into the unstable tree. * * This function searches for a page in the unstable tree identical to the * page currently being scanned; and if no identical page is found in the * tree, we insert rmap_item as a new object into the unstable tree. * * This function returns pointer to rmap_item found to be identical * to the currently scanned page, NULL otherwise. * * This function does both searching and inserting, because they share * the same walking algorithm in an rbtree. */ static struct ksm_rmap_item *unstable_tree_search_insert(struct ksm_rmap_item *rmap_item, struct page *page, struct page **tree_pagep) { struct rb_node **new; struct rb_root *root; struct rb_node *parent = NULL; int nid; nid = get_kpfn_nid(page_to_pfn(page)); root = root_unstable_tree + nid; new = &root->rb_node; while (*new) { struct ksm_rmap_item *tree_rmap_item; struct page *tree_page; int ret; cond_resched(); tree_rmap_item = rb_entry(*new, struct ksm_rmap_item, node); tree_page = get_mergeable_page(tree_rmap_item); if (!tree_page) return NULL; /* * Don't substitute a ksm page for a forked page. 
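* After fork, parent and child can feed the very same struct page in here through different rmap_items, and merging a page with itself would be meaningless.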
*/ if (page == tree_page) { put_page(tree_page); return NULL; } ret = memcmp_pages(page, tree_page); parent = *new; if (ret < 0) { put_page(tree_page); new = &parent->rb_left; } else if (ret > 0) { put_page(tree_page); new = &parent->rb_right; } else if (!ksm_merge_across_nodes && page_to_nid(tree_page) != nid) { /* * If tree_page has been migrated to another NUMA node, * it will be flushed out and put in the right unstable * tree next time: only merge with it when across_nodes. */ put_page(tree_page); return NULL; } else { *tree_pagep = tree_page; return tree_rmap_item; } } rmap_item->address |= UNSTABLE_FLAG; rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); DO_NUMA(rmap_item->nid = nid); rb_link_node(&rmap_item->node, parent, new); rb_insert_color(&rmap_item->node, root); ksm_pages_unshared++; return NULL; } /* * stable_tree_append - add another rmap_item to the linked list of * rmap_items hanging off a given node of the stable tree, all sharing * the same ksm page. */ static void stable_tree_append(struct ksm_rmap_item *rmap_item, struct ksm_stable_node *stable_node, bool max_page_sharing_bypass) { /* * rmap won't find this mapping if we don't insert the * rmap_item in the right stable_node * duplicate. page_migration could break later if rmap breaks, * so we can as well crash here. We really need to check for * rmap_hlist_len == STABLE_NODE_CHAIN, but we can as well check * for other negative values as an underflow if detected here * for the first time (and not when decreasing rmap_hlist_len) * would be sign of memory corruption in the stable_node. */ BUG_ON(stable_node->rmap_hlist_len < 0); stable_node->rmap_hlist_len++; if (!max_page_sharing_bypass) /* possibly non fatal but unexpected overflow, only warn */ WARN_ON_ONCE(stable_node->rmap_hlist_len > ksm_max_page_sharing); rmap_item->head = stable_node; rmap_item->address |= STABLE_FLAG; hlist_add_head(&rmap_item->hlist, &stable_node->hlist); if (rmap_item->hlist.next) ksm_pages_sharing++; else ksm_pages_shared++; rmap_item->mm->ksm_merging_pages++; } /* * cmp_and_merge_page - first see if page can be merged into the stable tree; * if not, compare checksum to previous and if it's the same, see if page can * be inserted into the unstable tree, or merged with a page already there and * both transferred to the stable tree. * * @page: the page that we are searching identical page to. * @rmap_item: the reverse mapping into the virtual address of this page */ static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item) { struct ksm_rmap_item *tree_rmap_item; struct page *tree_page = NULL; struct ksm_stable_node *stable_node; struct folio *kfolio; unsigned int checksum; int err; bool max_page_sharing_bypass = false; stable_node = page_stable_node(page); if (stable_node) { if (stable_node->head != &migrate_nodes && get_kpfn_nid(READ_ONCE(stable_node->kpfn)) != NUMA(stable_node->nid)) { stable_node_dup_del(stable_node); stable_node->head = &migrate_nodes; list_add(&stable_node->list, stable_node->head); } if (stable_node->head != &migrate_nodes && rmap_item->head == stable_node) return; /* * If it's a KSM fork, allow it to go over the sharing limit * without warnings. 
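* (stable_tree_append() is then told to skip its WARN_ON_ONCE when rmap_hlist_len exceeds ksm_max_page_sharing.)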
*/ if (!is_page_sharing_candidate(stable_node)) max_page_sharing_bypass = true; } else { remove_rmap_item_from_tree(rmap_item); /* * If the hash value of the page has changed from the last time * we calculated it, this page is changing frequently: therefore we * don't want to insert it in the unstable tree, and we don't want * to waste our time searching for something identical to it there. */ checksum = calc_checksum(page); if (rmap_item->oldchecksum != checksum) { rmap_item->oldchecksum = checksum; return; } if (!try_to_merge_with_zero_page(rmap_item, page)) return; } /* Start by searching for the folio in the stable tree */ kfolio = stable_tree_search(page); if (&kfolio->page == page && rmap_item->head == stable_node) { folio_put(kfolio); return; } remove_rmap_item_from_tree(rmap_item); if (kfolio) { if (kfolio == ERR_PTR(-EBUSY)) return; err = try_to_merge_with_ksm_page(rmap_item, page, &kfolio->page); if (!err) { /* * The page was successfully merged: * add its rmap_item to the stable tree. */ folio_lock(kfolio); stable_tree_append(rmap_item, folio_stable_node(kfolio), max_page_sharing_bypass); folio_unlock(kfolio); } folio_put(kfolio); return; } tree_rmap_item = unstable_tree_search_insert(rmap_item, page, &tree_page); if (tree_rmap_item) { bool split; kfolio = try_to_merge_two_pages(rmap_item, page, tree_rmap_item, tree_page); /* * If both pages we tried to merge belong to the same compound * page, then we actually ended up increasing the reference * count of the same compound page twice, and split_huge_page * failed. * Here we set a flag if that happened, and we use it later to * try split_huge_page again. Since we call put_page right * afterwards, the reference count will be correct and * split_huge_page should succeed. */ split = PageTransCompound(page) && compound_head(page) == compound_head(tree_page); put_page(tree_page); if (kfolio) { /* * The pages were successfully merged: insert new * node in the stable tree and add both rmap_items. */ folio_lock(kfolio); stable_node = stable_tree_insert(kfolio); if (stable_node) { stable_tree_append(tree_rmap_item, stable_node, false); stable_tree_append(rmap_item, stable_node, false); } folio_unlock(kfolio); /* * If we fail to insert the page into the stable tree, * we will have 2 virtual addresses that are pointing * to a ksm page left outside the stable tree, * in which case we need to break_cow on both. */ if (!stable_node) { break_cow(tree_rmap_item); break_cow(rmap_item); } } else if (split) { /* * We are here if we tried to merge two pages and * failed because they both belonged to the same * compound page. We will split the page now, but no * merging will take place. * We do not want to add the cost of a full lock; if * the page is locked, it is better to skip it and * perhaps try again later. 
*/ if (!trylock_page(page)) return; split_huge_page(page); unlock_page(page); } } } static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot, struct ksm_rmap_item **rmap_list, unsigned long addr) { struct ksm_rmap_item *rmap_item; while (*rmap_list) { rmap_item = *rmap_list; if ((rmap_item->address & PAGE_MASK) == addr) return rmap_item; if (rmap_item->address > addr) break; *rmap_list = rmap_item->rmap_list; remove_rmap_item_from_tree(rmap_item); free_rmap_item(rmap_item); } rmap_item = alloc_rmap_item(); if (rmap_item) { /* It has already been zeroed */ rmap_item->mm = mm_slot->slot.mm; rmap_item->mm->ksm_rmap_items++; rmap_item->address = addr; rmap_item->rmap_list = *rmap_list; *rmap_list = rmap_item; } return rmap_item; } /* * Calculate how many scans to skip based on the rmap_item age. The age * reflects how many times de-duplication has already been tried on this * page without success. The smaller the age, the fewer scans the page is * skipped for. * * @age: rmap_item age of page */ static unsigned int skip_age(rmap_age_t age) { if (age <= 3) return 1; if (age <= 5) return 2; if (age <= 8) return 4; return 8; } /* * Determines if a page should be skipped for the current scan. * * @folio: folio containing the page to check * @rmap_item: associated rmap_item of page */ static bool should_skip_rmap_item(struct folio *folio, struct ksm_rmap_item *rmap_item) { rmap_age_t age; if (!ksm_smart_scan) return false; /* * Never skip pages that are already KSM; pages cmp_and_merge_page() * will essentially ignore them, but we still have to process them * properly. */ if (folio_test_ksm(folio)) return false; age = rmap_item->age; if (age != U8_MAX) rmap_item->age++; /* * Smaller ages are not skipped, they need to get a chance to go * through the different phases of the KSM merging. */ if (age < 3) return false; /* * Have we used up our skip allowance? If so, don't skip this time, * and work out how many of the following scans may be skipped. */ if (!rmap_item->remaining_skips) { rmap_item->remaining_skips = skip_age(age); return false; } /* Skip this page */ ksm_pages_skipped++; rmap_item->remaining_skips--; remove_rmap_item_from_tree(rmap_item); return true; } static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page) { struct mm_struct *mm; struct ksm_mm_slot *mm_slot; struct mm_slot *slot; struct vm_area_struct *vma; struct ksm_rmap_item *rmap_item; struct vma_iterator vmi; int nid; if (list_empty(&ksm_mm_head.slot.mm_node)) return NULL; mm_slot = ksm_scan.mm_slot; if (mm_slot == &ksm_mm_head) { advisor_start_scan(); trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items); /* * A number of pages can hang around indefinitely in per-cpu * LRU cache, their raised page count preventing write_protect_page * from merging them. Though it doesn't really matter much, * it is puzzling to see some stuck in pages_volatile until * other activity jostles them out, and they also prevented * LTP's KSM test from succeeding deterministically; so drain * them here (here rather than on entry to ksm_do_scan(), * so we don't IPI too often when pages_to_scan is set low). */ lru_add_drain_all(); /* * Whereas stale stable_nodes on the stable_tree itself * get pruned in the regular course of stable_tree_search(), * those moved out to the migrate_nodes list can accumulate: * so prune them once before each full scan.
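* ksm_get_folio() frees a stale stable_node as a side effect, so the plain get/put pair in the loop below is all the pruning takes.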
*/ if (!ksm_merge_across_nodes) { struct ksm_stable_node *stable_node, *next; struct folio *folio; list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_NOLOCK); if (folio) folio_put(folio); cond_resched(); } } for (nid = 0; nid < ksm_nr_node_ids; nid++) root_unstable_tree[nid] = RB_ROOT; spin_lock(&ksm_mmlist_lock); slot = list_entry(mm_slot->slot.mm_node.next, struct mm_slot, mm_node); mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); ksm_scan.mm_slot = mm_slot; spin_unlock(&ksm_mmlist_lock); /* * Although we tested list_empty() above, a racing __ksm_exit * of the last mm on the list may have removed it since then. */ if (mm_slot == &ksm_mm_head) return NULL; next_mm: ksm_scan.address = 0; ksm_scan.rmap_list = &mm_slot->rmap_list; } slot = &mm_slot->slot; mm = slot->mm; vma_iter_init(&vmi, mm, ksm_scan.address); mmap_read_lock(mm); if (ksm_test_exit(mm)) goto no_vmas; for_each_vma(vmi, vma) { if (!(vma->vm_flags & VM_MERGEABLE)) continue; if (ksm_scan.address < vma->vm_start) ksm_scan.address = vma->vm_start; if (!vma->anon_vma) ksm_scan.address = vma->vm_end; while (ksm_scan.address < vma->vm_end) { struct page *tmp_page = NULL; struct folio_walk fw; struct folio *folio; if (ksm_test_exit(mm)) break; folio = folio_walk_start(&fw, vma, ksm_scan.address, 0); if (folio) { if (!folio_is_zone_device(folio) && folio_test_anon(folio)) { folio_get(folio); tmp_page = fw.page; } folio_walk_end(&fw, vma); } if (tmp_page) { flush_anon_page(vma, tmp_page, ksm_scan.address); flush_dcache_page(tmp_page); rmap_item = get_next_rmap_item(mm_slot, ksm_scan.rmap_list, ksm_scan.address); if (rmap_item) { ksm_scan.rmap_list = &rmap_item->rmap_list; if (should_skip_rmap_item(folio, rmap_item)) { folio_put(folio); goto next_page; } ksm_scan.address += PAGE_SIZE; *page = tmp_page; } else { folio_put(folio); } mmap_read_unlock(mm); return rmap_item; } next_page: ksm_scan.address += PAGE_SIZE; cond_resched(); } } if (ksm_test_exit(mm)) { no_vmas: ksm_scan.address = 0; ksm_scan.rmap_list = &mm_slot->rmap_list; } /* * Nuke all the rmap_items that are above this current rmap: * because there were no VM_MERGEABLE vmas with such addresses. */ remove_trailing_rmap_items(ksm_scan.rmap_list); spin_lock(&ksm_mmlist_lock); slot = list_entry(mm_slot->slot.mm_node.next, struct mm_slot, mm_node); ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); if (ksm_scan.address == 0) { /* * We've completed a full scan of all vmas, holding mmap_lock * throughout, and found no VM_MERGEABLE: so do the same as * __ksm_exit does to remove this mm from all our lists now. * This applies either when cleaning up after __ksm_exit * (but beware: we can reach here even before __ksm_exit), * or when all VM_MERGEABLE areas have been unmapped (and * mmap_lock then protects against race with MADV_MERGEABLE). */ hash_del(&mm_slot->slot.hash); list_del(&mm_slot->slot.mm_node); spin_unlock(&ksm_mmlist_lock); mm_slot_free(mm_slot_cache, mm_slot); clear_bit(MMF_VM_MERGEABLE, &mm->flags); clear_bit(MMF_VM_MERGE_ANY, &mm->flags); mmap_read_unlock(mm); mmdrop(mm); } else { mmap_read_unlock(mm); /* * mmap_read_unlock(mm) first because after * spin_unlock(&ksm_mmlist_lock) run, the "mm" may * already have been freed under us by __ksm_exit() * because the "mm_slot" is still hashed and * ksm_scan.mm_slot doesn't point to it anymore. 
*/ spin_unlock(&ksm_mmlist_lock); } /* Repeat until we've completed scanning the whole list */ mm_slot = ksm_scan.mm_slot; if (mm_slot != &ksm_mm_head) goto next_mm; advisor_stop_scan(); trace_ksm_stop_scan(ksm_scan.seqnr, ksm_rmap_items); ksm_scan.seqnr++; return NULL; } /** * ksm_do_scan - the ksm scanner main worker function. * @scan_npages: number of pages we want to scan before we return. */ static void ksm_do_scan(unsigned int scan_npages) { struct ksm_rmap_item *rmap_item; struct page *page; while (scan_npages-- && likely(!freezing(current))) { cond_resched(); rmap_item = scan_get_next_rmap_item(&page); if (!rmap_item) return; cmp_and_merge_page(page, rmap_item); put_page(page); ksm_pages_scanned++; } } static int ksmd_should_run(void) { return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.slot.mm_node); } static int ksm_scan_thread(void *nothing) { unsigned int sleep_ms; set_freezable(); set_user_nice(current, 5); while (!kthread_should_stop()) { mutex_lock(&ksm_thread_mutex); wait_while_offlining(); if (ksmd_should_run()) ksm_do_scan(ksm_thread_pages_to_scan); mutex_unlock(&ksm_thread_mutex); if (ksmd_should_run()) { sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs); wait_event_freezable_timeout(ksm_iter_wait, sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs), msecs_to_jiffies(sleep_ms)); } else { wait_event_freezable(ksm_thread_wait, ksmd_should_run() || kthread_should_stop()); } } return 0; } static void __ksm_add_vma(struct vm_area_struct *vma) { unsigned long vm_flags = vma->vm_flags; if (vm_flags & VM_MERGEABLE) return; if (vma_ksm_compatible(vma)) vm_flags_set(vma, VM_MERGEABLE); } static int __ksm_del_vma(struct vm_area_struct *vma) { int err; if (!(vma->vm_flags & VM_MERGEABLE)) return 0; if (vma->anon_vma) { err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, true); if (err) return err; } vm_flags_clear(vma, VM_MERGEABLE); return 0; } /** * ksm_add_vma - Mark vma as mergeable if compatible * * @vma: Pointer to vma */ void ksm_add_vma(struct vm_area_struct *vma) { struct mm_struct *mm = vma->vm_mm; if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) __ksm_add_vma(vma); } static void ksm_add_vmas(struct mm_struct *mm) { struct vm_area_struct *vma; VMA_ITERATOR(vmi, mm, 0); for_each_vma(vmi, vma) __ksm_add_vma(vma); } static int ksm_del_vmas(struct mm_struct *mm) { struct vm_area_struct *vma; int err; VMA_ITERATOR(vmi, mm, 0); for_each_vma(vmi, vma) { err = __ksm_del_vma(vma); if (err) return err; } return 0; } /** * ksm_enable_merge_any - Add mm to mm ksm list and enable merging on all * compatible VMA's * * @mm: Pointer to mm * * Returns 0 on success, otherwise error code */ int ksm_enable_merge_any(struct mm_struct *mm) { int err; if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) return 0; if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { err = __ksm_enter(mm); if (err) return err; } set_bit(MMF_VM_MERGE_ANY, &mm->flags); ksm_add_vmas(mm); return 0; } /** * ksm_disable_merge_any - Disable merging on all compatible VMA's of the mm, * previously enabled via ksm_enable_merge_any(). * * Disabling merging implies unmerging any merged pages, like setting * MADV_UNMERGEABLE would. If unmerging fails, the whole operation fails and * merging on all compatible VMA's remains enabled. 
* * @mm: Pointer to mm * * Returns 0 on success, otherwise error code */ int ksm_disable_merge_any(struct mm_struct *mm) { int err; if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags)) return 0; err = ksm_del_vmas(mm); if (err) { ksm_add_vmas(mm); return err; } clear_bit(MMF_VM_MERGE_ANY, &mm->flags); return 0; } int ksm_disable(struct mm_struct *mm) { mmap_assert_write_locked(mm); if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) return 0; if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) return ksm_disable_merge_any(mm); return ksm_del_vmas(mm); } int ksm_madvise(struct vm_area_struct *vma, unsigned long start, unsigned long end, int advice, unsigned long *vm_flags) { struct mm_struct *mm = vma->vm_mm; int err; switch (advice) { case MADV_MERGEABLE: if (vma->vm_flags & VM_MERGEABLE) return 0; if (!vma_ksm_compatible(vma)) return 0; if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { err = __ksm_enter(mm); if (err) return err; } *vm_flags |= VM_MERGEABLE; break; case MADV_UNMERGEABLE: if (!(*vm_flags & VM_MERGEABLE)) return 0; /* just ignore the advice */ if (vma->anon_vma) { err = unmerge_ksm_pages(vma, start, end, true); if (err) return err; } *vm_flags &= ~VM_MERGEABLE; break; } return 0; } EXPORT_SYMBOL_GPL(ksm_madvise); int __ksm_enter(struct mm_struct *mm) { struct ksm_mm_slot *mm_slot; struct mm_slot *slot; int needs_wakeup; mm_slot = mm_slot_alloc(mm_slot_cache); if (!mm_slot) return -ENOMEM; slot = &mm_slot->slot; /* Check ksm_run too? Would need tighter locking */ needs_wakeup = list_empty(&ksm_mm_head.slot.mm_node); spin_lock(&ksm_mmlist_lock); mm_slot_insert(mm_slots_hash, mm, slot); /* * When KSM_RUN_MERGE (or KSM_RUN_STOP), * insert just behind the scanning cursor, to let the area settle * down a little; when fork is followed by immediate exec, we don't * want ksmd to waste time setting up and tearing down an rmap_list. * * But when KSM_RUN_UNMERGE, it's important to insert ahead of its * scanning cursor, otherwise KSM pages in newly forked mms will be * missed: then we might as well insert at the end of the list. */ if (ksm_run & KSM_RUN_UNMERGE) list_add_tail(&slot->mm_node, &ksm_mm_head.slot.mm_node); else list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node); spin_unlock(&ksm_mmlist_lock); set_bit(MMF_VM_MERGEABLE, &mm->flags); mmgrab(mm); if (needs_wakeup) wake_up_interruptible(&ksm_thread_wait); trace_ksm_enter(mm); return 0; } void __ksm_exit(struct mm_struct *mm) { struct ksm_mm_slot *mm_slot; struct mm_slot *slot; int easy_to_free = 0; /* * This process is exiting: if it's straightforward (as is the * case when ksmd was never running), free mm_slot immediately. * But if it's at the cursor or has rmap_items linked to it, use * mmap_lock to synchronize with any break_cows before pagetables * are freed, and leave the mm_slot on the list for ksmd to free. * Beware: ksm may already have noticed it exiting and freed the slot. 
*/ spin_lock(&ksm_mmlist_lock); slot = mm_slot_lookup(mm_slots_hash, mm); mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot); if (mm_slot && ksm_scan.mm_slot != mm_slot) { if (!mm_slot->rmap_list) { hash_del(&slot->hash); list_del(&slot->mm_node); easy_to_free = 1; } else { list_move(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node); } } spin_unlock(&ksm_mmlist_lock); if (easy_to_free) { mm_slot_free(mm_slot_cache, mm_slot); clear_bit(MMF_VM_MERGE_ANY, &mm->flags); clear_bit(MMF_VM_MERGEABLE, &mm->flags); mmdrop(mm); } else if (mm_slot) { mmap_write_lock(mm); mmap_write_unlock(mm); } trace_ksm_exit(mm); } struct folio *ksm_might_need_to_copy(struct folio *folio, struct vm_area_struct *vma, unsigned long addr) { struct page *page = folio_page(folio, 0); struct anon_vma *anon_vma = folio_anon_vma(folio); struct folio *new_folio; if (folio_test_large(folio)) return folio; if (folio_test_ksm(folio)) { if (folio_stable_node(folio) && !(ksm_run & KSM_RUN_UNMERGE)) return folio; /* no need to copy it */ } else if (!anon_vma) { return folio; /* no need to copy it */ } else if (folio->index == linear_page_index(vma, addr) && anon_vma->root == vma->anon_vma->root) { return folio; /* still no need to copy it */ } if (PageHWPoison(page)) return ERR_PTR(-EHWPOISON); if (!folio_test_uptodate(folio)) return folio; /* let do_swap_page report the error */ new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr); if (new_folio && mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) { folio_put(new_folio); new_folio = NULL; } if (new_folio) { if (copy_mc_user_highpage(folio_page(new_folio, 0), page, addr, vma)) { folio_put(new_folio); return ERR_PTR(-EHWPOISON); } folio_set_dirty(new_folio); __folio_mark_uptodate(new_folio); __folio_set_locked(new_folio); #ifdef CONFIG_SWAP count_vm_event(KSM_SWPIN_COPY); #endif } return new_folio; } void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc) { struct ksm_stable_node *stable_node; struct ksm_rmap_item *rmap_item; int search_new_forks = 0; VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio); /* * Rely on the page lock to protect against concurrent modifications * to that page's node of the stable tree. */ VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); stable_node = folio_stable_node(folio); if (!stable_node) return; again: hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { struct anon_vma *anon_vma = rmap_item->anon_vma; struct anon_vma_chain *vmac; struct vm_area_struct *vma; cond_resched(); if (!anon_vma_trylock_read(anon_vma)) { if (rwc->try_lock) { rwc->contended = true; return; } anon_vma_lock_read(anon_vma); } anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, 0, ULONG_MAX) { unsigned long addr; cond_resched(); vma = vmac->vma; /* Ignore the stable/unstable/sqnr flags */ addr = rmap_item->address & PAGE_MASK; if (addr < vma->vm_start || addr >= vma->vm_end) continue; /* * Initially we examine only the vma which covers this * rmap_item; but later, if there is still work to do, * we examine covering vmas in other mms: in case they * were forked from the original since ksmd passed. 
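* The second pass (search_new_forks == 1) therefore revisits every anon_vma, this time matching only the vmas whose mm differs from the one the rmap_item was created in.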
*/ if ((rmap_item->mm == vma->vm_mm) == search_new_forks) continue; if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) continue; if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) { anon_vma_unlock_read(anon_vma); return; } if (rwc->done && rwc->done(folio)) { anon_vma_unlock_read(anon_vma); return; } } anon_vma_unlock_read(anon_vma); } if (!search_new_forks++) goto again; } #ifdef CONFIG_MEMORY_FAILURE /* * Collect processes when the error hit an ksm page. */ void collect_procs_ksm(const struct folio *folio, const struct page *page, struct list_head *to_kill, int force_early) { struct ksm_stable_node *stable_node; struct ksm_rmap_item *rmap_item; struct vm_area_struct *vma; struct task_struct *tsk; stable_node = folio_stable_node(folio); if (!stable_node) return; hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { struct anon_vma *av = rmap_item->anon_vma; anon_vma_lock_read(av); rcu_read_lock(); for_each_process(tsk) { struct anon_vma_chain *vmac; unsigned long addr; struct task_struct *t = task_early_kill(tsk, force_early); if (!t) continue; anon_vma_interval_tree_foreach(vmac, &av->rb_root, 0, ULONG_MAX) { vma = vmac->vma; if (vma->vm_mm == t->mm) { addr = rmap_item->address & PAGE_MASK; add_to_kill_ksm(t, page, vma, to_kill, addr); } } } rcu_read_unlock(); anon_vma_unlock_read(av); } } #endif #ifdef CONFIG_MIGRATION void folio_migrate_ksm(struct folio *newfolio, struct folio *folio) { struct ksm_stable_node *stable_node; VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); VM_BUG_ON_FOLIO(!folio_test_locked(newfolio), newfolio); VM_BUG_ON_FOLIO(newfolio->mapping != folio->mapping, newfolio); stable_node = folio_stable_node(folio); if (stable_node) { VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio); stable_node->kpfn = folio_pfn(newfolio); /* * newfolio->mapping was set in advance; now we need smp_wmb() * to make sure that the new stable_node->kpfn is visible * to ksm_get_folio() before it can see that folio->mapping * has gone stale (or that the swapcache flag has been cleared). 
*/ smp_wmb(); folio_set_stable_node(folio, NULL); } } #endif /* CONFIG_MIGRATION */ #ifdef CONFIG_MEMORY_HOTREMOVE static void wait_while_offlining(void) { while (ksm_run & KSM_RUN_OFFLINE) { mutex_unlock(&ksm_thread_mutex); wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE), TASK_UNINTERRUPTIBLE); mutex_lock(&ksm_thread_mutex); } } static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node, unsigned long start_pfn, unsigned long end_pfn) { if (stable_node->kpfn >= start_pfn && stable_node->kpfn < end_pfn) { /* * Don't ksm_get_folio, page has already gone: * which is why we keep kpfn instead of page* */ remove_node_from_stable_tree(stable_node); return true; } return false; } static bool stable_node_chain_remove_range(struct ksm_stable_node *stable_node, unsigned long start_pfn, unsigned long end_pfn, struct rb_root *root) { struct ksm_stable_node *dup; struct hlist_node *hlist_safe; if (!is_stable_node_chain(stable_node)) { VM_BUG_ON(is_stable_node_dup(stable_node)); return stable_node_dup_remove_range(stable_node, start_pfn, end_pfn); } hlist_for_each_entry_safe(dup, hlist_safe, &stable_node->hlist, hlist_dup) { VM_BUG_ON(!is_stable_node_dup(dup)); stable_node_dup_remove_range(dup, start_pfn, end_pfn); } if (hlist_empty(&stable_node->hlist)) { free_stable_node_chain(stable_node, root); return true; /* notify caller that tree was rebalanced */ } else return false; } static void ksm_check_stable_tree(unsigned long start_pfn, unsigned long end_pfn) { struct ksm_stable_node *stable_node, *next; struct rb_node *node; int nid; for (nid = 0; nid < ksm_nr_node_ids; nid++) { node = rb_first(root_stable_tree + nid); while (node) { stable_node = rb_entry(node, struct ksm_stable_node, node); if (stable_node_chain_remove_range(stable_node, start_pfn, end_pfn, root_stable_tree + nid)) node = rb_first(root_stable_tree + nid); else node = rb_next(node); cond_resched(); } } list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) { if (stable_node->kpfn >= start_pfn && stable_node->kpfn < end_pfn) remove_node_from_stable_tree(stable_node); cond_resched(); } } static int ksm_memory_callback(struct notifier_block *self, unsigned long action, void *arg) { struct memory_notify *mn = arg; switch (action) { case MEM_GOING_OFFLINE: /* * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items() * and remove_all_stable_nodes() while memory is going offline: * it is unsafe for them to touch the stable tree at this time. * But unmerge_ksm_pages(), rmap lookups and other entry points * which do not need the ksm_thread_mutex are all safe. */ mutex_lock(&ksm_thread_mutex); ksm_run |= KSM_RUN_OFFLINE; mutex_unlock(&ksm_thread_mutex); break; case MEM_OFFLINE: /* * Most of the work is done by page migration; but there might * be a few stable_nodes left over, still pointing to struct * pages which have been offlined: prune those from the tree, * otherwise ksm_get_folio() might later try to access a * non-existent struct page. */ ksm_check_stable_tree(mn->start_pfn, mn->start_pfn + mn->nr_pages); fallthrough; case MEM_CANCEL_OFFLINE: mutex_lock(&ksm_thread_mutex); ksm_run &= ~KSM_RUN_OFFLINE; mutex_unlock(&ksm_thread_mutex); smp_mb(); /* wake_up_bit advises this */ wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE)); break; } return NOTIFY_OK; } #else static void wait_while_offlining(void) { } #endif /* CONFIG_MEMORY_HOTREMOVE */ #ifdef CONFIG_PROC_FS /* * The process is mergeable only if any VMA is currently * applicable to KSM. * * The mmap lock must be held in read mode. 
*/ bool ksm_process_mergeable(struct mm_struct *mm) { struct vm_area_struct *vma; mmap_assert_locked(mm); VMA_ITERATOR(vmi, mm, 0); for_each_vma(vmi, vma) if (vma->vm_flags & VM_MERGEABLE) return true; return false; } long ksm_process_profit(struct mm_struct *mm) { return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE - mm->ksm_rmap_items * sizeof(struct ksm_rmap_item); } #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_SYSFS /* * This all compiles without CONFIG_SYSFS, but is a waste of space. */ #define KSM_ATTR_RO(_name) \ static struct kobj_attribute _name##_attr = __ATTR_RO(_name) #define KSM_ATTR(_name) \ static struct kobj_attribute _name##_attr = __ATTR_RW(_name) static ssize_t sleep_millisecs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", ksm_thread_sleep_millisecs); } static ssize_t sleep_millisecs_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned int msecs; int err; err = kstrtouint(buf, 10, &msecs); if (err) return -EINVAL; ksm_thread_sleep_millisecs = msecs; wake_up_interruptible(&ksm_iter_wait); return count; } KSM_ATTR(sleep_millisecs); static ssize_t pages_to_scan_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", ksm_thread_pages_to_scan); } static ssize_t pages_to_scan_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned int nr_pages; int err; if (ksm_advisor != KSM_ADVISOR_NONE) return -EINVAL; err = kstrtouint(buf, 10, &nr_pages); if (err) return -EINVAL; ksm_thread_pages_to_scan = nr_pages; return count; } KSM_ATTR(pages_to_scan); static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%lu\n", ksm_run); } static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned int flags; int err; err = kstrtouint(buf, 10, &flags); if (err) return -EINVAL; if (flags > KSM_RUN_UNMERGE) return -EINVAL; /* * KSM_RUN_MERGE sets ksmd running, and 0 stops it running. * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items, * breaking COW to free the pages_shared (but leaves mm_slots * on the list for when ksmd may be set running again). */ mutex_lock(&ksm_thread_mutex); wait_while_offlining(); if (ksm_run != flags) { ksm_run = flags; if (flags & KSM_RUN_UNMERGE) { set_current_oom_origin(); err = unmerge_and_remove_all_rmap_items(); clear_current_oom_origin(); if (err) { ksm_run = KSM_RUN_STOP; count = err; } } } mutex_unlock(&ksm_thread_mutex); if (flags & KSM_RUN_MERGE) wake_up_interruptible(&ksm_thread_wait); return count; } KSM_ATTR(run); #ifdef CONFIG_NUMA static ssize_t merge_across_nodes_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", ksm_merge_across_nodes); } static ssize_t merge_across_nodes_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; unsigned long knob; err = kstrtoul(buf, 10, &knob); if (err) return err; if (knob > 1) return -EINVAL; mutex_lock(&ksm_thread_mutex); wait_while_offlining(); if (ksm_merge_across_nodes != knob) { if (ksm_pages_shared || remove_all_stable_nodes()) err = -EBUSY; else if (root_stable_tree == one_stable_tree) { struct rb_root *buf; /* * This is the first time that we switch away from the * default of merging across nodes: must now allocate * a buffer to hold as many roots as may be needed. 
* Allocate stable and unstable together: * with MAXSMP and NODES_SHIFT 10 this will use 16kB. */ buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf), GFP_KERNEL); /* RB_ROOT is all zeroes (a NULL rb_node), so kcalloc's zeroed buffer gives valid empty roots */ if (!buf) err = -ENOMEM; else { root_stable_tree = buf; root_unstable_tree = buf + nr_node_ids; /* Stable tree is empty but not the unstable */ root_unstable_tree[0] = one_unstable_tree[0]; } } if (!err) { ksm_merge_across_nodes = knob; ksm_nr_node_ids = knob ? 1 : nr_node_ids; } } mutex_unlock(&ksm_thread_mutex); return err ? err : count; } KSM_ATTR(merge_across_nodes); #endif static ssize_t use_zero_pages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", ksm_use_zero_pages); } static ssize_t use_zero_pages_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; bool value; err = kstrtobool(buf, &value); if (err) return -EINVAL; ksm_use_zero_pages = value; return count; } KSM_ATTR(use_zero_pages); static ssize_t max_page_sharing_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", ksm_max_page_sharing); } static ssize_t max_page_sharing_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; int knob; err = kstrtoint(buf, 10, &knob); if (err) return err; /* * When a KSM page is created it is shared by 2 mappings. This * being a signed comparison, it implicitly verifies it's not * negative. */ if (knob < 2) return -EINVAL; if (READ_ONCE(ksm_max_page_sharing) == knob) return count; mutex_lock(&ksm_thread_mutex); wait_while_offlining(); if (ksm_max_page_sharing != knob) { if (ksm_pages_shared || remove_all_stable_nodes()) err = -EBUSY; else ksm_max_page_sharing = knob; } mutex_unlock(&ksm_thread_mutex); return err ? err : count; } KSM_ATTR(max_page_sharing); static ssize_t pages_scanned_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%lu\n", ksm_pages_scanned); } KSM_ATTR_RO(pages_scanned); static ssize_t pages_shared_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%lu\n", ksm_pages_shared); } KSM_ATTR_RO(pages_shared); static ssize_t pages_sharing_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%lu\n", ksm_pages_sharing); } KSM_ATTR_RO(pages_sharing); static ssize_t pages_unshared_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%lu\n", ksm_pages_unshared); } KSM_ATTR_RO(pages_unshared); static ssize_t pages_volatile_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { long ksm_pages_volatile; ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared - ksm_pages_sharing - ksm_pages_unshared; /* * It was not worth any locking to calculate that statistic, * but it might therefore sometimes be negative: conceal that.
*/ if (ksm_pages_volatile < 0) ksm_pages_volatile = 0; return sysfs_emit(buf, "%ld\n", ksm_pages_volatile); } KSM_ATTR_RO(pages_volatile); static ssize_t pages_skipped_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%lu\n", ksm_pages_skipped); } KSM_ATTR_RO(pages_skipped); static ssize_t ksm_zero_pages_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%ld\n", atomic_long_read(&ksm_zero_pages)); } KSM_ATTR_RO(ksm_zero_pages); static ssize_t general_profit_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { long general_profit; general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE - ksm_rmap_items * sizeof(struct ksm_rmap_item); return sysfs_emit(buf, "%ld\n", general_profit); } KSM_ATTR_RO(general_profit); static ssize_t stable_node_dups_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%lu\n", ksm_stable_node_dups); } KSM_ATTR_RO(stable_node_dups); static ssize_t stable_node_chains_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%lu\n", ksm_stable_node_chains); } KSM_ATTR_RO(stable_node_chains); static ssize_t stable_node_chains_prune_millisecs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", ksm_stable_node_chains_prune_millisecs); } static ssize_t stable_node_chains_prune_millisecs_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { unsigned int msecs; int err; err = kstrtouint(buf, 10, &msecs); if (err) return -EINVAL; ksm_stable_node_chains_prune_millisecs = msecs; return count; } KSM_ATTR(stable_node_chains_prune_millisecs); static ssize_t full_scans_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%lu\n", ksm_scan.seqnr); } KSM_ATTR_RO(full_scans); static ssize_t smart_scan_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", ksm_smart_scan); } static ssize_t smart_scan_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; bool value; err = kstrtobool(buf, &value); if (err) return -EINVAL; ksm_smart_scan = value; return count; } KSM_ATTR(smart_scan); static ssize_t advisor_mode_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { const char *output; if (ksm_advisor == KSM_ADVISOR_NONE) output = "[none] scan-time"; else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) output = "none [scan-time]"; return sysfs_emit(buf, "%s\n", output); } static ssize_t advisor_mode_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { enum ksm_advisor_type curr_advisor = ksm_advisor; if (sysfs_streq("scan-time", buf)) ksm_advisor = KSM_ADVISOR_SCAN_TIME; else if (sysfs_streq("none", buf)) ksm_advisor = KSM_ADVISOR_NONE; else return -EINVAL; /* Set advisor default values */ if (curr_advisor != ksm_advisor) set_advisor_defaults(); return count; } KSM_ATTR(advisor_mode); static ssize_t advisor_max_cpu_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%u\n", ksm_advisor_max_cpu); } static ssize_t advisor_max_cpu_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; unsigned long value; err = kstrtoul(buf, 10, &value); if (err) return -EINVAL; ksm_advisor_max_cpu = value; return count; } KSM_ATTR(advisor_max_cpu); static ssize_t 
advisor_min_pages_to_scan_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%lu\n", ksm_advisor_min_pages_to_scan); } static ssize_t advisor_min_pages_to_scan_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; unsigned long value; err = kstrtoul(buf, 10, &value); if (err) return -EINVAL; ksm_advisor_min_pages_to_scan = value; return count; } KSM_ATTR(advisor_min_pages_to_scan); static ssize_t advisor_max_pages_to_scan_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%lu\n", ksm_advisor_max_pages_to_scan); } static ssize_t advisor_max_pages_to_scan_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; unsigned long value; err = kstrtoul(buf, 10, &value); if (err) return -EINVAL; ksm_advisor_max_pages_to_scan = value; return count; } KSM_ATTR(advisor_max_pages_to_scan); static ssize_t advisor_target_scan_time_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "%lu\n", ksm_advisor_target_scan_time); } static ssize_t advisor_target_scan_time_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int err; unsigned long value; err = kstrtoul(buf, 10, &value); if (err) return -EINVAL; if (value < 1) return -EINVAL; ksm_advisor_target_scan_time = value; return count; } KSM_ATTR(advisor_target_scan_time); static struct attribute *ksm_attrs[] = { &sleep_millisecs_attr.attr, &pages_to_scan_attr.attr, &run_attr.attr, &pages_scanned_attr.attr, &pages_shared_attr.attr, &pages_sharing_attr.attr, &pages_unshared_attr.attr, &pages_volatile_attr.attr, &pages_skipped_attr.attr, &ksm_zero_pages_attr.attr, &full_scans_attr.attr, #ifdef CONFIG_NUMA &merge_across_nodes_attr.attr, #endif &max_page_sharing_attr.attr, &stable_node_chains_attr.attr, &stable_node_dups_attr.attr, &stable_node_chains_prune_millisecs_attr.attr, &use_zero_pages_attr.attr, &general_profit_attr.attr, &smart_scan_attr.attr, &advisor_mode_attr.attr, &advisor_max_cpu_attr.attr, &advisor_min_pages_to_scan_attr.attr, &advisor_max_pages_to_scan_attr.attr, &advisor_target_scan_time_attr.attr, NULL, }; static const struct attribute_group ksm_attr_group = { .attrs = ksm_attrs, .name = "ksm", }; #endif /* CONFIG_SYSFS */ static int __init ksm_init(void) { struct task_struct *ksm_thread; int err; /* The correct value depends on page size and endianness */ zero_checksum = calc_checksum(ZERO_PAGE(0)); /* Default to false for backwards compatibility */ ksm_use_zero_pages = false; err = ksm_slab_init(); if (err) goto out; ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd"); if (IS_ERR(ksm_thread)) { pr_err("ksm: creating kthread failed\n"); err = PTR_ERR(ksm_thread); goto out_free; } #ifdef CONFIG_SYSFS err = sysfs_create_group(mm_kobj, &ksm_attr_group); if (err) { pr_err("ksm: register sysfs failed\n"); kthread_stop(ksm_thread); goto out_free; } #else ksm_run = KSM_RUN_MERGE; /* no way for user to start it */ #endif /* CONFIG_SYSFS */ #ifdef CONFIG_MEMORY_HOTREMOVE /* There is no significance to this priority 100 */ hotplug_memory_notifier(ksm_memory_callback, KSM_CALLBACK_PRI); #endif return 0; out_free: ksm_slab_free(); out: return err; } subsys_initcall(ksm_init);
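/*
 * Illustrative userspace sketch (not part of ksm.c): minimal code exercising
 * the two opt-in paths implemented above. madvise(MADV_MERGEABLE) lands in
 * ksm_madvise(); the process-wide prctl(PR_SET_MEMORY_MERGE) lands in
 * ksm_enable_merge_any(). Assumes CONFIG_KSM, a v6.4+ kernel for the prctl
 * (which also needs CAP_SYS_RESOURCE), and ksmd enabled with
 * "echo 1 > /sys/kernel/mm/ksm/run"; merging then shows up in
 * /sys/kernel/mm/ksm/pages_sharing.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_MEMORY_MERGE
#define PR_SET_MEMORY_MERGE 67	/* added in Linux 6.4 */
#endif

int main(void)
{
	size_t len = 64 * 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;

	memset(buf, 0x5a, len);	/* identical pages for ksmd to merge */

	/* Per-VMA opt-in: sets VM_MERGEABLE via ksm_madvise(). */
	if (madvise(buf, len, MADV_MERGEABLE))
		perror("madvise(MADV_MERGEABLE)");

	/* Process-wide opt-in: marks all compatible VMAs, current and future. */
	if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0))
		perror("prctl(PR_SET_MEMORY_MERGE)");

	pause();	/* leave the mappings in place while ksmd scans */
	return 0;
}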
// SPDX-License-Identifier: GPL-2.0-or-later /* * Behringer BCD2000 driver * * Copyright (C) 2014 Mario Kicherer (dev@kicherer.org) */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/bitmap.h> #include <linux/usb.h> #include <linux/usb/audio.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/rawmidi.h> #define PREFIX "snd-bcd2000: " #define BUFSIZE 64 static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1397, 0x00bd) }, { }, }; static const unsigned char device_cmd_prefix[] = {0x03, 0x00}; static const unsigned char bcd2000_init_sequence[] = { 0x07, 0x00, 0x00, 0x00, 0x78, 0x48, 0x1c, 0x81, 0xc4, 0x00, 0x00, 0x00, 0x5e, 0x53, 0x4a, 0xf7, 0x18, 0xfa, 0x11, 0xff, 0x6c, 0xf3, 0x90, 0xff, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x18, 0xfa, 0x11, 0xff, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf2, 0x34, 0x4a, 0xf7, 0x18, 0xfa, 0x11, 0xff }; struct bcd2000 { struct usb_device *dev; struct snd_card *card; struct usb_interface *intf; int card_index; int midi_out_active; struct snd_rawmidi *rmidi; struct snd_rawmidi_substream *midi_receive_substream; struct snd_rawmidi_substream *midi_out_substream; unsigned char midi_in_buf[BUFSIZE]; unsigned char midi_out_buf[BUFSIZE]; struct urb *midi_out_urb; struct urb *midi_in_urb; struct usb_anchor anchor; }; static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static DEFINE_MUTEX(devices_mutex); static DECLARE_BITMAP(devices_used, SNDRV_CARDS); static struct usb_driver bcd2000_driver; #ifdef CONFIG_SND_DEBUG static void bcd2000_dump_buffer(const char *prefix, const char *buf, int len) { print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_NONE,
16, 1, buf, len, false); } #else static void bcd2000_dump_buffer(const char *prefix, const char *buf, int len) {} #endif static int bcd2000_midi_input_open(struct snd_rawmidi_substream *substream) { return 0; } static int bcd2000_midi_input_close(struct snd_rawmidi_substream *substream) { return 0; } /* (de)register midi substream from client */ static void bcd2000_midi_input_trigger(struct snd_rawmidi_substream *substream, int up) { struct bcd2000 *bcd2k = substream->rmidi->private_data; bcd2k->midi_receive_substream = up ? substream : NULL; } static void bcd2000_midi_handle_input(struct bcd2000 *bcd2k, const unsigned char *buf, unsigned int buf_len) { unsigned int payload_length, tocopy; struct snd_rawmidi_substream *midi_receive_substream; midi_receive_substream = READ_ONCE(bcd2k->midi_receive_substream); if (!midi_receive_substream) return; bcd2000_dump_buffer(PREFIX "received from device: ", buf, buf_len); if (buf_len < 2) return; payload_length = buf[0]; /* ignore packets without payload */ if (payload_length == 0) return; tocopy = min(payload_length, buf_len-1); bcd2000_dump_buffer(PREFIX "sending to userspace: ", &buf[1], tocopy); snd_rawmidi_receive(midi_receive_substream, &buf[1], tocopy); } static void bcd2000_midi_send(struct bcd2000 *bcd2k) { int len, ret; struct snd_rawmidi_substream *midi_out_substream; BUILD_BUG_ON(sizeof(device_cmd_prefix) >= BUFSIZE); midi_out_substream = READ_ONCE(bcd2k->midi_out_substream); if (!midi_out_substream) return; /* copy command prefix bytes */ memcpy(bcd2k->midi_out_buf, device_cmd_prefix, sizeof(device_cmd_prefix)); /* * get MIDI packet and leave space for command prefix * and payload length */ len = snd_rawmidi_transmit(midi_out_substream, bcd2k->midi_out_buf + 3, BUFSIZE - 3); if (len < 0) dev_err(&bcd2k->dev->dev, "%s: snd_rawmidi_transmit error %d\n", __func__, len); if (len <= 0) return; /* set payload length */ bcd2k->midi_out_buf[2] = len; bcd2k->midi_out_urb->transfer_buffer_length = BUFSIZE; bcd2000_dump_buffer(PREFIX "sending to device: ", bcd2k->midi_out_buf, len+3); /* send packet to the BCD2000 */ ret = usb_submit_urb(bcd2k->midi_out_urb, GFP_ATOMIC); if (ret < 0) dev_err(&bcd2k->dev->dev, PREFIX "%s (%p): usb_submit_urb() failed, ret=%d, len=%d\n", __func__, midi_out_substream, ret, len); else bcd2k->midi_out_active = 1; } static int bcd2000_midi_output_open(struct snd_rawmidi_substream *substream) { return 0; } static int bcd2000_midi_output_close(struct snd_rawmidi_substream *substream) { struct bcd2000 *bcd2k = substream->rmidi->private_data; if (bcd2k->midi_out_active) { usb_kill_urb(bcd2k->midi_out_urb); bcd2k->midi_out_active = 0; } return 0; } /* (de)register midi substream from client */ static void bcd2000_midi_output_trigger(struct snd_rawmidi_substream *substream, int up) { struct bcd2000 *bcd2k = substream->rmidi->private_data; if (up) { bcd2k->midi_out_substream = substream; /* check if there is data userspace wants to send */ if (!bcd2k->midi_out_active) bcd2000_midi_send(bcd2k); } else { bcd2k->midi_out_substream = NULL; } } static void bcd2000_output_complete(struct urb *urb) { struct bcd2000 *bcd2k = urb->context; bcd2k->midi_out_active = 0; if (urb->status) dev_warn(&urb->dev->dev, PREFIX "output urb->status: %d\n", urb->status); if (urb->status == -ESHUTDOWN) return; /* check if there is more data userspace wants to send */ bcd2000_midi_send(bcd2k); } static void bcd2000_input_complete(struct urb *urb) { int ret; struct bcd2000 *bcd2k = urb->context; if (urb->status) dev_warn(&urb->dev->dev, PREFIX "input 
urb->status: %i\n", urb->status); if (!bcd2k || urb->status == -ESHUTDOWN) return; if (urb->actual_length > 0) bcd2000_midi_handle_input(bcd2k, urb->transfer_buffer, urb->actual_length); /* return URB to device */ ret = usb_submit_urb(bcd2k->midi_in_urb, GFP_ATOMIC); if (ret < 0) dev_err(&bcd2k->dev->dev, PREFIX "%s: usb_submit_urb() failed, ret=%d\n", __func__, ret); } static const struct snd_rawmidi_ops bcd2000_midi_output = { .open = bcd2000_midi_output_open, .close = bcd2000_midi_output_close, .trigger = bcd2000_midi_output_trigger, }; static const struct snd_rawmidi_ops bcd2000_midi_input = { .open = bcd2000_midi_input_open, .close = bcd2000_midi_input_close, .trigger = bcd2000_midi_input_trigger, }; static void bcd2000_init_device(struct bcd2000 *bcd2k) { int ret; init_usb_anchor(&bcd2k->anchor); usb_anchor_urb(bcd2k->midi_out_urb, &bcd2k->anchor); usb_anchor_urb(bcd2k->midi_in_urb, &bcd2k->anchor); /* copy init sequence into buffer */ memcpy(bcd2k->midi_out_buf, bcd2000_init_sequence, 52); bcd2k->midi_out_urb->transfer_buffer_length = 52; /* submit sequence */ ret = usb_submit_urb(bcd2k->midi_out_urb, GFP_KERNEL); if (ret < 0) dev_err(&bcd2k->dev->dev, PREFIX "%s: usb_submit_urb() out failed, ret=%d: ", __func__, ret); else bcd2k->midi_out_active = 1; /* pass URB to device to enable button and controller events */ ret = usb_submit_urb(bcd2k->midi_in_urb, GFP_KERNEL); if (ret < 0) dev_err(&bcd2k->dev->dev, PREFIX "%s: usb_submit_urb() in failed, ret=%d: ", __func__, ret); /* ensure initialization is finished */ usb_wait_anchor_empty_timeout(&bcd2k->anchor, 1000); } static int bcd2000_init_midi(struct bcd2000 *bcd2k) { int ret; struct snd_rawmidi *rmidi; ret = snd_rawmidi_new(bcd2k->card, bcd2k->card->shortname, 0, 1, /* output */ 1, /* input */ &rmidi); if (ret < 0) return ret; strscpy(rmidi->name, bcd2k->card->shortname, sizeof(rmidi->name)); rmidi->info_flags = SNDRV_RAWMIDI_INFO_DUPLEX; rmidi->private_data = bcd2k; rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT; snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &bcd2000_midi_output); rmidi->info_flags |= SNDRV_RAWMIDI_INFO_INPUT; snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &bcd2000_midi_input); bcd2k->rmidi = rmidi; bcd2k->midi_in_urb = usb_alloc_urb(0, GFP_KERNEL); bcd2k->midi_out_urb = usb_alloc_urb(0, GFP_KERNEL); if (!bcd2k->midi_in_urb || !bcd2k->midi_out_urb) { dev_err(&bcd2k->dev->dev, PREFIX "usb_alloc_urb failed\n"); return -ENOMEM; } usb_fill_int_urb(bcd2k->midi_in_urb, bcd2k->dev, usb_rcvintpipe(bcd2k->dev, 0x81), bcd2k->midi_in_buf, BUFSIZE, bcd2000_input_complete, bcd2k, 1); usb_fill_int_urb(bcd2k->midi_out_urb, bcd2k->dev, usb_sndintpipe(bcd2k->dev, 0x1), bcd2k->midi_out_buf, BUFSIZE, bcd2000_output_complete, bcd2k, 1); /* sanity checks of EPs before actually submitting */ if (usb_urb_ep_type_check(bcd2k->midi_in_urb) || usb_urb_ep_type_check(bcd2k->midi_out_urb)) { dev_err(&bcd2k->dev->dev, "invalid MIDI EP\n"); return -EINVAL; } bcd2000_init_device(bcd2k); return 0; } static void bcd2000_free_usb_related_resources(struct bcd2000 *bcd2k, struct usb_interface *interface) { usb_kill_urb(bcd2k->midi_out_urb); usb_kill_urb(bcd2k->midi_in_urb); usb_free_urb(bcd2k->midi_out_urb); usb_free_urb(bcd2k->midi_in_urb); if (bcd2k->intf) { usb_set_intfdata(bcd2k->intf, NULL); bcd2k->intf = NULL; } } static int bcd2000_probe(struct usb_interface *interface, const struct usb_device_id *usb_id) { struct snd_card *card; struct bcd2000 *bcd2k; unsigned int card_index; char usb_path[32]; int err; 
mutex_lock(&devices_mutex); for (card_index = 0; card_index < SNDRV_CARDS; ++card_index) if (!test_bit(card_index, devices_used)) break; if (card_index >= SNDRV_CARDS) { mutex_unlock(&devices_mutex); return -ENOENT; } err = snd_card_new(&interface->dev, index[card_index], id[card_index], THIS_MODULE, sizeof(*bcd2k), &card); if (err < 0) { mutex_unlock(&devices_mutex); return err; } bcd2k = card->private_data; bcd2k->dev = interface_to_usbdev(interface); bcd2k->card = card; bcd2k->card_index = card_index; bcd2k->intf = interface; strscpy(card->driver, "snd-bcd2000", sizeof(card->driver)); strscpy(card->shortname, "BCD2000", sizeof(card->shortname)); usb_make_path(bcd2k->dev, usb_path, sizeof(usb_path)); snprintf(bcd2k->card->longname, sizeof(bcd2k->card->longname), "Behringer BCD2000 at %s", usb_path); err = bcd2000_init_midi(bcd2k); if (err < 0) goto probe_error; err = snd_card_register(card); if (err < 0) goto probe_error; usb_set_intfdata(interface, bcd2k); set_bit(card_index, devices_used); mutex_unlock(&devices_mutex); return 0; probe_error: dev_info(&bcd2k->dev->dev, PREFIX "error during probing\n"); bcd2000_free_usb_related_resources(bcd2k, interface); snd_card_free(card); mutex_unlock(&devices_mutex); return err; } static void bcd2000_disconnect(struct usb_interface *interface) { struct bcd2000 *bcd2k = usb_get_intfdata(interface); if (!bcd2k) return; mutex_lock(&devices_mutex); /* make sure that userspace cannot create new requests */ snd_card_disconnect(bcd2k->card); bcd2000_free_usb_related_resources(bcd2k, interface); clear_bit(bcd2k->card_index, devices_used); snd_card_free_when_closed(bcd2k->card); mutex_unlock(&devices_mutex); } static struct usb_driver bcd2000_driver = { .name = "snd-bcd2000", .probe = bcd2000_probe, .disconnect = bcd2000_disconnect, .id_table = id_table, }; module_usb_driver(bcd2000_driver); MODULE_DEVICE_TABLE(usb, id_table); MODULE_AUTHOR("Mario Kicherer, dev@kicherer.org"); MODULE_DESCRIPTION("Behringer BCD2000 driver"); MODULE_LICENSE("GPL");
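/*
 * Illustrative userspace sketch (not part of this driver): the BCD2000 is
 * exposed as an ordinary ALSA rawmidi device, so alsa-lib is enough to read
 * the bytes that bcd2000_midi_handle_input() pushes via snd_rawmidi_receive().
 * The "hw:BCD2000" device string is an assumption about the card id this
 * module gets; check "amidi -l" for the real name. Build with -lasound.
 */
#include <stdio.h>
#include <alsa/asoundlib.h>

int main(void)
{
	snd_rawmidi_t *in = NULL;
	unsigned char buf[16];
	ssize_t n;

	if (snd_rawmidi_open(&in, NULL, "hw:BCD2000", 0) < 0) {
		fprintf(stderr, "cannot open rawmidi input\n");
		return 1;
	}

	/* Blocking read of raw MIDI bytes (knob moves, button presses). */
	while ((n = snd_rawmidi_read(in, buf, sizeof(buf))) > 0)
		printf("read %zd byte(s), status 0x%02x\n", n, buf[0]);

	snd_rawmidi_close(in);
	return 0;
}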
// SPDX-License-Identifier: GPL-2.0-only /* Miscellaneous routines. * * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #include <linux/swap.h> #include "internal.h" /** * netfs_alloc_folioq_buffer - Allocate buffer space into a folio queue * @mapping: Address space to set on the folio (or NULL). * @_buffer: Pointer to the folio queue to add to (may point to a NULL; updated). * @_cur_size: Current size of the buffer (updated). * @size: Target size of the buffer. * @gfp: The allocation constraints.
*/ int netfs_alloc_folioq_buffer(struct address_space *mapping, struct folio_queue **_buffer, size_t *_cur_size, ssize_t size, gfp_t gfp) { struct folio_queue *tail = *_buffer, *p; size = round_up(size, PAGE_SIZE); if (*_cur_size >= size) return 0; if (tail) while (tail->next) tail = tail->next; do { struct folio *folio; int order = 0, slot; if (!tail || folioq_full(tail)) { p = netfs_folioq_alloc(0, GFP_NOFS, netfs_trace_folioq_alloc_buffer); if (!p) return -ENOMEM; if (tail) { tail->next = p; p->prev = tail; } else { *_buffer = p; } tail = p; } if (size - *_cur_size > PAGE_SIZE) order = umin(ilog2(size - *_cur_size) - PAGE_SHIFT, MAX_PAGECACHE_ORDER); folio = folio_alloc(gfp, order); if (!folio && order > 0) folio = folio_alloc(gfp, 0); if (!folio) return -ENOMEM; folio->mapping = mapping; folio->index = *_cur_size / PAGE_SIZE; trace_netfs_folio(folio, netfs_folio_trace_alloc_buffer); slot = folioq_append_mark(tail, folio); *_cur_size += folioq_folio_size(tail, slot); } while (*_cur_size < size); return 0; } EXPORT_SYMBOL(netfs_alloc_folioq_buffer); /** * netfs_free_folioq_buffer - Free a folio queue. * @fq: The start of the folio queue to free * * Free up a chain of folio_queues and, if marked, the marked folios they point * to. */ void netfs_free_folioq_buffer(struct folio_queue *fq) { struct folio_queue *next; struct folio_batch fbatch; folio_batch_init(&fbatch); for (; fq; fq = next) { for (int slot = 0; slot < folioq_count(fq); slot++) { struct folio *folio = folioq_folio(fq, slot); if (!folio || !folioq_is_marked(fq, slot)) continue; trace_netfs_folio(folio, netfs_folio_trace_put); if (folio_batch_add(&fbatch, folio)) folio_batch_release(&fbatch); } netfs_stat_d(&netfs_n_folioq); next = fq->next; kfree(fq); } folio_batch_release(&fbatch); } EXPORT_SYMBOL(netfs_free_folioq_buffer); /* * Reset the subrequest iterator to refer just to the region remaining to be * read. The iterator may or may not have been advanced by socket ops or * extraction ops to an extent that may or may not match the amount actually * read. */ void netfs_reset_iter(struct netfs_io_subrequest *subreq) { struct iov_iter *io_iter = &subreq->io_iter; size_t remain = subreq->len - subreq->transferred; if (io_iter->count > remain) iov_iter_advance(io_iter, io_iter->count - remain); else if (io_iter->count < remain) iov_iter_revert(io_iter, remain - io_iter->count); iov_iter_truncate(&subreq->io_iter, remain); } /** * netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback * @mapping: The mapping the folio belongs to. * @folio: The folio being dirtied. * * Set the dirty flag on a folio and pin an in-use cache object in memory so * that writeback can later write to it. This is intended to be called from * the filesystem's ->dirty_folio() method. * * Return: true if the dirty flag was set on the folio, false otherwise. 
*/ bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio) { struct inode *inode = mapping->host; struct netfs_inode *ictx = netfs_inode(inode); struct fscache_cookie *cookie = netfs_i_cookie(ictx); bool need_use = false; _enter(""); if (!filemap_dirty_folio(mapping, folio)) return false; if (!fscache_cookie_valid(cookie)) return true; if (!(inode->i_state & I_PINNING_NETFS_WB)) { spin_lock(&inode->i_lock); if (!(inode->i_state & I_PINNING_NETFS_WB)) { inode->i_state |= I_PINNING_NETFS_WB; need_use = true; } spin_unlock(&inode->i_lock); if (need_use) fscache_use_cookie(cookie, true); } return true; } EXPORT_SYMBOL(netfs_dirty_folio); /** * netfs_unpin_writeback - Unpin writeback resources * @inode: The inode on which the cookie resides * @wbc: The writeback control * * Unpin the writeback resources pinned by netfs_dirty_folio(). This is * intended to be called as/by the netfs's ->write_inode() method. */ int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc) { struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode)); if (wbc->unpinned_netfs_wb) fscache_unuse_cookie(cookie, NULL, NULL); return 0; } EXPORT_SYMBOL(netfs_unpin_writeback); /** * netfs_clear_inode_writeback - Clear writeback resources pinned by an inode * @inode: The inode to clean up * @aux: Auxiliary data to apply to the inode * * Clear any writeback resources held by an inode when the inode is evicted. * This must be called before clear_inode() is called. */ void netfs_clear_inode_writeback(struct inode *inode, const void *aux) { struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode)); if (inode->i_state & I_PINNING_NETFS_WB) { loff_t i_size = i_size_read(inode); fscache_unuse_cookie(cookie, aux, &i_size); } } EXPORT_SYMBOL(netfs_clear_inode_writeback); /** * netfs_invalidate_folio - Invalidate or partially invalidate a folio * @folio: Folio proposed for release * @offset: Offset of the invalidated region * @length: Length of the invalidated region * * Invalidate part or all of a folio for a network filesystem. The folio will * be removed afterwards if the invalidated region covers the entire folio. */ void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length) { struct netfs_folio *finfo; struct netfs_inode *ctx = netfs_inode(folio_inode(folio)); size_t flen = folio_size(folio); _enter("{%lx},%zx,%zx", folio->index, offset, length); if (offset == 0 && length == flen) { unsigned long long i_size = i_size_read(&ctx->inode); unsigned long long fpos = folio_pos(folio), end; end = umin(fpos + flen, i_size); if (fpos < i_size && end > ctx->zero_point) ctx->zero_point = end; } folio_wait_private_2(folio); /* [DEPRECATED] */ if (!folio_test_private(folio)) return; finfo = netfs_folio_info(folio); if (offset == 0 && length >= flen) goto erase_completely; if (finfo) { /* We have a partially uptodate page from a streaming write. */ unsigned int fstart = finfo->dirty_offset; unsigned int fend = fstart + finfo->dirty_len; unsigned int iend = offset + length; if (offset >= fend) return; if (iend <= fstart) return; /* The invalidation region overlaps the data. If the region * covers the start of the data, we either move along the start * or just erase the data entirely. */ if (offset <= fstart) { if (iend >= fend) goto erase_completely; /* Move the start of the data. */ finfo->dirty_len = fend - iend; finfo->dirty_offset = offset; return; } /* Reduce the length of the data if the invalidation region * covers the tail part. 
*/ if (iend >= fend) { finfo->dirty_len = offset - fstart; return; } /* A partial write was split. The caller has already zeroed * it, so just absorb the hole. */ } return; erase_completely: netfs_put_group(netfs_folio_group(folio)); folio_detach_private(folio); folio_clear_uptodate(folio); kfree(finfo); return; } EXPORT_SYMBOL(netfs_invalidate_folio); /** * netfs_release_folio - Try to release a folio * @folio: Folio proposed for release * @gfp: Flags qualifying the release * * Request release of a folio and clean up its private state if it's not busy. * Returns true if the folio can now be released, false if not */ bool netfs_release_folio(struct folio *folio, gfp_t gfp) { struct netfs_inode *ctx = netfs_inode(folio_inode(folio)); unsigned long long end; if (folio_test_dirty(folio)) return false; end = umin(folio_pos(folio) + folio_size(folio), i_size_read(&ctx->inode)); if (end > ctx->zero_point) ctx->zero_point = end; if (folio_test_private(folio)) return false; if (unlikely(folio_test_private_2(folio))) { /* [DEPRECATED] */ if (current_is_kswapd() || !(gfp & __GFP_FS)) return false; folio_wait_private_2(folio); } fscache_note_page_release(netfs_i_cookie(ctx)); return true; } EXPORT_SYMBOL(netfs_release_folio); /* * Wake the collection work item. */ void netfs_wake_collector(struct netfs_io_request *rreq) { if (test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags) && !test_bit(NETFS_RREQ_RETRYING, &rreq->flags)) { queue_work(system_unbound_wq, &rreq->work); } else { trace_netfs_rreq(rreq, netfs_rreq_trace_wake_queue); wake_up(&rreq->waitq); } } /* * Mark a subrequest as no longer being in progress and, if need be, wake the * collector. */ void netfs_subreq_clear_in_progress(struct netfs_io_subrequest *subreq) { struct netfs_io_request *rreq = subreq->rreq; struct netfs_io_stream *stream = &rreq->io_streams[subreq->stream_nr]; clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags); smp_mb__after_atomic(); /* Clear IN_PROGRESS before task state */ /* If we are at the head of the queue, wake up the collector. */ if (list_is_first(&subreq->rreq_link, &stream->subrequests) || test_bit(NETFS_RREQ_RETRYING, &rreq->flags)) netfs_wake_collector(rreq); } /* * Wait for all outstanding I/O in a stream to quiesce. */ void netfs_wait_for_in_progress_stream(struct netfs_io_request *rreq, struct netfs_io_stream *stream) { struct netfs_io_subrequest *subreq; DEFINE_WAIT(myself); list_for_each_entry(subreq, &stream->subrequests, rreq_link) { if (!netfs_check_subreq_in_progress(subreq)) continue; trace_netfs_rreq(rreq, netfs_rreq_trace_wait_quiesce); for (;;) { prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE); if (!netfs_check_subreq_in_progress(subreq)) break; trace_netfs_sreq(subreq, netfs_sreq_trace_wait_for); schedule(); } } trace_netfs_rreq(rreq, netfs_rreq_trace_waited_quiesce); finish_wait(&rreq->waitq, &myself); } /* * Perform collection in app thread if not offloaded to workqueue. 
*/ static int netfs_collect_in_app(struct netfs_io_request *rreq, bool (*collector)(struct netfs_io_request *rreq)) { bool need_collect = false, inactive = true, done = true; if (!netfs_check_rreq_in_progress(rreq)) { trace_netfs_rreq(rreq, netfs_rreq_trace_recollect); return 1; /* Done */ } for (int i = 0; i < NR_IO_STREAMS; i++) { struct netfs_io_subrequest *subreq; struct netfs_io_stream *stream = &rreq->io_streams[i]; if (!stream->active) continue; inactive = false; trace_netfs_collect_stream(rreq, stream); subreq = list_first_entry_or_null(&stream->subrequests, struct netfs_io_subrequest, rreq_link); if (subreq && (!netfs_check_subreq_in_progress(subreq) || test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) { need_collect = true; break; } if (subreq || !test_bit(NETFS_RREQ_ALL_QUEUED, &rreq->flags)) done = false; } if (!need_collect && !inactive && !done) return 0; /* Sleep */ __set_current_state(TASK_RUNNING); if (collector(rreq)) { /* Drop the ref from the NETFS_RREQ_IN_PROGRESS flag. */ netfs_put_request(rreq, netfs_rreq_trace_put_work_ip); return 1; /* Done */ } if (inactive) { WARN(true, "Failed to collect inactive req R=%08x\n", rreq->debug_id); cond_resched(); } return 2; /* Again */ } /* * Wait for a request to complete, successfully or otherwise. */ static ssize_t netfs_wait_for_in_progress(struct netfs_io_request *rreq, bool (*collector)(struct netfs_io_request *rreq)) { DEFINE_WAIT(myself); ssize_t ret; for (;;) { prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE); if (!test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags)) { switch (netfs_collect_in_app(rreq, collector)) { case 0: break; case 1: goto all_collected; case 2: if (!netfs_check_rreq_in_progress(rreq)) break; cond_resched(); continue; } } if (!netfs_check_rreq_in_progress(rreq)) break; trace_netfs_rreq(rreq, netfs_rreq_trace_wait_ip); schedule(); } all_collected: trace_netfs_rreq(rreq, netfs_rreq_trace_waited_ip); finish_wait(&rreq->waitq, &myself); ret = rreq->error; if (ret == 0) { ret = rreq->transferred; switch (rreq->origin) { case NETFS_DIO_READ: case NETFS_DIO_WRITE: case NETFS_READ_SINGLE: case NETFS_UNBUFFERED_READ: case NETFS_UNBUFFERED_WRITE: break; default: if (rreq->submitted < rreq->len) { trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read); ret = -EIO; } break; } } return ret; } ssize_t netfs_wait_for_read(struct netfs_io_request *rreq) { return netfs_wait_for_in_progress(rreq, netfs_read_collection); } ssize_t netfs_wait_for_write(struct netfs_io_request *rreq) { return netfs_wait_for_in_progress(rreq, netfs_write_collection); } /* * Wait for a paused operation to unpause or complete in some manner. 
*/ static void netfs_wait_for_pause(struct netfs_io_request *rreq, bool (*collector)(struct netfs_io_request *rreq)) { DEFINE_WAIT(myself); for (;;) { trace_netfs_rreq(rreq, netfs_rreq_trace_wait_pause); prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE); if (!test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags)) { switch (netfs_collect_in_app(rreq, collector)) { case 0: break; case 1: goto all_collected; case 2: if (!netfs_check_rreq_in_progress(rreq) || !test_bit(NETFS_RREQ_PAUSE, &rreq->flags)) break; cond_resched(); continue; } } if (!netfs_check_rreq_in_progress(rreq) || !test_bit(NETFS_RREQ_PAUSE, &rreq->flags)) break; schedule(); } all_collected: trace_netfs_rreq(rreq, netfs_rreq_trace_waited_pause); finish_wait(&rreq->waitq, &myself); } void netfs_wait_for_paused_read(struct netfs_io_request *rreq) { return netfs_wait_for_pause(rreq, netfs_read_collection); } void netfs_wait_for_paused_write(struct netfs_io_request *rreq) { return netfs_wait_for_pause(rreq, netfs_write_collection); }
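/*
 * Editor's sketch (not from the netfs sources): minimal use of the two
 * exported buffer helpers above. It grows an anonymous folio-queue buffer
 * to at least 'need' bytes, then releases the chain; the function name is
 * hypothetical. Passing a NULL mapping keeps the folios private to the
 * buffer, and netfs_free_folioq_buffer() only puts the folios that
 * netfs_alloc_folioq_buffer() marked, so a partially built chain can be
 * handed to it safely on the error path.
 */
static int example_grow_bounce_buffer(size_t need)
{
	struct folio_queue *buf = NULL;
	size_t cur_size = 0;
	int ret;

	ret = netfs_alloc_folioq_buffer(NULL, &buf, &cur_size, need, GFP_KERNEL);
	if (ret < 0) {
		netfs_free_folioq_buffer(buf);
		return ret;
	}

	/* ... wrap the queue in an iterator and do I/O on it here ... */

	netfs_free_folioq_buffer(buf);
	return 0;
}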
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/readdir.c * * Copyright (C) 1995 Linus Torvalds */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/time.h> #include <linux/mm.h> #include <linux/errno.h> #include <linux/stat.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/fsnotify.h> #include <linux/dirent.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/unistd.h> #include <linux/compat.h> #include <linux/uaccess.h> /* * Some filesystems were never converted to '->iterate_shared()' * and their directory iterators want the inode lock held for * writing. This wrapper allows for converting from the shared * semantics to the exclusive inode use. */ int wrap_directory_iterator(struct file *file, struct dir_context *ctx, int (*iter)(struct file *, struct dir_context *)) { struct inode *inode = file_inode(file); int ret; /* * We'd love to have an 'inode_upgrade_trylock()' operation, * see the comment in mmap_upgrade_trylock() in mm/memory.c.
* * But considering this is for "filesystems that never got * converted", it really doesn't matter. * * Also note that since we have to return with the lock held * for reading, we can't use the "killable()" locking here, * since we do need to get the lock even if we're dying. * * We could do the write part killably and then get the read * lock unconditionally if it mattered, but see above on why * this does the very simplistic conversion. */ up_read(&inode->i_rwsem); down_write(&inode->i_rwsem); /* * Since we dropped the inode lock, we should do the * DEADDIR test again. See 'iterate_dir()' below. * * Note that we don't need to re-do the f_pos games, * since the file must be locked wrt f_pos anyway. */ ret = -ENOENT; if (!IS_DEADDIR(inode)) ret = iter(file, ctx); downgrade_write(&inode->i_rwsem); return ret; } EXPORT_SYMBOL(wrap_directory_iterator); /* * Note the "unsafe_put_user()" semantics: we goto a * label for errors. */ #define unsafe_copy_dirent_name(_dst, _src, _len, label) do { \ char __user *dst = (_dst); \ const char *src = (_src); \ size_t len = (_len); \ unsafe_put_user(0, dst+len, label); \ unsafe_copy_to_user(dst, src, len, label); \ } while (0) int iterate_dir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); int res = -ENOTDIR; if (!file->f_op->iterate_shared) goto out; res = security_file_permission(file, MAY_READ); if (res) goto out; res = fsnotify_file_perm(file, MAY_READ); if (res) goto out; res = down_read_killable(&inode->i_rwsem); if (res) goto out; res = -ENOENT; if (!IS_DEADDIR(inode)) { ctx->pos = file->f_pos; res = file->f_op->iterate_shared(file, ctx); file->f_pos = ctx->pos; fsnotify_access(file); file_accessed(file); } inode_unlock_shared(inode); out: return res; } EXPORT_SYMBOL(iterate_dir); /* * POSIX says that a dirent name cannot contain NULL or a '/'. * * It's not 100% clear what we should really do in this case. * The filesystem is clearly corrupted, but returning a hard * error means that you now don't see any of the other names * either, so that isn't a perfect alternative. * * And if you return an error, what error do you use? Several * filesystems seem to have decided on EUCLEAN being the error * code for EFSCORRUPTED, and that may be the error to use. Or * just EIO, which is perhaps more obvious to users. * * In order to see the other file names in the directory, the * caller might want to make this a "soft" error: skip the * entry, and return the error at the end instead. * * Note that this should likely do a "memchr(name, 0, len)" * check too, since that would be filesystem corruption as * well. However, that case can't actually confuse user space, * which has to do a strlen() on the name anyway to find the * filename length, and the above "soft error" worry means * that it's probably better left alone until we have that * issue clarified. * * Note the PATH_MAX check - it's arbitrary but the real * kernel limit on a possible path component, not NAME_MAX, * which is the technical standard limit. */ static int verify_dirent_name(const char *name, int len) { if (len <= 0 || len >= PATH_MAX) return -EIO; if (memchr(name, '/', len)) return -EIO; return 0; } /* * Traditional linux readdir() handling.. * * "count=1" is a special case, meaning that the buffer is one * dirent-structure in size and that the code can't handle more * anyway. Thus the special "fillonedir()" function for that * case (the low-level handlers don't need to care about this). 
*/ #ifdef __ARCH_WANT_OLD_READDIR struct old_linux_dirent { unsigned long d_ino; unsigned long d_offset; unsigned short d_namlen; char d_name[]; }; struct readdir_callback { struct dir_context ctx; struct old_linux_dirent __user * dirent; int result; }; static bool fillonedir(struct dir_context *ctx, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct readdir_callback *buf = container_of(ctx, struct readdir_callback, ctx); struct old_linux_dirent __user * dirent; unsigned long d_ino; if (buf->result) return false; buf->result = verify_dirent_name(name, namlen); if (buf->result) return false; d_ino = ino; if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) { buf->result = -EOVERFLOW; return false; } buf->result++; dirent = buf->dirent; if (!user_write_access_begin(dirent, (unsigned long)(dirent->d_name + namlen + 1) - (unsigned long)dirent)) goto efault; unsafe_put_user(d_ino, &dirent->d_ino, efault_end); unsafe_put_user(offset, &dirent->d_offset, efault_end); unsafe_put_user(namlen, &dirent->d_namlen, efault_end); unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end); user_write_access_end(); return true; efault_end: user_write_access_end(); efault: buf->result = -EFAULT; return false; } SYSCALL_DEFINE3(old_readdir, unsigned int, fd, struct old_linux_dirent __user *, dirent, unsigned int, count) { int error; CLASS(fd_pos, f)(fd); struct readdir_callback buf = { .ctx.actor = fillonedir, .ctx.count = 1, /* Hint to fs: just one entry. */ .dirent = dirent }; if (fd_empty(f)) return -EBADF; error = iterate_dir(fd_file(f), &buf.ctx); if (buf.result) error = buf.result; return error; } #endif /* __ARCH_WANT_OLD_READDIR */ /* * New, all-improved, singing, dancing, iBCS2-compliant getdents() * interface. */ struct linux_dirent { unsigned long d_ino; unsigned long d_off; unsigned short d_reclen; char d_name[]; }; struct getdents_callback { struct dir_context ctx; struct linux_dirent __user * current_dir; int prev_reclen; int error; }; static bool filldir(struct dir_context *ctx, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct linux_dirent __user *dirent, *prev; struct getdents_callback *buf = container_of(ctx, struct getdents_callback, ctx); unsigned long d_ino; int reclen = ALIGN(offsetof(struct linux_dirent, d_name) + namlen + 2, sizeof(long)); int prev_reclen; unsigned int flags = d_type; BUILD_BUG_ON(FILLDIR_FLAG_NOINTR & S_DT_MASK); d_type &= S_DT_MASK; buf->error = verify_dirent_name(name, namlen); if (unlikely(buf->error)) return false; buf->error = -EINVAL; /* only used if we fail.. 
*/ if (reclen > ctx->count) return false; d_ino = ino; if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) { buf->error = -EOVERFLOW; return false; } prev_reclen = buf->prev_reclen; if (!(flags & FILLDIR_FLAG_NOINTR) && prev_reclen && signal_pending(current)) return false; dirent = buf->current_dir; prev = (void __user *) dirent - prev_reclen; if (!user_write_access_begin(prev, reclen + prev_reclen)) goto efault; /* This might be 'dirent->d_off', but if so it will get overwritten */ unsafe_put_user(offset, &prev->d_off, efault_end); unsafe_put_user(d_ino, &dirent->d_ino, efault_end); unsafe_put_user(reclen, &dirent->d_reclen, efault_end); unsafe_put_user(d_type, (char __user *) dirent + reclen - 1, efault_end); unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end); user_write_access_end(); buf->current_dir = (void __user *)dirent + reclen; buf->prev_reclen = reclen; ctx->count -= reclen; return true; efault_end: user_write_access_end(); efault: buf->error = -EFAULT; return false; } SYSCALL_DEFINE3(getdents, unsigned int, fd, struct linux_dirent __user *, dirent, unsigned int, count) { CLASS(fd_pos, f)(fd); struct getdents_callback buf = { .ctx.actor = filldir, .ctx.count = count, .current_dir = dirent }; int error; if (fd_empty(f)) return -EBADF; error = iterate_dir(fd_file(f), &buf.ctx); if (error >= 0) error = buf.error; if (buf.prev_reclen) { struct linux_dirent __user * lastdirent; lastdirent = (void __user *)buf.current_dir - buf.prev_reclen; if (put_user(buf.ctx.pos, &lastdirent->d_off)) error = -EFAULT; else error = count - buf.ctx.count; } return error; } struct getdents_callback64 { struct dir_context ctx; struct linux_dirent64 __user * current_dir; int prev_reclen; int error; }; static bool filldir64(struct dir_context *ctx, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct linux_dirent64 __user *dirent, *prev; struct getdents_callback64 *buf = container_of(ctx, struct getdents_callback64, ctx); int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1, sizeof(u64)); int prev_reclen; unsigned int flags = d_type; BUILD_BUG_ON(FILLDIR_FLAG_NOINTR & S_DT_MASK); d_type &= S_DT_MASK; buf->error = verify_dirent_name(name, namlen); if (unlikely(buf->error)) return false; buf->error = -EINVAL; /* only used if we fail.. 
*/ if (reclen > ctx->count) return false; prev_reclen = buf->prev_reclen; if (!(flags & FILLDIR_FLAG_NOINTR) && prev_reclen && signal_pending(current)) return false; dirent = buf->current_dir; prev = (void __user *)dirent - prev_reclen; if (!user_write_access_begin(prev, reclen + prev_reclen)) goto efault; /* This might be 'dirent->d_off', but if so it will get overwritten */ unsafe_put_user(offset, &prev->d_off, efault_end); unsafe_put_user(ino, &dirent->d_ino, efault_end); unsafe_put_user(reclen, &dirent->d_reclen, efault_end); unsafe_put_user(d_type, &dirent->d_type, efault_end); unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end); user_write_access_end(); buf->prev_reclen = reclen; buf->current_dir = (void __user *)dirent + reclen; ctx->count -= reclen; return true; efault_end: user_write_access_end(); efault: buf->error = -EFAULT; return false; } SYSCALL_DEFINE3(getdents64, unsigned int, fd, struct linux_dirent64 __user *, dirent, unsigned int, count) { CLASS(fd_pos, f)(fd); struct getdents_callback64 buf = { .ctx.actor = filldir64, .ctx.count = count, .current_dir = dirent }; int error; if (fd_empty(f)) return -EBADF; error = iterate_dir(fd_file(f), &buf.ctx); if (error >= 0) error = buf.error; if (buf.prev_reclen) { struct linux_dirent64 __user * lastdirent; typeof(lastdirent->d_off) d_off = buf.ctx.pos; lastdirent = (void __user *) buf.current_dir - buf.prev_reclen; if (put_user(d_off, &lastdirent->d_off)) error = -EFAULT; else error = count - buf.ctx.count; } return error; } #ifdef CONFIG_COMPAT struct compat_old_linux_dirent { compat_ulong_t d_ino; compat_ulong_t d_offset; unsigned short d_namlen; char d_name[]; }; struct compat_readdir_callback { struct dir_context ctx; struct compat_old_linux_dirent __user *dirent; int result; }; static bool compat_fillonedir(struct dir_context *ctx, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct compat_readdir_callback *buf = container_of(ctx, struct compat_readdir_callback, ctx); struct compat_old_linux_dirent __user *dirent; compat_ulong_t d_ino; if (buf->result) return false; buf->result = verify_dirent_name(name, namlen); if (buf->result) return false; d_ino = ino; if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) { buf->result = -EOVERFLOW; return false; } buf->result++; dirent = buf->dirent; if (!user_write_access_begin(dirent, (unsigned long)(dirent->d_name + namlen + 1) - (unsigned long)dirent)) goto efault; unsafe_put_user(d_ino, &dirent->d_ino, efault_end); unsafe_put_user(offset, &dirent->d_offset, efault_end); unsafe_put_user(namlen, &dirent->d_namlen, efault_end); unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end); user_write_access_end(); return true; efault_end: user_write_access_end(); efault: buf->result = -EFAULT; return false; } COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd, struct compat_old_linux_dirent __user *, dirent, unsigned int, count) { int error; CLASS(fd_pos, f)(fd); struct compat_readdir_callback buf = { .ctx.actor = compat_fillonedir, .ctx.count = 1, /* Hint to fs: just one entry. 
*/ .dirent = dirent }; if (fd_empty(f)) return -EBADF; error = iterate_dir(fd_file(f), &buf.ctx); if (buf.result) error = buf.result; return error; } struct compat_linux_dirent { compat_ulong_t d_ino; compat_ulong_t d_off; unsigned short d_reclen; char d_name[]; }; struct compat_getdents_callback { struct dir_context ctx; struct compat_linux_dirent __user *current_dir; int prev_reclen; int error; }; static bool compat_filldir(struct dir_context *ctx, const char *name, int namlen, loff_t offset, u64 ino, unsigned int d_type) { struct compat_linux_dirent __user *dirent, *prev; struct compat_getdents_callback *buf = container_of(ctx, struct compat_getdents_callback, ctx); compat_ulong_t d_ino; int reclen = ALIGN(offsetof(struct compat_linux_dirent, d_name) + namlen + 2, sizeof(compat_long_t)); int prev_reclen; unsigned int flags = d_type; BUILD_BUG_ON(FILLDIR_FLAG_NOINTR & S_DT_MASK); d_type &= S_DT_MASK; buf->error = verify_dirent_name(name, namlen); if (unlikely(buf->error)) return false; buf->error = -EINVAL; /* only used if we fail.. */ if (reclen > ctx->count) return false; d_ino = ino; if (sizeof(d_ino) < sizeof(ino) && d_ino != ino) { buf->error = -EOVERFLOW; return false; } prev_reclen = buf->prev_reclen; if (!(flags & FILLDIR_FLAG_NOINTR) && prev_reclen && signal_pending(current)) return false; dirent = buf->current_dir; prev = (void __user *) dirent - prev_reclen; if (!user_write_access_begin(prev, reclen + prev_reclen)) goto efault; unsafe_put_user(offset, &prev->d_off, efault_end); unsafe_put_user(d_ino, &dirent->d_ino, efault_end); unsafe_put_user(reclen, &dirent->d_reclen, efault_end); unsafe_put_user(d_type, (char __user *) dirent + reclen - 1, efault_end); unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end); user_write_access_end(); buf->prev_reclen = reclen; buf->current_dir = (void __user *)dirent + reclen; ctx->count -= reclen; return true; efault_end: user_write_access_end(); efault: buf->error = -EFAULT; return false; } COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd, struct compat_linux_dirent __user *, dirent, unsigned int, count) { CLASS(fd_pos, f)(fd); struct compat_getdents_callback buf = { .ctx.actor = compat_filldir, .ctx.count = count, .current_dir = dirent, }; int error; if (fd_empty(f)) return -EBADF; error = iterate_dir(fd_file(f), &buf.ctx); if (error >= 0) error = buf.error; if (buf.prev_reclen) { struct compat_linux_dirent __user * lastdirent; lastdirent = (void __user *)buf.current_dir - buf.prev_reclen; if (put_user(buf.ctx.pos, &lastdirent->d_off)) error = -EFAULT; else error = count - buf.ctx.count; } return error; } #endif
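/*
 * Editor's sketch (userspace, not part of fs/readdir.c): the consumer's view
 * of the getdents64 ABI that filldir64() above produces. Each record is a
 * variable-length linux_dirent64; d_reclen advances to the next record, and
 * the final record's d_off is the resume position that iterate_dir() writes
 * back. The function and type names here are illustrative.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

struct linux_dirent64_user {
	uint64_t       d_ino;
	int64_t        d_off;
	unsigned short d_reclen;
	unsigned char  d_type;
	char           d_name[];
};

static int example_list_dir(const char *path)
{
	char buf[4096];
	long n;
	int fd = open(path, O_RDONLY | O_DIRECTORY);

	if (fd < 0)
		return -1;

	/* each call fills buf with as many whole records as fit */
	while ((n = syscall(SYS_getdents64, fd, buf, sizeof(buf))) > 0) {
		for (long off = 0; off < n; ) {
			struct linux_dirent64_user *d =
				(struct linux_dirent64_user *)(buf + off);

			printf("%s\n", d->d_name);
			off += d->d_reclen;
		}
	}

	close(fd);
	return n < 0 ? -1 : 0;
}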
// SPDX-License-Identifier: GPL-2.0-or-later /* * PRNG: Pseudo Random Number Generator * Based on NIST Recommended PRNG From ANSI X9.31 Appendix A.2.4 using * AES 128 cipher * * (C) Neil Horman <nhorman@tuxdriver.com> */ #include <crypto/internal/cipher.h> #include <crypto/internal/rng.h> #include <linux/err.h> #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/string.h> #define DEFAULT_PRNG_KEY "0123456789abcdef" #define DEFAULT_PRNG_KSZ 16 #define DEFAULT_BLK_SZ 16 #define DEFAULT_V_SEED "zaybxcwdveuftgsh" /* * Flags for the prng_context flags field */ #define PRNG_FIXED_SIZE 0x1 #define PRNG_NEED_RESET 0x2 /* * Note: DT is our counter value * I is our intermediate value * V is our seed vector * See http://csrc.nist.gov/groups/STM/cavp/documents/rng/931rngext.pdf * for implementation details */ struct prng_context { spinlock_t prng_lock; unsigned char rand_data[DEFAULT_BLK_SZ]; unsigned char last_rand_data[DEFAULT_BLK_SZ]; unsigned char DT[DEFAULT_BLK_SZ]; unsigned char I[DEFAULT_BLK_SZ]; unsigned char V[DEFAULT_BLK_SZ]; u32 rand_data_valid; struct crypto_cipher *tfm; u32 flags; }; static int dbg; static void hexdump(char *note, unsigned char *buf, unsigned int len) { if (dbg) { printk(KERN_CRIT "%s", note); print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, 16, 1, buf, len, false); } } #define dbgprint(format, args...)
do {\ if (dbg)\ printk(format, ##args);\ } while (0) static void xor_vectors(unsigned char *in1, unsigned char *in2, unsigned char *out, unsigned int size) { int i; for (i = 0; i < size; i++) out[i] = in1[i] ^ in2[i]; } /* * Returns DEFAULT_BLK_SZ bytes of random data per call * returns 0 if generation succeeded, <0 if something went wrong */ static int _get_more_prng_bytes(struct prng_context *ctx, int cont_test) { int i; unsigned char tmp[DEFAULT_BLK_SZ]; unsigned char *output = NULL; dbgprint(KERN_CRIT "Calling _get_more_prng_bytes for context %p\n", ctx); hexdump("Input DT: ", ctx->DT, DEFAULT_BLK_SZ); hexdump("Input I: ", ctx->I, DEFAULT_BLK_SZ); hexdump("Input V: ", ctx->V, DEFAULT_BLK_SZ); /* * This algorithm is a 3 stage state machine */ for (i = 0; i < 3; i++) { switch (i) { case 0: /* * Start by encrypting the counter value * This gives us an intermediate value I */ memcpy(tmp, ctx->DT, DEFAULT_BLK_SZ); output = ctx->I; hexdump("tmp stage 0: ", tmp, DEFAULT_BLK_SZ); break; case 1: /* * Next xor I with our secret vector V * encrypt that result to obtain our * pseudo random data which we output */ xor_vectors(ctx->I, ctx->V, tmp, DEFAULT_BLK_SZ); hexdump("tmp stage 1: ", tmp, DEFAULT_BLK_SZ); output = ctx->rand_data; break; case 2: /* * First check that we didn't produce the same * random data that we did last time around through this */ if (!memcmp(ctx->rand_data, ctx->last_rand_data, DEFAULT_BLK_SZ)) { if (cont_test) { panic("cprng %p Failed repetition check!\n", ctx); } printk(KERN_ERR "ctx %p Failed repetition check!\n", ctx); ctx->flags |= PRNG_NEED_RESET; return -EINVAL; } memcpy(ctx->last_rand_data, ctx->rand_data, DEFAULT_BLK_SZ); /* * Lastly xor the random data with I * and encrypt that to obtain a new secret vector V */ xor_vectors(ctx->rand_data, ctx->I, tmp, DEFAULT_BLK_SZ); output = ctx->V; hexdump("tmp stage 2: ", tmp, DEFAULT_BLK_SZ); break; } /* do the encryption */ crypto_cipher_encrypt_one(ctx->tfm, output, tmp); } /* * Now update our DT value */ for (i = DEFAULT_BLK_SZ - 1; i >= 0; i--) { ctx->DT[i] += 1; if (ctx->DT[i] != 0) break; } dbgprint("Returning new block for context %p\n", ctx); ctx->rand_data_valid = 0; hexdump("Output DT: ", ctx->DT, DEFAULT_BLK_SZ); hexdump("Output I: ", ctx->I, DEFAULT_BLK_SZ); hexdump("Output V: ", ctx->V, DEFAULT_BLK_SZ); hexdump("New Random Data: ", ctx->rand_data, DEFAULT_BLK_SZ); return 0; } /* Our exported functions */ static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx, int do_cont_test) { unsigned char *ptr = buf; unsigned int byte_count = (unsigned int)nbytes; int err; spin_lock_bh(&ctx->prng_lock); err = -EINVAL; if (ctx->flags & PRNG_NEED_RESET) goto done; /* * If the FIXED_SIZE flag is on, only return whole blocks of * pseudo random data */ err = -EINVAL; if (ctx->flags & PRNG_FIXED_SIZE) { if (nbytes < DEFAULT_BLK_SZ) goto done; byte_count = DEFAULT_BLK_SZ; } /* * Return 0 in case of success as mandated by the kernel * crypto API interface definition. 
*/ err = 0; dbgprint(KERN_CRIT "getting %d random bytes for context %p\n", byte_count, ctx); remainder: if (ctx->rand_data_valid == DEFAULT_BLK_SZ) { if (_get_more_prng_bytes(ctx, do_cont_test) < 0) { memset(buf, 0, nbytes); err = -EINVAL; goto done; } } /* * Copy any data less than an entire block */ if (byte_count < DEFAULT_BLK_SZ) { empty_rbuf: while (ctx->rand_data_valid < DEFAULT_BLK_SZ) { *ptr = ctx->rand_data[ctx->rand_data_valid]; ptr++; byte_count--; ctx->rand_data_valid++; if (byte_count == 0) goto done; } } /* * Now copy whole blocks */ for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) { if (ctx->rand_data_valid == DEFAULT_BLK_SZ) { if (_get_more_prng_bytes(ctx, do_cont_test) < 0) { memset(buf, 0, nbytes); err = -EINVAL; goto done; } } if (ctx->rand_data_valid > 0) goto empty_rbuf; memcpy(ptr, ctx->rand_data, DEFAULT_BLK_SZ); ctx->rand_data_valid += DEFAULT_BLK_SZ; ptr += DEFAULT_BLK_SZ; } /* * Now go back and get any remaining partial block */ if (byte_count) goto remainder; done: spin_unlock_bh(&ctx->prng_lock); dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n", err, ctx); return err; } static void free_prng_context(struct prng_context *ctx) { crypto_free_cipher(ctx->tfm); } static int reset_prng_context(struct prng_context *ctx, const unsigned char *key, size_t klen, const unsigned char *V, const unsigned char *DT) { int ret; const unsigned char *prng_key; spin_lock_bh(&ctx->prng_lock); ctx->flags |= PRNG_NEED_RESET; prng_key = (key != NULL) ? key : (unsigned char *)DEFAULT_PRNG_KEY; if (!key) klen = DEFAULT_PRNG_KSZ; if (V) memcpy(ctx->V, V, DEFAULT_BLK_SZ); else memcpy(ctx->V, DEFAULT_V_SEED, DEFAULT_BLK_SZ); if (DT) memcpy(ctx->DT, DT, DEFAULT_BLK_SZ); else memset(ctx->DT, 0, DEFAULT_BLK_SZ); memset(ctx->rand_data, 0, DEFAULT_BLK_SZ); memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ); ctx->rand_data_valid = DEFAULT_BLK_SZ; ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen); if (ret) { dbgprint(KERN_CRIT "PRNG: setkey() failed flags=%x\n", crypto_cipher_get_flags(ctx->tfm)); goto out; } ret = 0; ctx->flags &= ~PRNG_NEED_RESET; out: spin_unlock_bh(&ctx->prng_lock); return ret; } static int cprng_init(struct crypto_tfm *tfm) { struct prng_context *ctx = crypto_tfm_ctx(tfm); spin_lock_init(&ctx->prng_lock); ctx->tfm = crypto_alloc_cipher("aes", 0, 0); if (IS_ERR(ctx->tfm)) { dbgprint(KERN_CRIT "Failed to alloc tfm for context %p\n", ctx); return PTR_ERR(ctx->tfm); } if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0) return -EINVAL; /* * after allocation, we should always force the user to reset * so they don't inadvertently use the insecure default values * without specifying them intentionally */ ctx->flags |= PRNG_NEED_RESET; return 0; } static void cprng_exit(struct crypto_tfm *tfm) { free_prng_context(crypto_tfm_ctx(tfm)); } static int cprng_get_random(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *rdata, unsigned int dlen) { struct prng_context *prng = crypto_rng_ctx(tfm); return get_prng_bytes(rdata, dlen, prng, 0); } /* * This is the cprng_registered reset method; the seed value is * interpreted as the tuple { V KEY DT }. * V and KEY are required during reset, and DT is optional, detected * as being present by testing the length of the seed. */ static int cprng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) { struct prng_context *prng = crypto_rng_ctx(tfm); const u8 *key = seed + DEFAULT_BLK_SZ; const u8 *dt = NULL; if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ) return -EINVAL; if (slen >=
(2 * DEFAULT_BLK_SZ + DEFAULT_PRNG_KSZ)) dt = key + DEFAULT_PRNG_KSZ; reset_prng_context(prng, key, DEFAULT_PRNG_KSZ, seed, dt); if (prng->flags & PRNG_NEED_RESET) return -EINVAL; return 0; } #ifdef CONFIG_CRYPTO_FIPS static int fips_cprng_get_random(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *rdata, unsigned int dlen) { struct prng_context *prng = crypto_rng_ctx(tfm); return get_prng_bytes(rdata, dlen, prng, 1); } static int fips_cprng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) { u8 rdata[DEFAULT_BLK_SZ]; const u8 *key = seed + DEFAULT_BLK_SZ; int rc; struct prng_context *prng = crypto_rng_ctx(tfm); if (slen < DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ) return -EINVAL; /* fips strictly requires seed != key */ if (!memcmp(seed, key, DEFAULT_PRNG_KSZ)) return -EINVAL; rc = cprng_reset(tfm, seed, slen); if (!rc) goto out; /* this primes our continuity test */ rc = get_prng_bytes(rdata, DEFAULT_BLK_SZ, prng, 0); prng->rand_data_valid = DEFAULT_BLK_SZ; out: return rc; } #endif static struct rng_alg rng_algs[] = { { .generate = cprng_get_random, .seed = cprng_reset, .seedsize = DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ, .base = { .cra_name = "stdrng", .cra_driver_name = "ansi_cprng", .cra_priority = 100, .cra_ctxsize = sizeof(struct prng_context), .cra_module = THIS_MODULE, .cra_init = cprng_init, .cra_exit = cprng_exit, } #ifdef CONFIG_CRYPTO_FIPS }, { .generate = fips_cprng_get_random, .seed = fips_cprng_reset, .seedsize = DEFAULT_PRNG_KSZ + 2 * DEFAULT_BLK_SZ, .base = { .cra_name = "fips(ansi_cprng)", .cra_driver_name = "fips_ansi_cprng", .cra_priority = 300, .cra_ctxsize = sizeof(struct prng_context), .cra_module = THIS_MODULE, .cra_init = cprng_init, .cra_exit = cprng_exit, } #endif } }; /* Module initialization */ static int __init prng_mod_init(void) { return crypto_register_rngs(rng_algs, ARRAY_SIZE(rng_algs)); } static void __exit prng_mod_fini(void) { crypto_unregister_rngs(rng_algs, ARRAY_SIZE(rng_algs)); } MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Software Pseudo Random Number Generator"); MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>"); module_param(dbg, int, 0); MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)"); module_init(prng_mod_init); module_exit(prng_mod_fini); MODULE_ALIAS_CRYPTO("stdrng"); MODULE_ALIAS_CRYPTO("ansi_cprng"); MODULE_IMPORT_NS("CRYPTO_INTERNAL");
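/*
 * Editor's sketch (not part of this module): driving the generator
 * registered above through the kernel crypto RNG API. The seed layout
 * matches what cprng_reset() parses: a 16-byte V followed by the 16-byte
 * AES key, with an optional 16-byte DT. The function name and the constant
 * seed bytes are illustrative only; a real seed must come from a proper
 * entropy source.
 */
#include <crypto/rng.h>
#include <linux/err.h>
#include <linux/string.h>

static int example_use_ansi_cprng(u8 *out, unsigned int outlen)
{
	struct crypto_rng *rng;
	u8 seed[DEFAULT_PRNG_KSZ + DEFAULT_BLK_SZ];
	int ret;

	rng = crypto_alloc_rng("ansi_cprng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	memset(seed, 0x01, DEFAULT_BLK_SZ);			/* V   */
	memset(seed + DEFAULT_BLK_SZ, 0x02, DEFAULT_PRNG_KSZ);	/* KEY */

	ret = crypto_rng_reset(rng, seed, sizeof(seed));
	if (!ret)
		ret = crypto_rng_get_bytes(rng, out, outlen);

	crypto_free_rng(rng);
	return ret;
}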
/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated Copyright 2023-2024 NXP Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software
   Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE
   IS DISCLAIMED.
*/

#ifndef __HCI_H
#define __HCI_H

#define HCI_MAX_ACL_SIZE	1024
#define HCI_MAX_SCO_SIZE	255
#define HCI_MAX_ISO_SIZE	251
#define HCI_MAX_ISO_BIS		31
#define HCI_MAX_EVENT_SIZE	260
#define HCI_MAX_FRAME_SIZE	(HCI_MAX_ACL_SIZE + 4)

#define HCI_LINK_KEY_SIZE	16

#define HCI_MAX_CPB_DATA_SIZE	252

/* HCI dev events */
#define HCI_DEV_REG		1
#define HCI_DEV_UNREG		2
#define HCI_DEV_UP		3
#define HCI_DEV_DOWN		4
#define HCI_DEV_SUSPEND		5
#define HCI_DEV_RESUME		6
#define HCI_DEV_OPEN		7
#define HCI_DEV_CLOSE		8
#define HCI_DEV_SETUP		9

/* HCI notify events */
#define HCI_NOTIFY_CONN_ADD		1
#define HCI_NOTIFY_CONN_DEL		2
#define HCI_NOTIFY_VOICE_SETTING	3
#define HCI_NOTIFY_ENABLE_SCO_CVSD	4
#define HCI_NOTIFY_ENABLE_SCO_TRANSP	5
#define HCI_NOTIFY_DISABLE_SCO		6

/* HCI bus types */
#define HCI_VIRTUAL	0
#define HCI_USB		1
#define HCI_PCCARD	2
#define HCI_UART	3
#define HCI_RS232	4
#define HCI_PCI		5
#define HCI_SDIO	6
#define HCI_SPI		7
#define HCI_I2C		8
#define HCI_SMD		9
#define HCI_VIRTIO	10
#define HCI_IPC		11

/* HCI device quirks */
enum {
	/* When this quirk is set, the HCI Reset command is sent when
	 * closing the transport instead of when opening it.
	 *
	 * This quirk must be set before hci_register_dev is called.
	 */
	HCI_QUIRK_RESET_ON_CLOSE,

	/* When this quirk is set, the device is turned into a raw-only
	 * device and it will stay in unconfigured state.
	 *
	 * This quirk must be set before hci_register_dev is called.
	 */
	HCI_QUIRK_RAW_DEVICE,

	/* When this quirk is set, the buffer sizes reported by the
	 * HCI Read Buffer Size command are corrected if invalid.
	 *
	 * This quirk must be set before hci_register_dev is called.
	 */
	HCI_QUIRK_FIXUP_BUFFER_SIZE,

	/* When this quirk is set, a controller that does not indicate
	 * support for Inquiry Result with RSSI is assumed to support it
	 * anyway. Some early Bluetooth 1.2 controllers had wrongly
	 * configured local features that require forcing them to enable
	 * this mode. Getting RSSI information with the inquiry responses
	 * is preferred since it allows for a better user experience.
	 *
	 * This quirk must be set before hci_register_dev is called.
	 */
	HCI_QUIRK_FIXUP_INQUIRY_MODE,

	/* When this quirk is set, the HCI Read Local Supported Commands
	 * command is not supported. In general, Bluetooth 1.2 and later
	 * controllers should support this command; however, some
	 * controllers indicate Bluetooth 1.2 support but do not support
	 * this command.
	 *
	 * This quirk must be set before hci_register_dev is called.
	 */
	HCI_QUIRK_BROKEN_LOCAL_COMMANDS,

	/* When this quirk is set, no stored link key handling is
	 * performed. This is mainly due to the fact that the HCI Delete
	 * Stored Link Key command is advertised, but not supported.
	 *
	 * This quirk must be set before hci_register_dev is called.
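	 *
	 * For illustration only (a sketch, assuming a driver probe path
	 * with an allocated struct hci_dev *hdev as declared in the
	 * companion hci_core.h), a transport driver flags such a quirk
	 * before registering the device:
	 *
	 *	set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
	 *	err = hci_register_dev(hdev);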
	 */
	HCI_QUIRK_BROKEN_STORED_LINK_KEY,

	/* When this quirk is set, an external configuration step
	 * is required and will be indicated with the controller
	 * configuration.
	 *
	 * This quirk can be set before hci_register_dev is called or
	 * during the hdev->setup vendor callback.
	 */
	HCI_QUIRK_EXTERNAL_CONFIG,

	/* When this quirk is set, the public Bluetooth address
	 * initially reported by the HCI Read BD Address command
	 * is considered invalid. Controller configuration is
	 * required before this device can be used.
	 *
	 * This quirk can be set before hci_register_dev is called or
	 * during the hdev->setup vendor callback.
	 */
	HCI_QUIRK_INVALID_BDADDR,

	/* When this quirk is set, the public Bluetooth address
	 * initially reported by the HCI Read BD Address command
	 * is considered invalid. The public BD Address can be
	 * specified in the fwnode property 'local-bd-address'.
	 * If this property does not exist or is invalid, controller
	 * configuration is required before this device can be used.
	 *
	 * This quirk can be set before hci_register_dev is called or
	 * during the hdev->setup vendor callback.
	 */
	HCI_QUIRK_USE_BDADDR_PROPERTY,

	/* When this quirk is set, the Bluetooth Device Address provided by
	 * the 'local-bd-address' fwnode property is incorrectly specified in
	 * big-endian order.
	 *
	 * This quirk can be set before hci_register_dev is called or
	 * during the hdev->setup vendor callback.
	 */
	HCI_QUIRK_BDADDR_PROPERTY_BROKEN,

	/* When this quirk is set, the duplicate filtering during
	 * scanning is based on Bluetooth device addresses. To allow
	 * RSSI based updates, restart scanning if needed.
	 *
	 * This quirk can be set before hci_register_dev is called or
	 * during the hdev->setup vendor callback.
	 */
	HCI_QUIRK_STRICT_DUPLICATE_FILTER,

	/* When this quirk is set, LE scan and BR/EDR inquiry are done
	 * simultaneously; otherwise they are interleaved.
	 *
	 * This quirk can be set before hci_register_dev is called or
	 * during the hdev->setup vendor callback.
	 */
	HCI_QUIRK_SIMULTANEOUS_DISCOVERY,

	/* When this quirk is set, the enabling of diagnostic mode is
	 * not persistent over HCI Reset. Every time the controller
	 * is brought up it needs to be reprogrammed.
	 *
	 * This quirk can be set before hci_register_dev is called or
	 * during the hdev->setup vendor callback.
	 */
	HCI_QUIRK_NON_PERSISTENT_DIAG,

	/* When this quirk is set, setup() is run after every open()
	 * and not just after the first open().
	 *
	 * This quirk can be set before hci_register_dev is called or
	 * during the hdev->setup vendor callback.
	 */
	HCI_QUIRK_NON_PERSISTENT_SETUP,

	/* When this quirk is set, wide band speech is supported by
	 * the driver. Since no reliable mechanism exists to report
	 * this from the hardware, a driver flag is used to convey
	 * this support.
	 *
	 * This quirk must be set before hci_register_dev is called.
	 */
	HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,

	/* When this quirk is set, Sync Flow Control is considered
	 * supported by the driver.
	 *
	 * This quirk must be set before hci_register_dev is called.
	 */
	HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED,

	/* When this quirk is set, the LE states reported through
	 * HCI_LE_READ_SUPPORTED_STATES are invalid/broken.
	 *
	 * This mechanism is necessary as many controllers have been
	 * seen as having trouble initiating a connectable advertisement
	 * despite the state combination being reported as supported.
	 *
	 * This quirk can be set before hci_register_dev is called or
	 * during the hdev->setup vendor callback.
	 */
	HCI_QUIRK_BROKEN_LE_STATES,

	/* When this quirk is set, erroneous data reporting
	 * is ignored.
	 * This is mainly due to the fact that the HCI Read Default
	 * Erroneous Data Reporting command is advertised, but not
	 * supported; these controllers often reply with Unknown
	 * Command and tend to lock up randomly, needing a hard reset.
	 *
	 * This quirk can be set before hci_register_dev is called or
	 * during the hdev->setup vendor callback.
	 */
	HCI_QUIRK_BROKEN_ERR_DATA_REPORTING,

	/* When this quirk is set, the hci_suspend_notifier is not
	 * registered. This is intended for devices which drop completely
	 * from the bus on system-suspend and which will show up as a
	 * new HCI after resume.
	 */
	HCI_QUIRK_NO_SUSPEND_NOTIFIER,

	/* When this quirk is set, LE tx power is not queried on startup
	 * and the min/max tx power values default to HCI_TX_POWER_INVALID.
	 *
	 * This quirk can be set before hci_register_dev is called or
	 * during the hdev->setup vendor callback.
	 */
	HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER,

	/* When this quirk is set, HCI_OP_SET_EVENT_FLT requests with
	 * HCI_FLT_CLEAR_ALL are ignored and event filtering is
	 * completely avoided. A subset of the CSR controller
	 * clones struggle with this and instantly lock up.
	 *
	 * Note that devices using this must (separately) disable
	 * runtime suspend, because event filtering takes place there.
	 */
	HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL,

	/* When this quirk is set, the use of the
	 * HCI_OP_ENHANCED_SETUP_SYNC_CONN command to set up SCO
	 * connections is disabled.
	 *
	 * This quirk can be set before hci_register_dev is called or
	 * during the hdev->setup vendor callback.
	 */
	HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN,

	/* When this quirk is set, the HCI_OP_LE_SET_EXT_SCAN_ENABLE command
	 * is disabled. This is required for some Broadcom controllers which
	 * erroneously claim to support extended scanning.
	 *
	 * This quirk can be set before hci_register_dev is called or
	 * during the hdev->setup vendor callback.
	 */
	HCI_QUIRK_BROKEN_EXT_SCAN,

	/* When this quirk is set, the HCI_OP_GET_MWS_TRANSPORT_CONFIG command
	 * is disabled. This is required for some Broadcom controllers which
	 * erroneously claim to support MWS Transport Layer Configuration.
	 *
	 * This quirk can be set before hci_register_dev is called or
	 * during the hdev->setup vendor callback.
	 */
	HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG,

	/* When this quirk is set, max_page for local extended features
	 * is set to 1, even if the controller reports a higher number.
	 * Some controllers (e.g. RTL8723CS) report more pages, but they
	 * don't actually support the features declared there.
	 */
	HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,

	/* When this quirk is set, the HCI_OP_LE_SET_RPA_TIMEOUT command is
	 * skipped during initialization. This is required for the Actions
	 * Semiconductor ATS2851 based controllers, which erroneously claim
	 * to support it.
	 */
	HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT,

	/* When this quirk is set, the HCI_OP_LE_EXT_CREATE_CONN command is
	 * disabled. This is required for the Actions Semiconductor ATS2851
	 * based controllers, which erroneously claim to support it.
	 */
	HCI_QUIRK_BROKEN_EXT_CREATE_CONN,

	/* When this quirk is set, the WRITE_AUTH_PAYLOAD_TIMEOUT command is
	 * skipped. This is required for the Actions Semiconductor ATS2851
	 * based controllers, due to a race condition in the pairing process.
	 */
	HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT,

	/* When this quirk is set, MSFT extension monitor tracking by
	 * address filter is supported. Since the tracking quantity of
	 * each pattern is limited, this feature supports tracking
	 * multiple devices concurrently if the controller supports
	 * multiple address filters.
	 *
	 * This quirk must be set before hci_register_dev is called.
	 */
	HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER,

	/* When this quirk is set, LE Coded PHY shall not be used. This is
	 * required for some Intel controllers which erroneously claim to
	 * support it, but using it causes problems with extended scanning.
	 *
	 * This quirk can be set before hci_register_dev is called or
	 * during the hdev->setup vendor callback.
	 */
	HCI_QUIRK_BROKEN_LE_CODED,

	/* When this quirk is set, the HCI_OP_READ_ENC_KEY_SIZE command is
	 * skipped during an HCI_EV_ENCRYPT_CHANGE event. This is required
	 * for Actions Semiconductor ATS2851 based controllers, which
	 * erroneously claim to support it.
	 */
	HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE,

	/* When this quirk is set, the reserved bits of Primary/Secondary_PHY
	 * inside the LE Extended Advertising Report events are discarded.
	 * This is required for some Apple/Broadcom controllers which
	 * abuse these reserved bits for unrelated flags.
	 *
	 * This quirk can be set before hci_register_dev is called or
	 * during the hdev->setup vendor callback.
	 */
	HCI_QUIRK_FIXUP_LE_EXT_ADV_REPORT_PHY,

	/* When this quirk is set, the HCI_OP_READ_VOICE_SETTING command is
	 * skipped. This is required for a subset of the CSR controller
	 * clones which erroneously claim to support it.
	 *
	 * This quirk must be set before hci_register_dev is called.
	 */
	HCI_QUIRK_BROKEN_READ_VOICE_SETTING,

	/* When this quirk is set, the HCI_OP_READ_PAGE_SCAN_TYPE command is
	 * skipped. This is required for a subset of the CSR controller
	 * clones which erroneously claim to support it.
	 *
	 * This quirk must be set before hci_register_dev is called.
	 */
	HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE,
};

/* HCI device flags */
enum {
	HCI_UP,
	HCI_INIT,
	HCI_RUNNING,

	HCI_PSCAN,
	HCI_ISCAN,
	HCI_AUTH,
	HCI_ENCRYPT,
	HCI_INQUIRY,

	HCI_RAW,
	HCI_RESET,
};

/* HCI socket flags */
enum {
	HCI_SOCK_TRUSTED,
	HCI_MGMT_INDEX_EVENTS,
	HCI_MGMT_UNCONF_INDEX_EVENTS,
	HCI_MGMT_EXT_INDEX_EVENTS,
	HCI_MGMT_EXT_INFO_EVENTS,
	HCI_MGMT_OPTION_EVENTS,
	HCI_MGMT_SETTING_EVENTS,
	HCI_MGMT_DEV_CLASS_EVENTS,
	HCI_MGMT_LOCAL_NAME_EVENTS,
	HCI_MGMT_OOB_DATA_EVENTS,
	HCI_MGMT_EXP_FEATURE_EVENTS,
};

/*
 * BR/EDR and/or LE controller flags: the flags defined here should represent
 * states from the controller.
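 *
 * For illustration only (a sketch: the hci_dev_test_flag() helper and
 * the hdev pointer are declared in the companion hci_core.h, not in
 * this header), code elsewhere in the stack gates its behaviour on
 * these states, e.g.:
 *
 *	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
 *		return -EOPNOTSUPP;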
*/ enum { HCI_SETUP, HCI_CONFIG, HCI_DEBUGFS_CREATED, HCI_POWERING_DOWN, HCI_AUTO_OFF, HCI_RFKILLED, HCI_MGMT, HCI_BONDABLE, HCI_SERVICE_CACHE, HCI_KEEP_DEBUG_KEYS, HCI_USE_DEBUG_KEYS, HCI_UNREGISTER, HCI_UNCONFIGURED, HCI_USER_CHANNEL, HCI_EXT_CONFIGURED, HCI_LE_ADV, HCI_LE_PER_ADV, HCI_LE_SCAN, HCI_SSP_ENABLED, HCI_SC_ENABLED, HCI_SC_ONLY, HCI_PRIVACY, HCI_LIMITED_PRIVACY, HCI_RPA_EXPIRED, HCI_RPA_RESOLVING, HCI_LE_ENABLED, HCI_ADVERTISING, HCI_ADVERTISING_CONNECTABLE, HCI_CONNECTABLE, HCI_DISCOVERABLE, HCI_LIMITED_DISCOVERABLE, HCI_LINK_SECURITY, HCI_PERIODIC_INQ, HCI_FAST_CONNECTABLE, HCI_BREDR_ENABLED, HCI_LE_SCAN_INTERRUPTED, HCI_WIDEBAND_SPEECH_ENABLED, HCI_EVENT_FILTER_CONFIGURED, HCI_PA_SYNC, HCI_SCO_FLOWCTL, HCI_DUT_MODE, HCI_VENDOR_DIAG, HCI_FORCE_BREDR_SMP, HCI_FORCE_STATIC_ADDR, HCI_LL_RPA_RESOLUTION, HCI_CMD_PENDING, HCI_FORCE_NO_MITM, HCI_QUALITY_REPORT, HCI_OFFLOAD_CODECS_ENABLED, HCI_LE_SIMULTANEOUS_ROLES, HCI_CMD_DRAIN_WORKQUEUE, HCI_MESH_EXPERIMENTAL, HCI_MESH, HCI_MESH_SENDING, __HCI_NUM_FLAGS, }; /* HCI timeouts */ #define HCI_DISCONN_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */ #define HCI_PAIRING_TIMEOUT msecs_to_jiffies(60000) /* 60 seconds */ #define HCI_INIT_TIMEOUT msecs_to_jiffies(10000) /* 10 seconds */ #define HCI_CMD_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */ #define HCI_NCMD_TIMEOUT msecs_to_jiffies(4000) /* 4 seconds */ #define HCI_ACL_TX_TIMEOUT msecs_to_jiffies(45000) /* 45 seconds */ #define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */ #define HCI_ACL_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */ #define HCI_LE_CONN_TIMEOUT msecs_to_jiffies(20000) /* 20 seconds */ /* HCI data types */ #define HCI_COMMAND_PKT 0x01 #define HCI_ACLDATA_PKT 0x02 #define HCI_SCODATA_PKT 0x03 #define HCI_EVENT_PKT 0x04 #define HCI_ISODATA_PKT 0x05 #define HCI_DIAG_PKT 0xf0 #define HCI_DRV_PKT 0xf1 #define HCI_VENDOR_PKT 0xff /* HCI packet types */ #define HCI_DM1 0x0008 #define HCI_DM3 0x0400 #define HCI_DM5 0x4000 #define HCI_DH1 0x0010 #define HCI_DH3 0x0800 #define HCI_DH5 0x8000 /* HCI packet types inverted masks */ #define HCI_2DH1 0x0002 #define HCI_3DH1 0x0004 #define HCI_2DH3 0x0100 #define HCI_3DH3 0x0200 #define HCI_2DH5 0x1000 #define HCI_3DH5 0x2000 #define HCI_HV1 0x0020 #define HCI_HV2 0x0040 #define HCI_HV3 0x0080 #define SCO_PTYPE_MASK (HCI_HV1 | HCI_HV2 | HCI_HV3) #define ACL_PTYPE_MASK (~SCO_PTYPE_MASK) /* eSCO packet types */ #define ESCO_HV1 0x0001 #define ESCO_HV2 0x0002 #define ESCO_HV3 0x0004 #define ESCO_EV3 0x0008 #define ESCO_EV4 0x0010 #define ESCO_EV5 0x0020 #define ESCO_2EV3 0x0040 #define ESCO_3EV3 0x0080 #define ESCO_2EV5 0x0100 #define ESCO_3EV5 0x0200 #define SCO_ESCO_MASK (ESCO_HV1 | ESCO_HV2 | ESCO_HV3) #define EDR_ESCO_MASK (ESCO_2EV3 | ESCO_3EV3 | ESCO_2EV5 | ESCO_3EV5) /* ACL flags */ #define ACL_START_NO_FLUSH 0x00 #define ACL_CONT 0x01 #define ACL_START 0x02 #define ACL_COMPLETE 0x03 #define ACL_ACTIVE_BCAST 0x04 #define ACL_PICO_BCAST 0x08 /* ISO PB flags */ #define ISO_START 0x00 #define ISO_CONT 0x01 #define ISO_SINGLE 0x02 #define ISO_END 0x03 /* ISO TS flags */ #define ISO_TS 0x01 /* Baseband links */ #define SCO_LINK 0x00 #define ACL_LINK 0x01 #define ESCO_LINK 0x02 /* Low Energy links do not have defined link type. 
Use invented one */ #define LE_LINK 0x80 #define CIS_LINK 0x82 #define BIS_LINK 0x83 #define INVALID_LINK 0xff /* LMP features */ #define LMP_3SLOT 0x01 #define LMP_5SLOT 0x02 #define LMP_ENCRYPT 0x04 #define LMP_SOFFSET 0x08 #define LMP_TACCURACY 0x10 #define LMP_RSWITCH 0x20 #define LMP_HOLD 0x40 #define LMP_SNIFF 0x80 #define LMP_PARK 0x01 #define LMP_RSSI 0x02 #define LMP_QUALITY 0x04 #define LMP_SCO 0x08 #define LMP_HV2 0x10 #define LMP_HV3 0x20 #define LMP_ULAW 0x40 #define LMP_ALAW 0x80 #define LMP_CVSD 0x01 #define LMP_PSCHEME 0x02 #define LMP_PCONTROL 0x04 #define LMP_TRANSPARENT 0x08 #define LMP_EDR_2M 0x02 #define LMP_EDR_3M 0x04 #define LMP_RSSI_INQ 0x40 #define LMP_ESCO 0x80 #define LMP_EV4 0x01 #define LMP_EV5 0x02 #define LMP_NO_BREDR 0x20 #define LMP_LE 0x40 #define LMP_EDR_3SLOT 0x80 #define LMP_EDR_5SLOT 0x01 #define LMP_SNIFF_SUBR 0x02 #define LMP_PAUSE_ENC 0x04 #define LMP_EDR_ESCO_2M 0x20 #define LMP_EDR_ESCO_3M 0x40 #define LMP_EDR_3S_ESCO 0x80 #define LMP_EXT_INQ 0x01 #define LMP_SIMUL_LE_BR 0x02 #define LMP_SIMPLE_PAIR 0x08 #define LMP_ERR_DATA_REPORTING 0x20 #define LMP_NO_FLUSH 0x40 #define LMP_LSTO 0x01 #define LMP_INQ_TX_PWR 0x02 #define LMP_EXTFEATURES 0x80 /* Extended LMP features */ #define LMP_CPB_CENTRAL 0x01 #define LMP_CPB_PERIPHERAL 0x02 #define LMP_SYNC_TRAIN 0x04 #define LMP_SYNC_SCAN 0x08 #define LMP_SC 0x01 #define LMP_PING 0x02 /* Host features */ #define LMP_HOST_SSP 0x01 #define LMP_HOST_LE 0x02 #define LMP_HOST_LE_BREDR 0x04 #define LMP_HOST_SC 0x08 /* LE features */ #define HCI_LE_ENCRYPTION 0x01 #define HCI_LE_CONN_PARAM_REQ_PROC 0x02 #define HCI_LE_PERIPHERAL_FEATURES 0x08 #define HCI_LE_PING 0x10 #define HCI_LE_DATA_LEN_EXT 0x20 #define HCI_LE_LL_PRIVACY 0x40 #define HCI_LE_EXT_SCAN_POLICY 0x80 #define HCI_LE_PHY_2M 0x01 #define HCI_LE_PHY_CODED 0x08 #define HCI_LE_EXT_ADV 0x10 #define HCI_LE_PERIODIC_ADV 0x20 #define HCI_LE_CHAN_SEL_ALG2 0x40 #define HCI_LE_CIS_CENTRAL 0x10 #define HCI_LE_CIS_PERIPHERAL 0x20 #define HCI_LE_ISO_BROADCASTER 0x40 #define HCI_LE_ISO_SYNC_RECEIVER 0x80 /* Connection modes */ #define HCI_CM_ACTIVE 0x0000 #define HCI_CM_HOLD 0x0001 #define HCI_CM_SNIFF 0x0002 #define HCI_CM_PARK 0x0003 /* Link policies */ #define HCI_LP_RSWITCH 0x0001 #define HCI_LP_HOLD 0x0002 #define HCI_LP_SNIFF 0x0004 #define HCI_LP_PARK 0x0008 /* Link modes */ #define HCI_LM_ACCEPT 0x8000 #define HCI_LM_MASTER 0x0001 #define HCI_LM_AUTH 0x0002 #define HCI_LM_ENCRYPT 0x0004 #define HCI_LM_TRUSTED 0x0008 #define HCI_LM_RELIABLE 0x0010 #define HCI_LM_SECURE 0x0020 #define HCI_LM_FIPS 0x0040 /* Authentication types */ #define HCI_AT_NO_BONDING 0x00 #define HCI_AT_NO_BONDING_MITM 0x01 #define HCI_AT_DEDICATED_BONDING 0x02 #define HCI_AT_DEDICATED_BONDING_MITM 0x03 #define HCI_AT_GENERAL_BONDING 0x04 #define HCI_AT_GENERAL_BONDING_MITM 0x05 /* I/O capabilities */ #define HCI_IO_DISPLAY_ONLY 0x00 #define HCI_IO_DISPLAY_YESNO 0x01 #define HCI_IO_KEYBOARD_ONLY 0x02 #define HCI_IO_NO_INPUT_OUTPUT 0x03 /* Link Key types */ #define HCI_LK_COMBINATION 0x00 #define HCI_LK_LOCAL_UNIT 0x01 #define HCI_LK_REMOTE_UNIT 0x02 #define HCI_LK_DEBUG_COMBINATION 0x03 #define HCI_LK_UNAUTH_COMBINATION_P192 0x04 #define HCI_LK_AUTH_COMBINATION_P192 0x05 #define HCI_LK_CHANGED_COMBINATION 0x06 #define HCI_LK_UNAUTH_COMBINATION_P256 0x07 #define HCI_LK_AUTH_COMBINATION_P256 0x08 /* ---- HCI Error Codes ---- */ #define HCI_ERROR_UNKNOWN_CONN_ID 0x02 #define HCI_ERROR_AUTH_FAILURE 0x05 #define HCI_ERROR_PIN_OR_KEY_MISSING 0x06 #define HCI_ERROR_MEMORY_EXCEEDED 0x07 #define 
HCI_ERROR_CONNECTION_TIMEOUT 0x08 #define HCI_ERROR_COMMAND_DISALLOWED 0x0c #define HCI_ERROR_REJ_LIMITED_RESOURCES 0x0d #define HCI_ERROR_REJ_BAD_ADDR 0x0f #define HCI_ERROR_INVALID_PARAMETERS 0x12 #define HCI_ERROR_REMOTE_USER_TERM 0x13 #define HCI_ERROR_REMOTE_LOW_RESOURCES 0x14 #define HCI_ERROR_REMOTE_POWER_OFF 0x15 #define HCI_ERROR_LOCAL_HOST_TERM 0x16 #define HCI_ERROR_PAIRING_NOT_ALLOWED 0x18 #define HCI_ERROR_UNSUPPORTED_REMOTE_FEATURE 0x1a #define HCI_ERROR_INVALID_LL_PARAMS 0x1e #define HCI_ERROR_UNSPECIFIED 0x1f #define HCI_ERROR_ADVERTISING_TIMEOUT 0x3c #define HCI_ERROR_CANCELLED_BY_HOST 0x44 /* Flow control modes */ #define HCI_FLOW_CTL_MODE_PACKET_BASED 0x00 #define HCI_FLOW_CTL_MODE_BLOCK_BASED 0x01 /* The core spec defines 127 as the "not available" value */ #define HCI_TX_POWER_INVALID 127 #define HCI_RSSI_INVALID 127 #define HCI_SYNC_HANDLE_INVALID 0xffff #define HCI_SID_INVALID 0xff #define HCI_ROLE_MASTER 0x00 #define HCI_ROLE_SLAVE 0x01 /* Extended Inquiry Response field types */ #define EIR_FLAGS 0x01 /* flags */ #define EIR_UUID16_SOME 0x02 /* 16-bit UUID, more available */ #define EIR_UUID16_ALL 0x03 /* 16-bit UUID, all listed */ #define EIR_UUID32_SOME 0x04 /* 32-bit UUID, more available */ #define EIR_UUID32_ALL 0x05 /* 32-bit UUID, all listed */ #define EIR_UUID128_SOME 0x06 /* 128-bit UUID, more available */ #define EIR_UUID128_ALL 0x07 /* 128-bit UUID, all listed */ #define EIR_NAME_SHORT 0x08 /* shortened local name */ #define EIR_NAME_COMPLETE 0x09 /* complete local name */ #define EIR_TX_POWER 0x0A /* transmit power level */ #define EIR_CLASS_OF_DEV 0x0D /* Class of Device */ #define EIR_SSP_HASH_C192 0x0E /* Simple Pairing Hash C-192 */ #define EIR_SSP_RAND_R192 0x0F /* Simple Pairing Randomizer R-192 */ #define EIR_DEVICE_ID 0x10 /* device ID */ #define EIR_APPEARANCE 0x19 /* Device appearance */ #define EIR_SERVICE_DATA 0x16 /* Service Data */ #define EIR_LE_BDADDR 0x1B /* LE Bluetooth device address */ #define EIR_LE_ROLE 0x1C /* LE role */ #define EIR_SSP_HASH_C256 0x1D /* Simple Pairing Hash C-256 */ #define EIR_SSP_RAND_R256 0x1E /* Simple Pairing Rand R-256 */ #define EIR_LE_SC_CONFIRM 0x22 /* LE SC Confirmation Value */ #define EIR_LE_SC_RANDOM 0x23 /* LE SC Random Value */ /* Low Energy Advertising Flags */ #define LE_AD_LIMITED 0x01 /* Limited Discoverable */ #define LE_AD_GENERAL 0x02 /* General Discoverable */ #define LE_AD_NO_BREDR 0x04 /* BR/EDR not supported */ #define LE_AD_SIM_LE_BREDR_CTRL 0x08 /* Simultaneous LE & BR/EDR Controller */ #define LE_AD_SIM_LE_BREDR_HOST 0x10 /* Simultaneous LE & BR/EDR Host */ /* ----- HCI Commands ---- */ #define HCI_OP_NOP 0x0000 #define HCI_OP_INQUIRY 0x0401 struct hci_cp_inquiry { __u8 lap[3]; __u8 length; __u8 num_rsp; } __packed; #define HCI_OP_INQUIRY_CANCEL 0x0402 #define HCI_OP_PERIODIC_INQ 0x0403 #define HCI_OP_EXIT_PERIODIC_INQ 0x0404 #define HCI_OP_CREATE_CONN 0x0405 struct hci_cp_create_conn { bdaddr_t bdaddr; __le16 pkt_type; __u8 pscan_rep_mode; __u8 pscan_mode; __le16 clock_offset; __u8 role_switch; } __packed; #define HCI_OP_DISCONNECT 0x0406 struct hci_cp_disconnect { __le16 handle; __u8 reason; } __packed; #define HCI_OP_ADD_SCO 0x0407 struct hci_cp_add_sco { __le16 handle; __le16 pkt_type; } __packed; #define HCI_OP_CREATE_CONN_CANCEL 0x0408 struct hci_cp_create_conn_cancel { bdaddr_t bdaddr; } __packed; #define HCI_OP_ACCEPT_CONN_REQ 0x0409 struct hci_cp_accept_conn_req { bdaddr_t bdaddr; __u8 role; } __packed; #define HCI_OP_REJECT_CONN_REQ 0x040a struct hci_cp_reject_conn_req { 
bdaddr_t bdaddr; __u8 reason; } __packed; #define HCI_OP_LINK_KEY_REPLY 0x040b struct hci_cp_link_key_reply { bdaddr_t bdaddr; __u8 link_key[HCI_LINK_KEY_SIZE]; } __packed; #define HCI_OP_LINK_KEY_NEG_REPLY 0x040c struct hci_cp_link_key_neg_reply { bdaddr_t bdaddr; } __packed; #define HCI_OP_PIN_CODE_REPLY 0x040d struct hci_cp_pin_code_reply { bdaddr_t bdaddr; __u8 pin_len; __u8 pin_code[16]; } __packed; struct hci_rp_pin_code_reply { __u8 status; bdaddr_t bdaddr; } __packed; #define HCI_OP_PIN_CODE_NEG_REPLY 0x040e struct hci_cp_pin_code_neg_reply { bdaddr_t bdaddr; } __packed; struct hci_rp_pin_code_neg_reply { __u8 status; bdaddr_t bdaddr; } __packed; #define HCI_OP_CHANGE_CONN_PTYPE 0x040f struct hci_cp_change_conn_ptype { __le16 handle; __le16 pkt_type; } __packed; #define HCI_OP_AUTH_REQUESTED 0x0411 struct hci_cp_auth_requested { __le16 handle; } __packed; #define HCI_OP_SET_CONN_ENCRYPT 0x0413 struct hci_cp_set_conn_encrypt { __le16 handle; __u8 encrypt; } __packed; #define HCI_OP_CHANGE_CONN_LINK_KEY 0x0415 struct hci_cp_change_conn_link_key { __le16 handle; } __packed; #define HCI_OP_REMOTE_NAME_REQ 0x0419 struct hci_cp_remote_name_req { bdaddr_t bdaddr; __u8 pscan_rep_mode; __u8 pscan_mode; __le16 clock_offset; } __packed; #define HCI_OP_REMOTE_NAME_REQ_CANCEL 0x041a struct hci_cp_remote_name_req_cancel { bdaddr_t bdaddr; } __packed; struct hci_rp_remote_name_req_cancel { __u8 status; bdaddr_t bdaddr; } __packed; #define HCI_OP_READ_REMOTE_FEATURES 0x041b struct hci_cp_read_remote_features { __le16 handle; } __packed; #define HCI_OP_READ_REMOTE_EXT_FEATURES 0x041c struct hci_cp_read_remote_ext_features { __le16 handle; __u8 page; } __packed; #define HCI_OP_READ_REMOTE_VERSION 0x041d struct hci_cp_read_remote_version { __le16 handle; } __packed; #define HCI_OP_READ_CLOCK_OFFSET 0x041f struct hci_cp_read_clock_offset { __le16 handle; } __packed; #define HCI_OP_SETUP_SYNC_CONN 0x0428 struct hci_cp_setup_sync_conn { __le16 handle; __le32 tx_bandwidth; __le32 rx_bandwidth; __le16 max_latency; __le16 voice_setting; __u8 retrans_effort; __le16 pkt_type; } __packed; #define HCI_OP_ACCEPT_SYNC_CONN_REQ 0x0429 struct hci_cp_accept_sync_conn_req { bdaddr_t bdaddr; __le32 tx_bandwidth; __le32 rx_bandwidth; __le16 max_latency; __le16 content_format; __u8 retrans_effort; __le16 pkt_type; } __packed; #define HCI_OP_REJECT_SYNC_CONN_REQ 0x042a struct hci_cp_reject_sync_conn_req { bdaddr_t bdaddr; __u8 reason; } __packed; #define HCI_OP_IO_CAPABILITY_REPLY 0x042b struct hci_cp_io_capability_reply { bdaddr_t bdaddr; __u8 capability; __u8 oob_data; __u8 authentication; } __packed; #define HCI_OP_USER_CONFIRM_REPLY 0x042c struct hci_cp_user_confirm_reply { bdaddr_t bdaddr; } __packed; struct hci_rp_user_confirm_reply { __u8 status; bdaddr_t bdaddr; } __packed; #define HCI_OP_USER_CONFIRM_NEG_REPLY 0x042d #define HCI_OP_USER_PASSKEY_REPLY 0x042e struct hci_cp_user_passkey_reply { bdaddr_t bdaddr; __le32 passkey; } __packed; #define HCI_OP_USER_PASSKEY_NEG_REPLY 0x042f #define HCI_OP_REMOTE_OOB_DATA_REPLY 0x0430 struct hci_cp_remote_oob_data_reply { bdaddr_t bdaddr; __u8 hash[16]; __u8 rand[16]; } __packed; #define HCI_OP_REMOTE_OOB_DATA_NEG_REPLY 0x0433 struct hci_cp_remote_oob_data_neg_reply { bdaddr_t bdaddr; } __packed; #define HCI_OP_IO_CAPABILITY_NEG_REPLY 0x0434 struct hci_cp_io_capability_neg_reply { bdaddr_t bdaddr; __u8 reason; } __packed; #define HCI_OP_ENHANCED_SETUP_SYNC_CONN 0x043d struct hci_coding_format { __u8 id; __le16 cid; __le16 vid; } __packed; struct 
hci_cp_enhanced_setup_sync_conn { __le16 handle; __le32 tx_bandwidth; __le32 rx_bandwidth; struct hci_coding_format tx_coding_format; struct hci_coding_format rx_coding_format; __le16 tx_codec_frame_size; __le16 rx_codec_frame_size; __le32 in_bandwidth; __le32 out_bandwidth; struct hci_coding_format in_coding_format; struct hci_coding_format out_coding_format; __le16 in_coded_data_size; __le16 out_coded_data_size; __u8 in_pcm_data_format; __u8 out_pcm_data_format; __u8 in_pcm_sample_payload_msb_pos; __u8 out_pcm_sample_payload_msb_pos; __u8 in_data_path; __u8 out_data_path; __u8 in_transport_unit_size; __u8 out_transport_unit_size; __le16 max_latency; __le16 pkt_type; __u8 retrans_effort; } __packed; struct hci_rp_logical_link_cancel { __u8 status; __u8 phy_handle; __u8 flow_spec_id; } __packed; #define HCI_OP_SET_CPB 0x0441 struct hci_cp_set_cpb { __u8 enable; __u8 lt_addr; __u8 lpo_allowed; __le16 packet_type; __le16 interval_min; __le16 interval_max; __le16 cpb_sv_tout; } __packed; struct hci_rp_set_cpb { __u8 status; __u8 lt_addr; __le16 interval; } __packed; #define HCI_OP_START_SYNC_TRAIN 0x0443 #define HCI_OP_REMOTE_OOB_EXT_DATA_REPLY 0x0445 struct hci_cp_remote_oob_ext_data_reply { bdaddr_t bdaddr; __u8 hash192[16]; __u8 rand192[16]; __u8 hash256[16]; __u8 rand256[16]; } __packed; #define HCI_OP_SNIFF_MODE 0x0803 struct hci_cp_sniff_mode { __le16 handle; __le16 max_interval; __le16 min_interval; __le16 attempt; __le16 timeout; } __packed; #define HCI_OP_EXIT_SNIFF_MODE 0x0804 struct hci_cp_exit_sniff_mode { __le16 handle; } __packed; #define HCI_OP_ROLE_DISCOVERY 0x0809 struct hci_cp_role_discovery { __le16 handle; } __packed; struct hci_rp_role_discovery { __u8 status; __le16 handle; __u8 role; } __packed; #define HCI_OP_SWITCH_ROLE 0x080b struct hci_cp_switch_role { bdaddr_t bdaddr; __u8 role; } __packed; #define HCI_OP_READ_LINK_POLICY 0x080c struct hci_cp_read_link_policy { __le16 handle; } __packed; struct hci_rp_read_link_policy { __u8 status; __le16 handle; __le16 policy; } __packed; #define HCI_OP_WRITE_LINK_POLICY 0x080d struct hci_cp_write_link_policy { __le16 handle; __le16 policy; } __packed; struct hci_rp_write_link_policy { __u8 status; __le16 handle; } __packed; #define HCI_OP_READ_DEF_LINK_POLICY 0x080e struct hci_rp_read_def_link_policy { __u8 status; __le16 policy; } __packed; #define HCI_OP_WRITE_DEF_LINK_POLICY 0x080f struct hci_cp_write_def_link_policy { __le16 policy; } __packed; #define HCI_OP_SNIFF_SUBRATE 0x0811 struct hci_cp_sniff_subrate { __le16 handle; __le16 max_latency; __le16 min_remote_timeout; __le16 min_local_timeout; } __packed; #define HCI_OP_SET_EVENT_MASK 0x0c01 #define HCI_OP_RESET 0x0c03 #define HCI_OP_SET_EVENT_FLT 0x0c05 #define HCI_SET_EVENT_FLT_SIZE 9 struct hci_cp_set_event_filter { __u8 flt_type; __u8 cond_type; struct { bdaddr_t bdaddr; __u8 auto_accept; } __packed addr_conn_flt; } __packed; /* Filter types */ #define HCI_FLT_CLEAR_ALL 0x00 #define HCI_FLT_INQ_RESULT 0x01 #define HCI_FLT_CONN_SETUP 0x02 /* CONN_SETUP Condition types */ #define HCI_CONN_SETUP_ALLOW_ALL 0x00 #define HCI_CONN_SETUP_ALLOW_CLASS 0x01 #define HCI_CONN_SETUP_ALLOW_BDADDR 0x02 /* CONN_SETUP Conditions */ #define HCI_CONN_SETUP_AUTO_OFF 0x01 #define HCI_CONN_SETUP_AUTO_ON 0x02 #define HCI_CONN_SETUP_AUTO_ON_WITH_RS 0x03 #define HCI_OP_READ_STORED_LINK_KEY 0x0c0d struct hci_cp_read_stored_link_key { bdaddr_t bdaddr; __u8 read_all; } __packed; struct hci_rp_read_stored_link_key { __u8 status; __le16 max_keys; __le16 num_keys; } __packed; #define 
HCI_OP_DELETE_STORED_LINK_KEY 0x0c12 struct hci_cp_delete_stored_link_key { bdaddr_t bdaddr; __u8 delete_all; } __packed; struct hci_rp_delete_stored_link_key { __u8 status; __le16 num_keys; } __packed; #define HCI_MAX_NAME_LENGTH 248 #define HCI_OP_WRITE_LOCAL_NAME 0x0c13 struct hci_cp_write_local_name { __u8 name[HCI_MAX_NAME_LENGTH]; } __packed; #define HCI_OP_READ_LOCAL_NAME 0x0c14 struct hci_rp_read_local_name { __u8 status; __u8 name[HCI_MAX_NAME_LENGTH]; } __packed; #define HCI_OP_WRITE_CA_TIMEOUT 0x0c16 #define HCI_OP_WRITE_PG_TIMEOUT 0x0c18 #define HCI_OP_WRITE_SCAN_ENABLE 0x0c1a #define SCAN_DISABLED 0x00 #define SCAN_INQUIRY 0x01 #define SCAN_PAGE 0x02 #define HCI_OP_READ_AUTH_ENABLE 0x0c1f #define HCI_OP_WRITE_AUTH_ENABLE 0x0c20 #define AUTH_DISABLED 0x00 #define AUTH_ENABLED 0x01 #define HCI_OP_READ_ENCRYPT_MODE 0x0c21 #define HCI_OP_WRITE_ENCRYPT_MODE 0x0c22 #define ENCRYPT_DISABLED 0x00 #define ENCRYPT_P2P 0x01 #define ENCRYPT_BOTH 0x02 #define HCI_OP_READ_CLASS_OF_DEV 0x0c23 struct hci_rp_read_class_of_dev { __u8 status; __u8 dev_class[3]; } __packed; #define HCI_OP_WRITE_CLASS_OF_DEV 0x0c24 struct hci_cp_write_class_of_dev { __u8 dev_class[3]; } __packed; #define HCI_OP_READ_VOICE_SETTING 0x0c25 struct hci_rp_read_voice_setting { __u8 status; __le16 voice_setting; } __packed; #define HCI_OP_WRITE_VOICE_SETTING 0x0c26 struct hci_cp_write_voice_setting { __le16 voice_setting; } __packed; #define HCI_OP_HOST_BUFFER_SIZE 0x0c33 struct hci_cp_host_buffer_size { __le16 acl_mtu; __u8 sco_mtu; __le16 acl_max_pkt; __le16 sco_max_pkt; } __packed; #define HCI_OP_READ_NUM_SUPPORTED_IAC 0x0c38 struct hci_rp_read_num_supported_iac { __u8 status; __u8 num_iac; } __packed; #define HCI_OP_READ_CURRENT_IAC_LAP 0x0c39 #define HCI_OP_WRITE_CURRENT_IAC_LAP 0x0c3a struct hci_cp_write_current_iac_lap { __u8 num_iac; __u8 iac_lap[6]; } __packed; #define HCI_OP_WRITE_INQUIRY_MODE 0x0c45 #define HCI_MAX_EIR_LENGTH 240 #define HCI_OP_WRITE_EIR 0x0c52 struct hci_cp_write_eir { __u8 fec; __u8 data[HCI_MAX_EIR_LENGTH]; } __packed; #define HCI_OP_READ_SSP_MODE 0x0c55 struct hci_rp_read_ssp_mode { __u8 status; __u8 mode; } __packed; #define HCI_OP_WRITE_SSP_MODE 0x0c56 struct hci_cp_write_ssp_mode { __u8 mode; } __packed; #define HCI_OP_READ_LOCAL_OOB_DATA 0x0c57 struct hci_rp_read_local_oob_data { __u8 status; __u8 hash[16]; __u8 rand[16]; } __packed; #define HCI_OP_READ_INQ_RSP_TX_POWER 0x0c58 struct hci_rp_read_inq_rsp_tx_power { __u8 status; __s8 tx_power; } __packed; #define HCI_OP_READ_DEF_ERR_DATA_REPORTING 0x0c5a #define ERR_DATA_REPORTING_DISABLED 0x00 #define ERR_DATA_REPORTING_ENABLED 0x01 struct hci_rp_read_def_err_data_reporting { __u8 status; __u8 err_data_reporting; } __packed; #define HCI_OP_WRITE_DEF_ERR_DATA_REPORTING 0x0c5b struct hci_cp_write_def_err_data_reporting { __u8 err_data_reporting; } __packed; #define HCI_OP_SET_EVENT_MASK_PAGE_2 0x0c63 #define HCI_OP_READ_LOCATION_DATA 0x0c64 #define HCI_OP_READ_FLOW_CONTROL_MODE 0x0c66 struct hci_rp_read_flow_control_mode { __u8 status; __u8 mode; } __packed; #define HCI_OP_WRITE_LE_HOST_SUPPORTED 0x0c6d struct hci_cp_write_le_host_supported { __u8 le; __u8 simul; } __packed; #define HCI_OP_SET_RESERVED_LT_ADDR 0x0c74 struct hci_cp_set_reserved_lt_addr { __u8 lt_addr; } __packed; struct hci_rp_set_reserved_lt_addr { __u8 status; __u8 lt_addr; } __packed; #define HCI_OP_DELETE_RESERVED_LT_ADDR 0x0c75 struct hci_cp_delete_reserved_lt_addr { __u8 lt_addr; } __packed; struct hci_rp_delete_reserved_lt_addr { __u8 status; __u8 lt_addr; } 
__packed; #define HCI_OP_SET_CPB_DATA 0x0c76 struct hci_cp_set_cpb_data { __u8 lt_addr; __u8 fragment; __u8 data_length; __u8 data[HCI_MAX_CPB_DATA_SIZE]; } __packed; struct hci_rp_set_cpb_data { __u8 status; __u8 lt_addr; } __packed; #define HCI_OP_READ_SYNC_TRAIN_PARAMS 0x0c77 #define HCI_OP_WRITE_SYNC_TRAIN_PARAMS 0x0c78 struct hci_cp_write_sync_train_params { __le16 interval_min; __le16 interval_max; __le32 sync_train_tout; __u8 service_data; } __packed; struct hci_rp_write_sync_train_params { __u8 status; __le16 sync_train_int; } __packed; #define HCI_OP_READ_SC_SUPPORT 0x0c79 struct hci_rp_read_sc_support { __u8 status; __u8 support; } __packed; #define HCI_OP_WRITE_SC_SUPPORT 0x0c7a struct hci_cp_write_sc_support { __u8 support; } __packed; #define HCI_OP_READ_AUTH_PAYLOAD_TO 0x0c7b struct hci_cp_read_auth_payload_to { __le16 handle; } __packed; struct hci_rp_read_auth_payload_to { __u8 status; __le16 handle; __le16 timeout; } __packed; #define HCI_OP_WRITE_AUTH_PAYLOAD_TO 0x0c7c struct hci_cp_write_auth_payload_to { __le16 handle; __le16 timeout; } __packed; struct hci_rp_write_auth_payload_to { __u8 status; __le16 handle; } __packed; #define HCI_OP_READ_LOCAL_OOB_EXT_DATA 0x0c7d struct hci_rp_read_local_oob_ext_data { __u8 status; __u8 hash192[16]; __u8 rand192[16]; __u8 hash256[16]; __u8 rand256[16]; } __packed; #define HCI_CONFIGURE_DATA_PATH 0x0c83 struct hci_op_configure_data_path { __u8 direction; __u8 data_path_id; __u8 vnd_len; __u8 vnd_data[]; } __packed; #define HCI_OP_READ_LOCAL_VERSION 0x1001 struct hci_rp_read_local_version { __u8 status; __u8 hci_ver; __le16 hci_rev; __u8 lmp_ver; __le16 manufacturer; __le16 lmp_subver; } __packed; #define HCI_OP_READ_LOCAL_COMMANDS 0x1002 struct hci_rp_read_local_commands { __u8 status; __u8 commands[64]; } __packed; #define HCI_OP_READ_LOCAL_FEATURES 0x1003 struct hci_rp_read_local_features { __u8 status; __u8 features[8]; } __packed; #define HCI_OP_READ_LOCAL_EXT_FEATURES 0x1004 struct hci_cp_read_local_ext_features { __u8 page; } __packed; struct hci_rp_read_local_ext_features { __u8 status; __u8 page; __u8 max_page; __u8 features[8]; } __packed; #define HCI_OP_READ_BUFFER_SIZE 0x1005 struct hci_rp_read_buffer_size { __u8 status; __le16 acl_mtu; __u8 sco_mtu; __le16 acl_max_pkt; __le16 sco_max_pkt; } __packed; #define HCI_OP_READ_BD_ADDR 0x1009 struct hci_rp_read_bd_addr { __u8 status; bdaddr_t bdaddr; } __packed; #define HCI_OP_READ_DATA_BLOCK_SIZE 0x100a struct hci_rp_read_data_block_size { __u8 status; __le16 max_acl_len; __le16 block_len; __le16 num_blocks; } __packed; #define HCI_OP_READ_LOCAL_CODECS 0x100b struct hci_std_codecs { __u8 num; __u8 codec[]; } __packed; struct hci_vnd_codec { /* company id */ __le16 cid; /* vendor codec id */ __le16 vid; } __packed; struct hci_vnd_codecs { __u8 num; struct hci_vnd_codec codec[]; } __packed; struct hci_rp_read_local_supported_codecs { __u8 status; struct hci_std_codecs std_codecs; struct hci_vnd_codecs vnd_codecs; } __packed; #define HCI_OP_READ_LOCAL_PAIRING_OPTS 0x100c struct hci_rp_read_local_pairing_opts { __u8 status; __u8 pairing_opts; __u8 max_key_size; } __packed; #define HCI_OP_READ_LOCAL_CODECS_V2 0x100d struct hci_std_codec_v2 { __u8 id; __u8 transport; } __packed; struct hci_std_codecs_v2 { __u8 num; struct hci_std_codec_v2 codec[]; } __packed; struct hci_vnd_codec_v2 { __le16 cid; __le16 vid; __u8 transport; } __packed; struct hci_vnd_codecs_v2 { __u8 num; struct hci_vnd_codec_v2 codec[]; } __packed; struct hci_rp_read_local_supported_codecs_v2 { __u8 status; struct 
hci_std_codecs_v2 std_codecs; struct hci_vnd_codecs_v2 vendor_codecs; } __packed; #define HCI_OP_READ_LOCAL_CODEC_CAPS 0x100e struct hci_op_read_local_codec_caps { __u8 id; __le16 cid; __le16 vid; __u8 transport; __u8 direction; } __packed; struct hci_codec_caps { __u8 len; __u8 data[]; } __packed; struct hci_rp_read_local_codec_caps { __u8 status; __u8 num_caps; } __packed; #define HCI_OP_READ_PAGE_SCAN_ACTIVITY 0x0c1b struct hci_rp_read_page_scan_activity { __u8 status; __le16 interval; __le16 window; } __packed; #define HCI_OP_WRITE_PAGE_SCAN_ACTIVITY 0x0c1c struct hci_cp_write_page_scan_activity { __le16 interval; __le16 window; } __packed; #define HCI_OP_READ_TX_POWER 0x0c2d struct hci_cp_read_tx_power { __le16 handle; __u8 type; } __packed; struct hci_rp_read_tx_power { __u8 status; __le16 handle; __s8 tx_power; } __packed; #define HCI_OP_WRITE_SYNC_FLOWCTL 0x0c2f struct hci_cp_write_sync_flowctl { __u8 enable; } __packed; #define HCI_OP_READ_PAGE_SCAN_TYPE 0x0c46 struct hci_rp_read_page_scan_type { __u8 status; __u8 type; } __packed; #define HCI_OP_WRITE_PAGE_SCAN_TYPE 0x0c47 #define PAGE_SCAN_TYPE_STANDARD 0x00 #define PAGE_SCAN_TYPE_INTERLACED 0x01 #define HCI_OP_READ_RSSI 0x1405 struct hci_cp_read_rssi { __le16 handle; } __packed; struct hci_rp_read_rssi { __u8 status; __le16 handle; __s8 rssi; } __packed; #define HCI_OP_READ_CLOCK 0x1407 struct hci_cp_read_clock { __le16 handle; __u8 which; } __packed; struct hci_rp_read_clock { __u8 status; __le16 handle; __le32 clock; __le16 accuracy; } __packed; #define HCI_OP_READ_ENC_KEY_SIZE 0x1408 struct hci_cp_read_enc_key_size { __le16 handle; } __packed; struct hci_rp_read_enc_key_size { __u8 status; __le16 handle; __u8 key_size; } __packed; #define HCI_OP_GET_MWS_TRANSPORT_CONFIG 0x140c #define HCI_OP_ENABLE_DUT_MODE 0x1803 #define HCI_OP_WRITE_SSP_DEBUG_MODE 0x1804 #define HCI_OP_LE_SET_EVENT_MASK 0x2001 struct hci_cp_le_set_event_mask { __u8 mask[8]; } __packed; /* BLUETOOTH CORE SPECIFICATION Version 5.4 | Vol 4, Part E * 7.8.2 LE Read Buffer Size command * MAX_LE_MTU is 0xffff. * 0 is also valid. It means that no dedicated LE Buffer exists. * It should use the HCI_Read_Buffer_Size command and mtu is shared * between BR/EDR and LE. 
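 *
 * A rough sketch of that fallback (assuming a decoded reply rp and the
 * le_mtu/le_pkts/acl_mtu/acl_pkts bookkeeping fields that struct hci_dev
 * in the companion hci_core.h provides):
 *
 *	hdev->le_mtu  = le16_to_cpu(rp->le_mtu);
 *	hdev->le_pkts = rp->le_max_pkt;
 *	if (!hdev->le_mtu) {
 *		hdev->le_mtu  = hdev->acl_mtu;
 *		hdev->le_pkts = hdev->acl_pkts;
 *	}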
*/ #define HCI_MIN_LE_MTU 0x001b #define HCI_OP_LE_READ_BUFFER_SIZE 0x2002 struct hci_rp_le_read_buffer_size { __u8 status; __le16 le_mtu; __u8 le_max_pkt; } __packed; #define HCI_OP_LE_READ_LOCAL_FEATURES 0x2003 struct hci_rp_le_read_local_features { __u8 status; __u8 features[8]; } __packed; #define HCI_OP_LE_SET_RANDOM_ADDR 0x2005 #define HCI_OP_LE_SET_ADV_PARAM 0x2006 struct hci_cp_le_set_adv_param { __le16 min_interval; __le16 max_interval; __u8 type; __u8 own_address_type; __u8 direct_addr_type; bdaddr_t direct_addr; __u8 channel_map; __u8 filter_policy; } __packed; #define HCI_OP_LE_READ_ADV_TX_POWER 0x2007 struct hci_rp_le_read_adv_tx_power { __u8 status; __s8 tx_power; } __packed; #define HCI_MAX_AD_LENGTH 31 #define HCI_OP_LE_SET_ADV_DATA 0x2008 struct hci_cp_le_set_adv_data { __u8 length; __u8 data[HCI_MAX_AD_LENGTH]; } __packed; #define HCI_OP_LE_SET_SCAN_RSP_DATA 0x2009 struct hci_cp_le_set_scan_rsp_data { __u8 length; __u8 data[HCI_MAX_AD_LENGTH]; } __packed; #define HCI_OP_LE_SET_ADV_ENABLE 0x200a #define LE_SCAN_PASSIVE 0x00 #define LE_SCAN_ACTIVE 0x01 #define HCI_OP_LE_SET_SCAN_PARAM 0x200b struct hci_cp_le_set_scan_param { __u8 type; __le16 interval; __le16 window; __u8 own_address_type; __u8 filter_policy; } __packed; #define LE_SCAN_DISABLE 0x00 #define LE_SCAN_ENABLE 0x01 #define LE_SCAN_FILTER_DUP_DISABLE 0x00 #define LE_SCAN_FILTER_DUP_ENABLE 0x01 #define HCI_OP_LE_SET_SCAN_ENABLE 0x200c struct hci_cp_le_set_scan_enable { __u8 enable; __u8 filter_dup; } __packed; #define HCI_LE_USE_PEER_ADDR 0x00 #define HCI_LE_USE_ACCEPT_LIST 0x01 #define HCI_OP_LE_CREATE_CONN 0x200d struct hci_cp_le_create_conn { __le16 scan_interval; __le16 scan_window; __u8 filter_policy; __u8 peer_addr_type; bdaddr_t peer_addr; __u8 own_address_type; __le16 conn_interval_min; __le16 conn_interval_max; __le16 conn_latency; __le16 supervision_timeout; __le16 min_ce_len; __le16 max_ce_len; } __packed; #define HCI_OP_LE_CREATE_CONN_CANCEL 0x200e #define HCI_OP_LE_READ_ACCEPT_LIST_SIZE 0x200f struct hci_rp_le_read_accept_list_size { __u8 status; __u8 size; } __packed; #define HCI_OP_LE_CLEAR_ACCEPT_LIST 0x2010 #define HCI_OP_LE_ADD_TO_ACCEPT_LIST 0x2011 struct hci_cp_le_add_to_accept_list { __u8 bdaddr_type; bdaddr_t bdaddr; } __packed; #define HCI_OP_LE_DEL_FROM_ACCEPT_LIST 0x2012 struct hci_cp_le_del_from_accept_list { __u8 bdaddr_type; bdaddr_t bdaddr; } __packed; #define HCI_OP_LE_CONN_UPDATE 0x2013 struct hci_cp_le_conn_update { __le16 handle; __le16 conn_interval_min; __le16 conn_interval_max; __le16 conn_latency; __le16 supervision_timeout; __le16 min_ce_len; __le16 max_ce_len; } __packed; #define HCI_OP_LE_READ_REMOTE_FEATURES 0x2016 struct hci_cp_le_read_remote_features { __le16 handle; } __packed; #define HCI_OP_LE_START_ENC 0x2019 struct hci_cp_le_start_enc { __le16 handle; __le64 rand; __le16 ediv; __u8 ltk[16]; } __packed; #define HCI_OP_LE_LTK_REPLY 0x201a struct hci_cp_le_ltk_reply { __le16 handle; __u8 ltk[16]; } __packed; struct hci_rp_le_ltk_reply { __u8 status; __le16 handle; } __packed; #define HCI_OP_LE_LTK_NEG_REPLY 0x201b struct hci_cp_le_ltk_neg_reply { __le16 handle; } __packed; struct hci_rp_le_ltk_neg_reply { __u8 status; __le16 handle; } __packed; #define HCI_OP_LE_READ_SUPPORTED_STATES 0x201c struct hci_rp_le_read_supported_states { __u8 status; __u8 le_states[8]; } __packed; #define HCI_OP_LE_CONN_PARAM_REQ_REPLY 0x2020 struct hci_cp_le_conn_param_req_reply { __le16 handle; __le16 interval_min; __le16 interval_max; __le16 latency; __le16 timeout; __le16 min_ce_len; 
__le16 max_ce_len; } __packed; #define HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY 0x2021 struct hci_cp_le_conn_param_req_neg_reply { __le16 handle; __u8 reason; } __packed; #define HCI_OP_LE_SET_DATA_LEN 0x2022 struct hci_cp_le_set_data_len { __le16 handle; __le16 tx_len; __le16 tx_time; } __packed; struct hci_rp_le_set_data_len { __u8 status; __le16 handle; } __packed; #define HCI_OP_LE_READ_DEF_DATA_LEN 0x2023 struct hci_rp_le_read_def_data_len { __u8 status; __le16 tx_len; __le16 tx_time; } __packed; #define HCI_OP_LE_WRITE_DEF_DATA_LEN 0x2024 struct hci_cp_le_write_def_data_len { __le16 tx_len; __le16 tx_time; } __packed; #define HCI_OP_LE_ADD_TO_RESOLV_LIST 0x2027 struct hci_cp_le_add_to_resolv_list { __u8 bdaddr_type; bdaddr_t bdaddr; __u8 peer_irk[16]; __u8 local_irk[16]; } __packed; #define HCI_OP_LE_DEL_FROM_RESOLV_LIST 0x2028 struct hci_cp_le_del_from_resolv_list { __u8 bdaddr_type; bdaddr_t bdaddr; } __packed; #define HCI_OP_LE_CLEAR_RESOLV_LIST 0x2029 #define HCI_OP_LE_READ_RESOLV_LIST_SIZE 0x202a struct hci_rp_le_read_resolv_list_size { __u8 status; __u8 size; } __packed; #define HCI_OP_LE_SET_ADDR_RESOLV_ENABLE 0x202d #define HCI_OP_LE_SET_RPA_TIMEOUT 0x202e #define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f struct hci_rp_le_read_max_data_len { __u8 status; __le16 tx_len; __le16 tx_time; __le16 rx_len; __le16 rx_time; } __packed; #define HCI_OP_LE_SET_DEFAULT_PHY 0x2031 struct hci_cp_le_set_default_phy { __u8 all_phys; __u8 tx_phys; __u8 rx_phys; } __packed; #define HCI_LE_SET_PHY_1M 0x01 #define HCI_LE_SET_PHY_2M 0x02 #define HCI_LE_SET_PHY_CODED 0x04 #define HCI_OP_LE_SET_EXT_SCAN_PARAMS 0x2041 struct hci_cp_le_set_ext_scan_params { __u8 own_addr_type; __u8 filter_policy; __u8 scanning_phys; __u8 data[]; } __packed; #define LE_SCAN_PHY_1M 0x01 #define LE_SCAN_PHY_2M 0x02 #define LE_SCAN_PHY_CODED 0x04 struct hci_cp_le_scan_phy_params { __u8 type; __le16 interval; __le16 window; } __packed; #define HCI_OP_LE_SET_EXT_SCAN_ENABLE 0x2042 struct hci_cp_le_set_ext_scan_enable { __u8 enable; __u8 filter_dup; __le16 duration; __le16 period; } __packed; #define HCI_OP_LE_EXT_CREATE_CONN 0x2043 struct hci_cp_le_ext_create_conn { __u8 filter_policy; __u8 own_addr_type; __u8 peer_addr_type; bdaddr_t peer_addr; __u8 phys; __u8 data[]; } __packed; struct hci_cp_le_ext_conn_param { __le16 scan_interval; __le16 scan_window; __le16 conn_interval_min; __le16 conn_interval_max; __le16 conn_latency; __le16 supervision_timeout; __le16 min_ce_len; __le16 max_ce_len; } __packed; #define HCI_OP_LE_PA_CREATE_SYNC 0x2044 struct hci_cp_le_pa_create_sync { __u8 options; __u8 sid; __u8 addr_type; bdaddr_t addr; __le16 skip; __le16 sync_timeout; __u8 sync_cte_type; } __packed; #define HCI_OP_LE_PA_CREATE_SYNC_CANCEL 0x2045 #define HCI_OP_LE_PA_TERM_SYNC 0x2046 struct hci_cp_le_pa_term_sync { __le16 handle; } __packed; #define HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS 0x203b struct hci_rp_le_read_num_supported_adv_sets { __u8 status; __u8 num_of_sets; } __packed; #define HCI_OP_LE_SET_EXT_ADV_PARAMS 0x2036 struct hci_cp_le_set_ext_adv_params { __u8 handle; __le16 evt_properties; __u8 min_interval[3]; __u8 max_interval[3]; __u8 channel_map; __u8 own_addr_type; __u8 peer_addr_type; bdaddr_t peer_addr; __u8 filter_policy; __u8 tx_power; __u8 primary_phy; __u8 secondary_max_skip; __u8 secondary_phy; __u8 sid; __u8 notif_enable; } __packed; #define HCI_ADV_PHY_1M 0X01 #define HCI_ADV_PHY_2M 0x02 #define HCI_ADV_PHY_CODED 0x03 struct hci_rp_le_set_ext_adv_params { __u8 status; __u8 tx_power; } __packed; struct hci_cp_ext_adv_set 
{ __u8 handle; __le16 duration; __u8 max_events; } __packed; #define HCI_MAX_EXT_AD_LENGTH 251 #define HCI_OP_LE_SET_EXT_ADV_DATA 0x2037 struct hci_cp_le_set_ext_adv_data { __u8 handle; __u8 operation; __u8 frag_pref; __u8 length; __u8 data[] __counted_by(length); } __packed; #define HCI_OP_LE_SET_EXT_SCAN_RSP_DATA 0x2038 struct hci_cp_le_set_ext_scan_rsp_data { __u8 handle; __u8 operation; __u8 frag_pref; __u8 length; __u8 data[] __counted_by(length); } __packed; #define HCI_OP_LE_SET_EXT_ADV_ENABLE 0x2039 struct hci_cp_le_set_ext_adv_enable { __u8 enable; __u8 num_of_sets; __u8 data[]; } __packed; #define HCI_OP_LE_SET_PER_ADV_PARAMS 0x203e struct hci_cp_le_set_per_adv_params { __u8 handle; __le16 min_interval; __le16 max_interval; __le16 periodic_properties; } __packed; #define HCI_MAX_PER_AD_LENGTH 252 #define HCI_MAX_PER_AD_TOT_LEN 1650 #define HCI_OP_LE_SET_PER_ADV_DATA 0x203f struct hci_cp_le_set_per_adv_data { __u8 handle; __u8 operation; __u8 length; __u8 data[] __counted_by(length); } __packed; #define HCI_OP_LE_SET_PER_ADV_ENABLE 0x2040 struct hci_cp_le_set_per_adv_enable { __u8 enable; __u8 handle; } __packed; #define LE_SET_ADV_DATA_OP_COMPLETE 0x03 #define LE_SET_ADV_DATA_NO_FRAG 0x01 #define HCI_OP_LE_REMOVE_ADV_SET 0x203c #define HCI_OP_LE_CLEAR_ADV_SETS 0x203d #define HCI_OP_LE_SET_ADV_SET_RAND_ADDR 0x2035 struct hci_cp_le_set_adv_set_rand_addr { __u8 handle; bdaddr_t bdaddr; } __packed; #define HCI_OP_LE_READ_TRANSMIT_POWER 0x204b struct hci_rp_le_read_transmit_power { __u8 status; __s8 min_le_tx_power; __s8 max_le_tx_power; } __packed; #define HCI_NETWORK_PRIVACY 0x00 #define HCI_DEVICE_PRIVACY 0x01 #define HCI_OP_LE_SET_PRIVACY_MODE 0x204e struct hci_cp_le_set_privacy_mode { __u8 bdaddr_type; bdaddr_t bdaddr; __u8 mode; } __packed; #define HCI_OP_LE_READ_BUFFER_SIZE_V2 0x2060 struct hci_rp_le_read_buffer_size_v2 { __u8 status; __le16 acl_mtu; __u8 acl_max_pkt; __le16 iso_mtu; __u8 iso_max_pkt; } __packed; #define HCI_OP_LE_READ_ISO_TX_SYNC 0x2061 struct hci_cp_le_read_iso_tx_sync { __le16 handle; } __packed; struct hci_rp_le_read_iso_tx_sync { __u8 status; __le16 handle; __le16 seq; __le32 imestamp; __u8 offset[3]; } __packed; #define HCI_OP_LE_SET_CIG_PARAMS 0x2062 struct hci_cis_params { __u8 cis_id; __le16 c_sdu; __le16 p_sdu; __u8 c_phy; __u8 p_phy; __u8 c_rtn; __u8 p_rtn; } __packed; struct hci_cp_le_set_cig_params { __u8 cig_id; __u8 c_interval[3]; __u8 p_interval[3]; __u8 sca; __u8 packing; __u8 framing; __le16 c_latency; __le16 p_latency; __u8 num_cis; struct hci_cis_params cis[] __counted_by(num_cis); } __packed; struct hci_rp_le_set_cig_params { __u8 status; __u8 cig_id; __u8 num_handles; __le16 handle[]; } __packed; #define HCI_OP_LE_CREATE_CIS 0x2064 struct hci_cis { __le16 cis_handle; __le16 acl_handle; } __packed; struct hci_cp_le_create_cis { __u8 num_cis; struct hci_cis cis[] __counted_by(num_cis); } __packed; #define HCI_OP_LE_REMOVE_CIG 0x2065 struct hci_cp_le_remove_cig { __u8 cig_id; } __packed; #define HCI_OP_LE_ACCEPT_CIS 0x2066 struct hci_cp_le_accept_cis { __le16 handle; } __packed; #define HCI_OP_LE_REJECT_CIS 0x2067 struct hci_cp_le_reject_cis { __le16 handle; __u8 reason; } __packed; #define HCI_OP_LE_CREATE_BIG 0x2068 struct hci_bis { __u8 sdu_interval[3]; __le16 sdu; __le16 latency; __u8 rtn; __u8 phy; __u8 packing; __u8 framing; __u8 encryption; __u8 bcode[16]; } __packed; struct hci_cp_le_create_big { __u8 handle; __u8 adv_handle; __u8 num_bis; struct hci_bis bis; } __packed; #define HCI_OP_LE_TERM_BIG 0x206a struct hci_cp_le_term_big { 
__u8 handle; __u8 reason; } __packed; #define HCI_OP_LE_BIG_CREATE_SYNC 0x206b struct hci_cp_le_big_create_sync { __u8 handle; __le16 sync_handle; __u8 encryption; __u8 bcode[16]; __u8 mse; __le16 timeout; __u8 num_bis; __u8 bis[] __counted_by(num_bis); } __packed; #define HCI_OP_LE_BIG_TERM_SYNC 0x206c struct hci_cp_le_big_term_sync { __u8 handle; } __packed; #define HCI_OP_LE_SETUP_ISO_PATH 0x206e struct hci_cp_le_setup_iso_path { __le16 handle; __u8 direction; __u8 path; __u8 codec; __le16 codec_cid; __le16 codec_vid; __u8 delay[3]; __u8 codec_cfg_len; __u8 codec_cfg[]; } __packed; struct hci_rp_le_setup_iso_path { __u8 status; __le16 handle; } __packed; #define HCI_OP_LE_SET_HOST_FEATURE 0x2074 struct hci_cp_le_set_host_feature { __u8 bit_number; __u8 bit_value; } __packed; /* ---- HCI Events ---- */ struct hci_ev_status { __u8 status; } __packed; #define HCI_EV_INQUIRY_COMPLETE 0x01 #define HCI_EV_INQUIRY_RESULT 0x02 struct inquiry_info { bdaddr_t bdaddr; __u8 pscan_rep_mode; __u8 pscan_period_mode; __u8 pscan_mode; __u8 dev_class[3]; __le16 clock_offset; } __packed; struct hci_ev_inquiry_result { __u8 num; struct inquiry_info info[]; }; #define HCI_EV_CONN_COMPLETE 0x03 struct hci_ev_conn_complete { __u8 status; __le16 handle; bdaddr_t bdaddr; __u8 link_type; __u8 encr_mode; } __packed; #define HCI_EV_CONN_REQUEST 0x04 struct hci_ev_conn_request { bdaddr_t bdaddr; __u8 dev_class[3]; __u8 link_type; } __packed; #define HCI_EV_DISCONN_COMPLETE 0x05 struct hci_ev_disconn_complete { __u8 status; __le16 handle; __u8 reason; } __packed; #define HCI_EV_AUTH_COMPLETE 0x06 struct hci_ev_auth_complete { __u8 status; __le16 handle; } __packed; #define HCI_EV_REMOTE_NAME 0x07 struct hci_ev_remote_name { __u8 status; bdaddr_t bdaddr; __u8 name[HCI_MAX_NAME_LENGTH]; } __packed; #define HCI_EV_ENCRYPT_CHANGE 0x08 struct hci_ev_encrypt_change { __u8 status; __le16 handle; __u8 encrypt; } __packed; #define HCI_EV_CHANGE_LINK_KEY_COMPLETE 0x09 struct hci_ev_change_link_key_complete { __u8 status; __le16 handle; } __packed; #define HCI_EV_REMOTE_FEATURES 0x0b struct hci_ev_remote_features { __u8 status; __le16 handle; __u8 features[8]; } __packed; #define HCI_EV_REMOTE_VERSION 0x0c struct hci_ev_remote_version { __u8 status; __le16 handle; __u8 lmp_ver; __le16 manufacturer; __le16 lmp_subver; } __packed; #define HCI_EV_QOS_SETUP_COMPLETE 0x0d struct hci_qos { __u8 service_type; __u32 token_rate; __u32 peak_bandwidth; __u32 latency; __u32 delay_variation; } __packed; struct hci_ev_qos_setup_complete { __u8 status; __le16 handle; struct hci_qos qos; } __packed; #define HCI_EV_CMD_COMPLETE 0x0e struct hci_ev_cmd_complete { __u8 ncmd; __le16 opcode; } __packed; #define HCI_EV_CMD_STATUS 0x0f struct hci_ev_cmd_status { __u8 status; __u8 ncmd; __le16 opcode; } __packed; #define HCI_EV_HARDWARE_ERROR 0x10 struct hci_ev_hardware_error { __u8 code; } __packed; #define HCI_EV_ROLE_CHANGE 0x12 struct hci_ev_role_change { __u8 status; bdaddr_t bdaddr; __u8 role; } __packed; #define HCI_EV_NUM_COMP_PKTS 0x13 struct hci_comp_pkts_info { __le16 handle; __le16 count; } __packed; struct hci_ev_num_comp_pkts { __u8 num; struct hci_comp_pkts_info handles[]; } __packed; #define HCI_EV_MODE_CHANGE 0x14 struct hci_ev_mode_change { __u8 status; __le16 handle; __u8 mode; __le16 interval; } __packed; #define HCI_EV_PIN_CODE_REQ 0x16 struct hci_ev_pin_code_req { bdaddr_t bdaddr; } __packed; #define HCI_EV_LINK_KEY_REQ 0x17 struct hci_ev_link_key_req { bdaddr_t bdaddr; } __packed; #define HCI_EV_LINK_KEY_NOTIFY 0x18 struct 
hci_ev_link_key_notify { bdaddr_t bdaddr; __u8 link_key[HCI_LINK_KEY_SIZE]; __u8 key_type; } __packed; #define HCI_EV_CLOCK_OFFSET 0x1c struct hci_ev_clock_offset { __u8 status; __le16 handle; __le16 clock_offset; } __packed; #define HCI_EV_PKT_TYPE_CHANGE 0x1d struct hci_ev_pkt_type_change { __u8 status; __le16 handle; __le16 pkt_type; } __packed; #define HCI_EV_PSCAN_REP_MODE 0x20 struct hci_ev_pscan_rep_mode { bdaddr_t bdaddr; __u8 pscan_rep_mode; } __packed; #define HCI_EV_INQUIRY_RESULT_WITH_RSSI 0x22 struct inquiry_info_rssi { bdaddr_t bdaddr; __u8 pscan_rep_mode; __u8 pscan_period_mode; __u8 dev_class[3]; __le16 clock_offset; __s8 rssi; } __packed; struct inquiry_info_rssi_pscan { bdaddr_t bdaddr; __u8 pscan_rep_mode; __u8 pscan_period_mode; __u8 pscan_mode; __u8 dev_class[3]; __le16 clock_offset; __s8 rssi; } __packed; struct hci_ev_inquiry_result_rssi { __u8 num; __u8 data[]; } __packed; #define HCI_EV_REMOTE_EXT_FEATURES 0x23 struct hci_ev_remote_ext_features { __u8 status; __le16 handle; __u8 page; __u8 max_page; __u8 features[8]; } __packed; #define HCI_EV_SYNC_CONN_COMPLETE 0x2c struct hci_ev_sync_conn_complete { __u8 status; __le16 handle; bdaddr_t bdaddr; __u8 link_type; __u8 tx_interval; __u8 retrans_window; __le16 rx_pkt_len; __le16 tx_pkt_len; __u8 air_mode; } __packed; #define HCI_EV_SYNC_CONN_CHANGED 0x2d struct hci_ev_sync_conn_changed { __u8 status; __le16 handle; __u8 tx_interval; __u8 retrans_window; __le16 rx_pkt_len; __le16 tx_pkt_len; } __packed; #define HCI_EV_SNIFF_SUBRATE 0x2e struct hci_ev_sniff_subrate { __u8 status; __le16 handle; __le16 max_tx_latency; __le16 max_rx_latency; __le16 max_remote_timeout; __le16 max_local_timeout; } __packed; #define HCI_EV_EXTENDED_INQUIRY_RESULT 0x2f struct extended_inquiry_info { bdaddr_t bdaddr; __u8 pscan_rep_mode; __u8 pscan_period_mode; __u8 dev_class[3]; __le16 clock_offset; __s8 rssi; __u8 data[240]; } __packed; struct hci_ev_ext_inquiry_result { __u8 num; struct extended_inquiry_info info[]; } __packed; #define HCI_EV_KEY_REFRESH_COMPLETE 0x30 struct hci_ev_key_refresh_complete { __u8 status; __le16 handle; } __packed; #define HCI_EV_IO_CAPA_REQUEST 0x31 struct hci_ev_io_capa_request { bdaddr_t bdaddr; } __packed; #define HCI_EV_IO_CAPA_REPLY 0x32 struct hci_ev_io_capa_reply { bdaddr_t bdaddr; __u8 capability; __u8 oob_data; __u8 authentication; } __packed; #define HCI_EV_USER_CONFIRM_REQUEST 0x33 struct hci_ev_user_confirm_req { bdaddr_t bdaddr; __le32 passkey; } __packed; #define HCI_EV_USER_PASSKEY_REQUEST 0x34 struct hci_ev_user_passkey_req { bdaddr_t bdaddr; } __packed; #define HCI_EV_REMOTE_OOB_DATA_REQUEST 0x35 struct hci_ev_remote_oob_data_request { bdaddr_t bdaddr; } __packed; #define HCI_EV_SIMPLE_PAIR_COMPLETE 0x36 struct hci_ev_simple_pair_complete { __u8 status; bdaddr_t bdaddr; } __packed; #define HCI_EV_USER_PASSKEY_NOTIFY 0x3b struct hci_ev_user_passkey_notify { bdaddr_t bdaddr; __le32 passkey; } __packed; #define HCI_KEYPRESS_STARTED 0 #define HCI_KEYPRESS_ENTERED 1 #define HCI_KEYPRESS_ERASED 2 #define HCI_KEYPRESS_CLEARED 3 #define HCI_KEYPRESS_COMPLETED 4 #define HCI_EV_KEYPRESS_NOTIFY 0x3c struct hci_ev_keypress_notify { bdaddr_t bdaddr; __u8 type; } __packed; #define HCI_EV_REMOTE_HOST_FEATURES 0x3d struct hci_ev_remote_host_features { bdaddr_t bdaddr; __u8 features[8]; } __packed; #define HCI_EV_LE_META 0x3e struct hci_ev_le_meta { __u8 subevent; } __packed; #define HCI_EV_PHY_LINK_COMPLETE 0x40 struct hci_ev_phy_link_complete { __u8 status; __u8 phy_handle; } __packed; #define 
HCI_EV_CHANNEL_SELECTED 0x41 struct hci_ev_channel_selected { __u8 phy_handle; } __packed; #define HCI_EV_DISCONN_PHY_LINK_COMPLETE 0x42 struct hci_ev_disconn_phy_link_complete { __u8 status; __u8 phy_handle; __u8 reason; } __packed; #define HCI_EV_LOGICAL_LINK_COMPLETE 0x45 struct hci_ev_logical_link_complete { __u8 status; __le16 handle; __u8 phy_handle; __u8 flow_spec_id; } __packed; #define HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE 0x46 struct hci_ev_disconn_logical_link_complete { __u8 status; __le16 handle; __u8 reason; } __packed; #define HCI_EV_NUM_COMP_BLOCKS 0x48 struct hci_comp_blocks_info { __le16 handle; __le16 pkts; __le16 blocks; } __packed; struct hci_ev_num_comp_blocks { __le16 num_blocks; __u8 num_hndl; struct hci_comp_blocks_info handles[]; } __packed; #define HCI_EV_SYNC_TRAIN_COMPLETE 0x4F struct hci_ev_sync_train_complete { __u8 status; } __packed; #define HCI_EV_PERIPHERAL_PAGE_RESP_TIMEOUT 0x54 #define HCI_EV_LE_CONN_COMPLETE 0x01 struct hci_ev_le_conn_complete { __u8 status; __le16 handle; __u8 role; __u8 bdaddr_type; bdaddr_t bdaddr; __le16 interval; __le16 latency; __le16 supervision_timeout; __u8 clk_accurancy; } __packed; /* Advertising report event types */ #define LE_ADV_IND 0x00 #define LE_ADV_DIRECT_IND 0x01 #define LE_ADV_SCAN_IND 0x02 #define LE_ADV_NONCONN_IND 0x03 #define LE_ADV_SCAN_RSP 0x04 #define LE_ADV_INVALID 0x05 /* Legacy event types in extended adv report */ #define LE_LEGACY_ADV_IND 0x0013 #define LE_LEGACY_ADV_DIRECT_IND 0x0015 #define LE_LEGACY_ADV_SCAN_IND 0x0012 #define LE_LEGACY_NONCONN_IND 0x0010 #define LE_LEGACY_SCAN_RSP_ADV 0x001b #define LE_LEGACY_SCAN_RSP_ADV_SCAN 0x001a /* Extended Advertising event types */ #define LE_EXT_ADV_NON_CONN_IND 0x0000 #define LE_EXT_ADV_CONN_IND 0x0001 #define LE_EXT_ADV_SCAN_IND 0x0002 #define LE_EXT_ADV_DIRECT_IND 0x0004 #define LE_EXT_ADV_SCAN_RSP 0x0008 #define LE_EXT_ADV_LEGACY_PDU 0x0010 #define LE_EXT_ADV_EVT_TYPE_MASK 0x007f #define ADDR_LE_DEV_PUBLIC 0x00 #define ADDR_LE_DEV_RANDOM 0x01 #define ADDR_LE_DEV_PUBLIC_RESOLVED 0x02 #define ADDR_LE_DEV_RANDOM_RESOLVED 0x03 #define HCI_EV_LE_ADVERTISING_REPORT 0x02 struct hci_ev_le_advertising_info { __u8 type; __u8 bdaddr_type; bdaddr_t bdaddr; __u8 length; __u8 data[]; } __packed; struct hci_ev_le_advertising_report { __u8 num; struct hci_ev_le_advertising_info info[]; } __packed; #define HCI_EV_LE_CONN_UPDATE_COMPLETE 0x03 struct hci_ev_le_conn_update_complete { __u8 status; __le16 handle; __le16 interval; __le16 latency; __le16 supervision_timeout; } __packed; #define HCI_EV_LE_REMOTE_FEAT_COMPLETE 0x04 struct hci_ev_le_remote_feat_complete { __u8 status; __le16 handle; __u8 features[8]; } __packed; #define HCI_EV_LE_LTK_REQ 0x05 struct hci_ev_le_ltk_req { __le16 handle; __le64 rand; __le16 ediv; } __packed; #define HCI_EV_LE_REMOTE_CONN_PARAM_REQ 0x06 struct hci_ev_le_remote_conn_param_req { __le16 handle; __le16 interval_min; __le16 interval_max; __le16 latency; __le16 timeout; } __packed; #define HCI_EV_LE_DATA_LEN_CHANGE 0x07 struct hci_ev_le_data_len_change { __le16 handle; __le16 tx_len; __le16 tx_time; __le16 rx_len; __le16 rx_time; } __packed; #define HCI_EV_LE_DIRECT_ADV_REPORT 0x0B struct hci_ev_le_direct_adv_info { __u8 type; __u8 bdaddr_type; bdaddr_t bdaddr; __u8 direct_addr_type; bdaddr_t direct_addr; __s8 rssi; } __packed; struct hci_ev_le_direct_adv_report { __u8 num; struct hci_ev_le_direct_adv_info info[]; } __packed; #define HCI_EV_LE_PHY_UPDATE_COMPLETE 0x0c struct hci_ev_le_phy_update_complete { __u8 status; __le16 handle; __u8 
tx_phy; __u8 rx_phy; } __packed; #define HCI_EV_LE_EXT_ADV_REPORT 0x0d struct hci_ev_le_ext_adv_info { __le16 type; __u8 bdaddr_type; bdaddr_t bdaddr; __u8 primary_phy; __u8 secondary_phy; __u8 sid; __u8 tx_power; __s8 rssi; __le16 interval; __u8 direct_addr_type; bdaddr_t direct_addr; __u8 length; __u8 data[]; } __packed; struct hci_ev_le_ext_adv_report { __u8 num; struct hci_ev_le_ext_adv_info info[]; } __packed; #define HCI_EV_LE_PA_SYNC_ESTABLISHED 0x0e struct hci_ev_le_pa_sync_established { __u8 status; __le16 handle; __u8 sid; __u8 bdaddr_type; bdaddr_t bdaddr; __u8 phy; __le16 interval; __u8 clock_accuracy; } __packed; #define HCI_EV_LE_ENHANCED_CONN_COMPLETE 0x0a struct hci_ev_le_enh_conn_complete { __u8 status; __le16 handle; __u8 role; __u8 bdaddr_type; bdaddr_t bdaddr; bdaddr_t local_rpa; bdaddr_t peer_rpa; __le16 interval; __le16 latency; __le16 supervision_timeout; __u8 clk_accurancy; } __packed; #define HCI_EV_LE_PER_ADV_REPORT 0x0f struct hci_ev_le_per_adv_report { __le16 sync_handle; __u8 tx_power; __u8 rssi; __u8 cte_type; __u8 data_status; __u8 length; __u8 data[]; } __packed; #define LE_PA_DATA_COMPLETE 0x00 #define LE_PA_DATA_MORE_TO_COME 0x01 #define LE_PA_DATA_TRUNCATED 0x02 #define HCI_EV_LE_EXT_ADV_SET_TERM 0x12 struct hci_evt_le_ext_adv_set_term { __u8 status; __u8 handle; __le16 conn_handle; __u8 num_evts; } __packed; #define HCI_EVT_LE_CIS_ESTABLISHED 0x19 struct hci_evt_le_cis_established { __u8 status; __le16 handle; __u8 cig_sync_delay[3]; __u8 cis_sync_delay[3]; __u8 c_latency[3]; __u8 p_latency[3]; __u8 c_phy; __u8 p_phy; __u8 nse; __u8 c_bn; __u8 p_bn; __u8 c_ft; __u8 p_ft; __le16 c_mtu; __le16 p_mtu; __le16 interval; } __packed; #define HCI_EVT_LE_CIS_REQ 0x1a struct hci_evt_le_cis_req { __le16 acl_handle; __le16 cis_handle; __u8 cig_id; __u8 cis_id; } __packed; #define HCI_EVT_LE_CREATE_BIG_COMPLETE 0x1b struct hci_evt_le_create_big_complete { __u8 status; __u8 handle; __u8 sync_delay[3]; __u8 transport_delay[3]; __u8 phy; __u8 nse; __u8 bn; __u8 pto; __u8 irc; __le16 max_pdu; __le16 interval; __u8 num_bis; __le16 bis_handle[]; } __packed; #define HCI_EVT_LE_BIG_SYNC_ESTABLISHED 0x1d struct hci_evt_le_big_sync_estabilished { __u8 status; __u8 handle; __u8 latency[3]; __u8 nse; __u8 bn; __u8 pto; __u8 irc; __le16 max_pdu; __le16 interval; __u8 num_bis; __le16 bis[]; } __packed; #define HCI_EVT_LE_BIG_INFO_ADV_REPORT 0x22 struct hci_evt_le_big_info_adv_report { __le16 sync_handle; __u8 num_bis; __u8 nse; __le16 iso_interval; __u8 bn; __u8 pto; __u8 irc; __le16 max_pdu; __u8 sdu_interval[3]; __le16 max_sdu; __u8 phy; __u8 framing; __u8 encryption; } __packed; #define HCI_EV_VENDOR 0xff /* Internal events generated by Bluetooth stack */ #define HCI_EV_STACK_INTERNAL 0xfd struct hci_ev_stack_internal { __u16 type; __u8 data[]; } __packed; #define HCI_EV_SI_DEVICE 0x01 struct hci_ev_si_device { __u16 event; __u16 dev_id; } __packed; #define HCI_EV_SI_SECURITY 0x02 struct hci_ev_si_security { __u16 event; __u16 proto; __u16 subproto; __u8 incoming; } __packed; /* ---- HCI Packet structures ---- */ #define HCI_COMMAND_HDR_SIZE 3 #define HCI_EVENT_HDR_SIZE 2 #define HCI_ACL_HDR_SIZE 4 #define HCI_SCO_HDR_SIZE 3 #define HCI_ISO_HDR_SIZE 4 struct hci_command_hdr { __le16 opcode; /* OCF & OGF */ __u8 plen; } __packed; struct hci_event_hdr { __u8 evt; __u8 plen; } __packed; struct hci_acl_hdr { __le16 handle; /* Handle & Flags(PB, BC) */ __le16 dlen; } __packed; struct hci_sco_hdr { __le16 handle; __u8 dlen; } __packed; struct hci_iso_hdr { __le16 handle; __le16 dlen; 
	__u8	data[];
} __packed;

/* ISO data packet status flags */
#define HCI_ISO_STATUS_VALID	0x00
#define HCI_ISO_STATUS_INVALID	0x01
#define HCI_ISO_STATUS_NOP	0x02

#define HCI_ISO_DATA_HDR_SIZE	4
struct hci_iso_data_hdr {
	__le16	sn;
	__le16	slen;
};

#define HCI_ISO_TS_DATA_HDR_SIZE 8
struct hci_iso_ts_data_hdr {
	__le32	ts;
	__le16	sn;
	__le16	slen;
};

static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb)
{
	return (struct hci_event_hdr *) skb->data;
}

static inline struct hci_acl_hdr *hci_acl_hdr(const struct sk_buff *skb)
{
	return (struct hci_acl_hdr *) skb->data;
}

static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
{
	return (struct hci_sco_hdr *) skb->data;
}

static inline struct hci_iso_hdr *hci_iso_hdr(const struct sk_buff *skb)
{
	return (struct hci_iso_hdr *)skb->data;
}

/* Command opcode pack/unpack */
#define hci_opcode_pack(ogf, ocf)	((__u16) ((ocf & 0x03ff)|(ogf << 10)))
#define hci_opcode_ogf(op)		(op >> 10)
#define hci_opcode_ocf(op)		(op & 0x03ff)

/* ACL handle and flags pack/unpack */
#define hci_handle_pack(h, f)	((__u16) ((h & 0x0fff)|(f << 12)))
#define hci_handle(h)		(h & 0x0fff)
#define hci_flags(h)		(h >> 12)

/* ISO handle and flags pack/unpack */
#define hci_iso_flags_pb(f)		(f & 0x0003)
#define hci_iso_flags_ts(f)		((f >> 2) & 0x0001)
#define hci_iso_flags_pack(pb, ts)	((pb & 0x03) | ((ts & 0x01) << 2))

/* ISO data length and flags pack/unpack */
#define hci_iso_data_len_pack(h, f)	((__u16) ((h) | ((f) << 14)))
#define hci_iso_data_len(h)		((h) & 0x3fff)
#define hci_iso_data_flags(h)		((h) >> 14)

/* codec transport types */
#define HCI_TRANSPORT_SCO_ESCO	0x01

/* le24 support */
static inline void hci_cpu_to_le24(__u32 val, __u8 dst[3])
{
	dst[0] = val & 0xff;
	dst[1] = (val & 0xff00) >> 8;
	dst[2] = (val & 0xff0000) >> 16;
}

#endif /* __HCI_H */
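/*
 * [Editor's note] Illustrative sketch, not part of hci.h: how the
 * pack/unpack helpers above fit together. The opcode and handle values
 * come from definitions earlier in this header; the demo function
 * itself is hypothetical.
 */
static inline void hci_pack_demo(void)
{
	__u8 delay[3];

	/* OGF 0x08 (LE Controller) and OCF 0x074 pack to opcode 0x2074,
	 * i.e. HCI_OP_LE_SET_HOST_FEATURE defined above, and unpack back
	 * to the same halves.
	 */
	BUILD_BUG_ON(hci_opcode_pack(0x08, 0x074) != 0x2074);
	BUILD_BUG_ON(hci_opcode_ogf(0x2074) != 0x08);
	BUILD_BUG_ON(hci_opcode_ocf(0x2074) != 0x074);

	/* An ACL handle of 0x0042 with flags 0x2 survives a round trip:
	 * the handle lives in the low 12 bits, the flags in the top 4.
	 */
	BUILD_BUG_ON(hci_handle(hci_handle_pack(0x0042, 0x2)) != 0x0042);
	BUILD_BUG_ON(hci_flags(hci_handle_pack(0x0042, 0x2)) != 0x2);

	/* Store 100000 us as a 3-byte little-endian field, as used by
	 * e.g. the delay member of struct hci_cp_le_setup_iso_path.
	 */
	hci_cpu_to_le24(100000, delay);
}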
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/l3mdev/l3mdev.c - L3 master device implementation
 * Copyright (c) 2015 Cumulus Networks
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 */

#include <linux/netdevice.h>
#include <net/fib_rules.h>
#include <net/l3mdev.h>

static DEFINE_SPINLOCK(l3mdev_lock);

struct l3mdev_handler {
	lookup_by_table_id_t dev_lookup;
};

static struct l3mdev_handler l3mdev_handlers[L3MDEV_TYPE_MAX + 1];

static int l3mdev_check_type(enum l3mdev_type l3type)
{
	if (l3type <= L3MDEV_TYPE_UNSPEC || l3type > L3MDEV_TYPE_MAX)
		return -EINVAL;

	return 0;
}

int l3mdev_table_lookup_register(enum l3mdev_type l3type,
				 lookup_by_table_id_t fn)
{
	struct l3mdev_handler *hdlr;
	int res;

	res = l3mdev_check_type(l3type);
	if (res)
		return res;

	hdlr = &l3mdev_handlers[l3type];
	spin_lock(&l3mdev_lock);

	if (hdlr->dev_lookup) {
		res = -EBUSY;
		goto unlock;
	}

	hdlr->dev_lookup = fn;
	res = 0;

unlock:
	spin_unlock(&l3mdev_lock);

	return res;
}
EXPORT_SYMBOL_GPL(l3mdev_table_lookup_register);

void l3mdev_table_lookup_unregister(enum l3mdev_type l3type,
				    lookup_by_table_id_t fn)
{
	struct l3mdev_handler *hdlr;

	if (l3mdev_check_type(l3type))
		return;

	hdlr = &l3mdev_handlers[l3type];

	spin_lock(&l3mdev_lock);
	if (hdlr->dev_lookup == fn)
		hdlr->dev_lookup = NULL;
	spin_unlock(&l3mdev_lock);
}
EXPORT_SYMBOL_GPL(l3mdev_table_lookup_unregister);

int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type,
				      struct net *net, u32 table_id)
{
	lookup_by_table_id_t lookup;
	struct l3mdev_handler *hdlr;
	int ifindex = -EINVAL;
	int res;

	res = l3mdev_check_type(l3type);
	if (res)
		return res;

	hdlr = &l3mdev_handlers[l3type];
	spin_lock(&l3mdev_lock);

	lookup = hdlr->dev_lookup;
	if (!lookup)
		goto unlock;

	ifindex = lookup(net, table_id);

unlock:
	spin_unlock(&l3mdev_lock);

	return ifindex;
}
EXPORT_SYMBOL_GPL(l3mdev_ifindex_lookup_by_table_id);

/**
 * l3mdev_master_ifindex_rcu - get index of L3 master device
 * @dev: targeted interface
 */
int l3mdev_master_ifindex_rcu(const struct net_device *dev)
{
	int ifindex = 0;

	if (!dev)
		return 0;

	if (netif_is_l3_master(dev)) {
		ifindex = dev->ifindex;
	} else if (netif_is_l3_slave(dev)) {
		struct net_device *master;
		struct net_device *_dev = (struct net_device *)dev;

		/* netdev_master_upper_dev_get_rcu calls
		 * list_first_or_null_rcu to walk the upper dev list.
* list_first_or_null_rcu does not handle a const arg. We aren't * making changes, just want the master device from that list so * typecast to remove the const */ master = netdev_master_upper_dev_get_rcu(_dev); if (master) ifindex = master->ifindex; } return ifindex; } EXPORT_SYMBOL_GPL(l3mdev_master_ifindex_rcu); /** * l3mdev_master_upper_ifindex_by_index_rcu - get index of upper l3 master * device * @net: network namespace for device index lookup * @ifindex: targeted interface */ int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex) { struct net_device *dev; dev = dev_get_by_index_rcu(net, ifindex); while (dev && !netif_is_l3_master(dev)) dev = netdev_master_upper_dev_get_rcu(dev); return dev ? dev->ifindex : 0; } EXPORT_SYMBOL_GPL(l3mdev_master_upper_ifindex_by_index_rcu); /** * l3mdev_fib_table_rcu - get FIB table id associated with an L3 * master interface * @dev: targeted interface */ u32 l3mdev_fib_table_rcu(const struct net_device *dev) { u32 tb_id = 0; if (!dev) return 0; if (netif_is_l3_master(dev)) { if (dev->l3mdev_ops->l3mdev_fib_table) tb_id = dev->l3mdev_ops->l3mdev_fib_table(dev); } else if (netif_is_l3_slave(dev)) { /* Users of netdev_master_upper_dev_get_rcu need non-const, * but current inet_*type functions take a const */ struct net_device *_dev = (struct net_device *) dev; const struct net_device *master; master = netdev_master_upper_dev_get_rcu(_dev); if (master && master->l3mdev_ops->l3mdev_fib_table) tb_id = master->l3mdev_ops->l3mdev_fib_table(master); } return tb_id; } EXPORT_SYMBOL_GPL(l3mdev_fib_table_rcu); u32 l3mdev_fib_table_by_index(struct net *net, int ifindex) { struct net_device *dev; u32 tb_id = 0; if (!ifindex) return 0; rcu_read_lock(); dev = dev_get_by_index_rcu(net, ifindex); if (dev) tb_id = l3mdev_fib_table_rcu(dev); rcu_read_unlock(); return tb_id; } EXPORT_SYMBOL_GPL(l3mdev_fib_table_by_index); /** * l3mdev_link_scope_lookup - IPv6 route lookup based on flow for link * local and multicast addresses * @net: network namespace for device index lookup * @fl6: IPv6 flow struct for lookup * This function does not hold refcnt on the returned dst. * Caller must hold rcu_read_lock(). 
 */
struct dst_entry *l3mdev_link_scope_lookup(struct net *net,
					   struct flowi6 *fl6)
{
	struct dst_entry *dst = NULL;
	struct net_device *dev;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (fl6->flowi6_oif) {
		dev = dev_get_by_index_rcu(net, fl6->flowi6_oif);
		if (dev && netif_is_l3_slave(dev))
			dev = netdev_master_upper_dev_get_rcu(dev);

		if (dev && netif_is_l3_master(dev) &&
		    dev->l3mdev_ops->l3mdev_link_scope_lookup)
			dst = dev->l3mdev_ops->l3mdev_link_scope_lookup(dev, fl6);
	}

	return dst;
}
EXPORT_SYMBOL_GPL(l3mdev_link_scope_lookup);

/**
 * l3mdev_fib_rule_match - Determine if flowi references an
 *			   L3 master device
 * @net: network namespace for device index lookup
 * @fl:  flow struct
 * @arg: store the table the rule matched with here
 */
int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
			  struct fib_lookup_arg *arg)
{
	struct net_device *dev;
	int rc = 0;

	/* update flow ensures flowi_l3mdev is set when relevant */
	if (!fl->flowi_l3mdev)
		return 0;

	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, fl->flowi_l3mdev);
	if (dev && netif_is_l3_master(dev) &&
	    dev->l3mdev_ops->l3mdev_fib_table) {
		arg->table = dev->l3mdev_ops->l3mdev_fib_table(dev);
		rc = 1;
	}

	rcu_read_unlock();

	return rc;
}

void l3mdev_update_flow(struct net *net, struct flowi *fl)
{
	struct net_device *dev;

	rcu_read_lock();

	if (fl->flowi_oif) {
		dev = dev_get_by_index_rcu(net, fl->flowi_oif);
		if (dev) {
			if (!fl->flowi_l3mdev) {
				fl->flowi_l3mdev = l3mdev_master_ifindex_rcu(dev);
				fl->flowi_flags |= FLOWI_FLAG_L3MDEV_OIF;
			}

			/* oif set to L3mdev directs lookup to its table;
			 * reset to avoid oif match in fib_lookup
			 */
			if (netif_is_l3_master(dev))
				fl->flowi_oif = 0;
			goto out;
		}
	}

	if (fl->flowi_iif > LOOPBACK_IFINDEX && !fl->flowi_l3mdev) {
		dev = dev_get_by_index_rcu(net, fl->flowi_iif);
		if (dev)
			fl->flowi_l3mdev = l3mdev_master_ifindex_rcu(dev);
	}

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(l3mdev_update_flow);
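/*
 * [Editor's note] Usage sketch, not part of l3mdev.c: resolving the L3
 * master ifindex for a possibly VRF-enslaved device, observing the RCU
 * discipline the helpers above depend on. The function name is
 * hypothetical.
 */
static int l3mdev_demo_master_of(struct net *net, int ifindex)
{
	struct net_device *dev;
	int master = 0;

	/* dev_get_by_index_rcu() and l3mdev_master_ifindex_rcu() must
	 * run inside an RCU read-side critical section.
	 */
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		master = l3mdev_master_ifindex_rcu(dev);
	rcu_read_unlock();

	/* 0 means the index was not found or the device has no L3 master */
	return master;
}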
// SPDX-License-Identifier: GPL-2.0
/*
 * security/tomoyo/tomoyo.c
 *
 * Copyright (C) 2005-2011  NTT DATA CORPORATION
 */

#include <linux/lsm_hooks.h>
#include <uapi/linux/lsm.h>
#include "common.h"

/**
 * tomoyo_domain - Get "struct tomoyo_domain_info" for current thread.
 *
 * Returns pointer to "struct tomoyo_domain_info" for current thread.
 */
struct tomoyo_domain_info *tomoyo_domain(void)
{
	struct tomoyo_task *s = tomoyo_task(current);

	if (s->old_domain_info && !current->in_execve) {
		atomic_dec(&s->old_domain_info->users);
		s->old_domain_info = NULL;
	}
	return s->domain_info;
}

/**
 * tomoyo_cred_prepare - Target for security_prepare_creds().
 *
 * @new: Pointer to "struct cred".
 * @old: Pointer to "struct cred".
 * @gfp: Memory allocation flags.
 *
 * Returns 0.
 */
static int tomoyo_cred_prepare(struct cred *new, const struct cred *old,
			       gfp_t gfp)
{
	/* Restore old_domain_info saved by previous execve() request.
*/ struct tomoyo_task *s = tomoyo_task(current); if (s->old_domain_info && !current->in_execve) { atomic_dec(&s->domain_info->users); s->domain_info = s->old_domain_info; s->old_domain_info = NULL; } return 0; } /** * tomoyo_bprm_committed_creds - Target for security_bprm_committed_creds(). * * @bprm: Pointer to "struct linux_binprm". */ static void tomoyo_bprm_committed_creds(const struct linux_binprm *bprm) { /* Clear old_domain_info saved by execve() request. */ struct tomoyo_task *s = tomoyo_task(current); atomic_dec(&s->old_domain_info->users); s->old_domain_info = NULL; } #ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER /** * tomoyo_bprm_creds_for_exec - Target for security_bprm_creds_for_exec(). * * @bprm: Pointer to "struct linux_binprm". * * Returns 0. */ static int tomoyo_bprm_creds_for_exec(struct linux_binprm *bprm) { /* * Load policy if /sbin/tomoyo-init exists and /sbin/init is requested * for the first time. */ if (!tomoyo_policy_loaded) tomoyo_load_policy(bprm->filename); return 0; } #endif /** * tomoyo_bprm_check_security - Target for security_bprm_check(). * * @bprm: Pointer to "struct linux_binprm". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_bprm_check_security(struct linux_binprm *bprm) { struct tomoyo_task *s = tomoyo_task(current); /* * Execute permission is checked against pathname passed to execve() * using current domain. */ if (!s->old_domain_info) { const int idx = tomoyo_read_lock(); const int err = tomoyo_find_next_domain(bprm); tomoyo_read_unlock(idx); return err; } /* * Read permission is checked against interpreters using next domain. */ return tomoyo_check_open_permission(s->domain_info, &bprm->file->f_path, O_RDONLY); } /** * tomoyo_inode_getattr - Target for security_inode_getattr(). * * @path: Pointer to "struct path". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_inode_getattr(const struct path *path) { return tomoyo_path_perm(TOMOYO_TYPE_GETATTR, path, NULL); } /** * tomoyo_path_truncate - Target for security_path_truncate(). * * @path: Pointer to "struct path". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_truncate(const struct path *path) { return tomoyo_path_perm(TOMOYO_TYPE_TRUNCATE, path, NULL); } /** * tomoyo_file_truncate - Target for security_file_truncate(). * * @file: Pointer to "struct file". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_file_truncate(struct file *file) { return tomoyo_path_truncate(&file->f_path); } /** * tomoyo_path_unlink - Target for security_path_unlink(). * * @parent: Pointer to "struct path". * @dentry: Pointer to "struct dentry". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_unlink(const struct path *parent, struct dentry *dentry) { struct path path = { .mnt = parent->mnt, .dentry = dentry }; return tomoyo_path_perm(TOMOYO_TYPE_UNLINK, &path, NULL); } /** * tomoyo_path_mkdir - Target for security_path_mkdir(). * * @parent: Pointer to "struct path". * @dentry: Pointer to "struct dentry". * @mode: DAC permission mode. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_mkdir(const struct path *parent, struct dentry *dentry, umode_t mode) { struct path path = { .mnt = parent->mnt, .dentry = dentry }; return tomoyo_path_number_perm(TOMOYO_TYPE_MKDIR, &path, mode & S_IALLUGO); } /** * tomoyo_path_rmdir - Target for security_path_rmdir(). * * @parent: Pointer to "struct path". * @dentry: Pointer to "struct dentry". 
* * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_rmdir(const struct path *parent, struct dentry *dentry) { struct path path = { .mnt = parent->mnt, .dentry = dentry }; return tomoyo_path_perm(TOMOYO_TYPE_RMDIR, &path, NULL); } /** * tomoyo_path_symlink - Target for security_path_symlink(). * * @parent: Pointer to "struct path". * @dentry: Pointer to "struct dentry". * @old_name: Symlink's content. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_symlink(const struct path *parent, struct dentry *dentry, const char *old_name) { struct path path = { .mnt = parent->mnt, .dentry = dentry }; return tomoyo_path_perm(TOMOYO_TYPE_SYMLINK, &path, old_name); } /** * tomoyo_path_mknod - Target for security_path_mknod(). * * @parent: Pointer to "struct path". * @dentry: Pointer to "struct dentry". * @mode: DAC permission mode. * @dev: Device attributes. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_mknod(const struct path *parent, struct dentry *dentry, umode_t mode, unsigned int dev) { struct path path = { .mnt = parent->mnt, .dentry = dentry }; int type = TOMOYO_TYPE_CREATE; const unsigned int perm = mode & S_IALLUGO; switch (mode & S_IFMT) { case S_IFCHR: type = TOMOYO_TYPE_MKCHAR; break; case S_IFBLK: type = TOMOYO_TYPE_MKBLOCK; break; default: goto no_dev; } return tomoyo_mkdev_perm(type, &path, perm, dev); no_dev: switch (mode & S_IFMT) { case S_IFIFO: type = TOMOYO_TYPE_MKFIFO; break; case S_IFSOCK: type = TOMOYO_TYPE_MKSOCK; break; } return tomoyo_path_number_perm(type, &path, perm); } /** * tomoyo_path_link - Target for security_path_link(). * * @old_dentry: Pointer to "struct dentry". * @new_dir: Pointer to "struct path". * @new_dentry: Pointer to "struct dentry". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_link(struct dentry *old_dentry, const struct path *new_dir, struct dentry *new_dentry) { struct path path1 = { .mnt = new_dir->mnt, .dentry = old_dentry }; struct path path2 = { .mnt = new_dir->mnt, .dentry = new_dentry }; return tomoyo_path2_perm(TOMOYO_TYPE_LINK, &path1, &path2); } /** * tomoyo_path_rename - Target for security_path_rename(). * * @old_parent: Pointer to "struct path". * @old_dentry: Pointer to "struct dentry". * @new_parent: Pointer to "struct path". * @new_dentry: Pointer to "struct dentry". * @flags: Rename options. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_rename(const struct path *old_parent, struct dentry *old_dentry, const struct path *new_parent, struct dentry *new_dentry, const unsigned int flags) { struct path path1 = { .mnt = old_parent->mnt, .dentry = old_dentry }; struct path path2 = { .mnt = new_parent->mnt, .dentry = new_dentry }; if (flags & RENAME_EXCHANGE) { const int err = tomoyo_path2_perm(TOMOYO_TYPE_RENAME, &path2, &path1); if (err) return err; } return tomoyo_path2_perm(TOMOYO_TYPE_RENAME, &path1, &path2); } /** * tomoyo_file_fcntl - Target for security_file_fcntl(). * * @file: Pointer to "struct file". * @cmd: Command for fcntl(). * @arg: Argument for @cmd. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg) { if (!(cmd == F_SETFL && ((arg ^ file->f_flags) & O_APPEND))) return 0; return tomoyo_check_open_permission(tomoyo_domain(), &file->f_path, O_WRONLY | (arg & O_APPEND)); } /** * tomoyo_file_open - Target for security_file_open(). * * @f: Pointer to "struct file". 
* * Returns 0 on success, negative value otherwise. */ static int tomoyo_file_open(struct file *f) { /* Don't check read permission here if called from execve(). */ /* Illogically, FMODE_EXEC is in f_flags, not f_mode. */ if (f->f_flags & __FMODE_EXEC) return 0; return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path, f->f_flags); } /** * tomoyo_file_ioctl - Target for security_file_ioctl(). * * @file: Pointer to "struct file". * @cmd: Command for ioctl(). * @arg: Argument for @cmd. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return tomoyo_path_number_perm(TOMOYO_TYPE_IOCTL, &file->f_path, cmd); } /** * tomoyo_path_chmod - Target for security_path_chmod(). * * @path: Pointer to "struct path". * @mode: DAC permission mode. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_chmod(const struct path *path, umode_t mode) { return tomoyo_path_number_perm(TOMOYO_TYPE_CHMOD, path, mode & S_IALLUGO); } /** * tomoyo_path_chown - Target for security_path_chown(). * * @path: Pointer to "struct path". * @uid: Owner ID. * @gid: Group ID. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_chown(const struct path *path, kuid_t uid, kgid_t gid) { int error = 0; if (uid_valid(uid)) error = tomoyo_path_number_perm(TOMOYO_TYPE_CHOWN, path, from_kuid(&init_user_ns, uid)); if (!error && gid_valid(gid)) error = tomoyo_path_number_perm(TOMOYO_TYPE_CHGRP, path, from_kgid(&init_user_ns, gid)); return error; } /** * tomoyo_path_chroot - Target for security_path_chroot(). * * @path: Pointer to "struct path". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_path_chroot(const struct path *path) { return tomoyo_path_perm(TOMOYO_TYPE_CHROOT, path, NULL); } /** * tomoyo_sb_mount - Target for security_sb_mount(). * * @dev_name: Name of device file. Maybe NULL. * @path: Pointer to "struct path". * @type: Name of filesystem type. Maybe NULL. * @flags: Mount options. * @data: Optional data. Maybe NULL. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_sb_mount(const char *dev_name, const struct path *path, const char *type, unsigned long flags, void *data) { return tomoyo_mount_permission(dev_name, path, type, flags, data); } /** * tomoyo_sb_umount - Target for security_sb_umount(). * * @mnt: Pointer to "struct vfsmount". * @flags: Unmount options. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_sb_umount(struct vfsmount *mnt, int flags) { struct path path = { .mnt = mnt, .dentry = mnt->mnt_root }; return tomoyo_path_perm(TOMOYO_TYPE_UMOUNT, &path, NULL); } /** * tomoyo_sb_pivotroot - Target for security_sb_pivotroot(). * * @old_path: Pointer to "struct path". * @new_path: Pointer to "struct path". * * Returns 0 on success, negative value otherwise. */ static int tomoyo_sb_pivotroot(const struct path *old_path, const struct path *new_path) { return tomoyo_path2_perm(TOMOYO_TYPE_PIVOT_ROOT, new_path, old_path); } /** * tomoyo_socket_listen - Check permission for listen(). * * @sock: Pointer to "struct socket". * @backlog: Backlog parameter. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_socket_listen(struct socket *sock, int backlog) { return tomoyo_socket_listen_permission(sock); } /** * tomoyo_socket_connect - Check permission for connect(). * * @sock: Pointer to "struct socket". * @addr: Pointer to "struct sockaddr". * @addr_len: Size of @addr. 
* * Returns 0 on success, negative value otherwise. */ static int tomoyo_socket_connect(struct socket *sock, struct sockaddr *addr, int addr_len) { return tomoyo_socket_connect_permission(sock, addr, addr_len); } /** * tomoyo_socket_bind - Check permission for bind(). * * @sock: Pointer to "struct socket". * @addr: Pointer to "struct sockaddr". * @addr_len: Size of @addr. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_socket_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { return tomoyo_socket_bind_permission(sock, addr, addr_len); } /** * tomoyo_socket_sendmsg - Check permission for sendmsg(). * * @sock: Pointer to "struct socket". * @msg: Pointer to "struct msghdr". * @size: Size of message. * * Returns 0 on success, negative value otherwise. */ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size) { return tomoyo_socket_sendmsg_permission(sock, msg, size); } struct lsm_blob_sizes tomoyo_blob_sizes __ro_after_init = { .lbs_task = sizeof(struct tomoyo_task), }; /** * tomoyo_task_alloc - Target for security_task_alloc(). * * @task: Pointer to "struct task_struct". * @clone_flags: clone() flags. * * Returns 0. */ static int tomoyo_task_alloc(struct task_struct *task, unsigned long clone_flags) { struct tomoyo_task *old = tomoyo_task(current); struct tomoyo_task *new = tomoyo_task(task); new->domain_info = old->domain_info; atomic_inc(&new->domain_info->users); new->old_domain_info = NULL; return 0; } /** * tomoyo_task_free - Target for security_task_free(). * * @task: Pointer to "struct task_struct". */ static void tomoyo_task_free(struct task_struct *task) { struct tomoyo_task *s = tomoyo_task(task); if (s->domain_info) { atomic_dec(&s->domain_info->users); s->domain_info = NULL; } if (s->old_domain_info) { atomic_dec(&s->old_domain_info->users); s->old_domain_info = NULL; } } static const struct lsm_id tomoyo_lsmid = { .name = "tomoyo", .id = LSM_ID_TOMOYO, }; /* tomoyo_hooks is used for registering TOMOYO. 
 */
static struct security_hook_list tomoyo_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(cred_prepare, tomoyo_cred_prepare),
	LSM_HOOK_INIT(bprm_committed_creds, tomoyo_bprm_committed_creds),
	LSM_HOOK_INIT(task_alloc, tomoyo_task_alloc),
	LSM_HOOK_INIT(task_free, tomoyo_task_free),
#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
	LSM_HOOK_INIT(bprm_creds_for_exec, tomoyo_bprm_creds_for_exec),
#endif
	LSM_HOOK_INIT(bprm_check_security, tomoyo_bprm_check_security),
	LSM_HOOK_INIT(file_fcntl, tomoyo_file_fcntl),
	LSM_HOOK_INIT(file_open, tomoyo_file_open),
	LSM_HOOK_INIT(file_truncate, tomoyo_file_truncate),
	LSM_HOOK_INIT(path_truncate, tomoyo_path_truncate),
	LSM_HOOK_INIT(path_unlink, tomoyo_path_unlink),
	LSM_HOOK_INIT(path_mkdir, tomoyo_path_mkdir),
	LSM_HOOK_INIT(path_rmdir, tomoyo_path_rmdir),
	LSM_HOOK_INIT(path_symlink, tomoyo_path_symlink),
	LSM_HOOK_INIT(path_mknod, tomoyo_path_mknod),
	LSM_HOOK_INIT(path_link, tomoyo_path_link),
	LSM_HOOK_INIT(path_rename, tomoyo_path_rename),
	LSM_HOOK_INIT(inode_getattr, tomoyo_inode_getattr),
	LSM_HOOK_INIT(file_ioctl, tomoyo_file_ioctl),
	LSM_HOOK_INIT(file_ioctl_compat, tomoyo_file_ioctl),
	LSM_HOOK_INIT(path_chmod, tomoyo_path_chmod),
	LSM_HOOK_INIT(path_chown, tomoyo_path_chown),
	LSM_HOOK_INIT(path_chroot, tomoyo_path_chroot),
	LSM_HOOK_INIT(sb_mount, tomoyo_sb_mount),
	LSM_HOOK_INIT(sb_umount, tomoyo_sb_umount),
	LSM_HOOK_INIT(sb_pivotroot, tomoyo_sb_pivotroot),
	LSM_HOOK_INIT(socket_bind, tomoyo_socket_bind),
	LSM_HOOK_INIT(socket_connect, tomoyo_socket_connect),
	LSM_HOOK_INIT(socket_listen, tomoyo_socket_listen),
	LSM_HOOK_INIT(socket_sendmsg, tomoyo_socket_sendmsg),
};

/* Lock for GC. */
DEFINE_SRCU(tomoyo_ss);

int tomoyo_enabled __ro_after_init = 1;

/**
 * tomoyo_init - Register TOMOYO Linux as an LSM module.
 *
 * Returns 0.
 */
static int __init tomoyo_init(void)
{
	struct tomoyo_task *s = tomoyo_task(current);

	/* register ourselves with the security framework */
	security_add_hooks(tomoyo_hooks, ARRAY_SIZE(tomoyo_hooks),
			   &tomoyo_lsmid);
	pr_info("TOMOYO Linux initialized\n");
	s->domain_info = &tomoyo_kernel_domain;
	atomic_inc(&tomoyo_kernel_domain.users);
	s->old_domain_info = NULL;
	tomoyo_mm_init();

	return 0;
}

DEFINE_LSM(tomoyo) = {
	.name = "tomoyo",
	.enabled = &tomoyo_enabled,
	.flags = LSM_FLAG_LEGACY_MAJOR,
	.blobs = &tomoyo_blob_sizes,
	.init = tomoyo_init,
};
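/*
 * [Editor's note] Pattern sketch, not part of tomoyo.c: the minimal
 * shape of an LSM registration as TOMOYO uses it above -- a hook
 * table, an lsm_id, and a DEFINE_LSM() descriptor whose .init runs at
 * boot. Everything named "demo" here is hypothetical; a real LSM would
 * also have a reserved LSM_ID_* value and real policy checks.
 */
#include <linux/lsm_hooks.h>
#include <uapi/linux/lsm.h>

static int demo_file_open(struct file *f)
{
	return 0;	/* allow everything; a real hook consults policy */
}

static struct security_hook_list demo_hooks[] __ro_after_init = {
	LSM_HOOK_INIT(file_open, demo_file_open),
};

static const struct lsm_id demo_lsmid = {
	.name = "demo",
	.id = LSM_ID_UNDEF,	/* placeholder; real LSMs register an ID */
};

static int __init demo_init(void)
{
	security_add_hooks(demo_hooks, ARRAY_SIZE(demo_hooks), &demo_lsmid);
	return 0;
}

DEFINE_LSM(demo) = {
	.name = "demo",
	.init = demo_init,
};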
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCULIST_BL_H
#define _LINUX_RCULIST_BL_H

/*
 * RCU-protected bl list version. See include/linux/list_bl.h.
 */
#include <linux/list_bl.h>
#include <linux/rcupdate.h>

static inline void hlist_bl_set_first_rcu(struct hlist_bl_head *h,
					struct hlist_bl_node *n)
{
	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
	LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
							LIST_BL_LOCKMASK);
	rcu_assign_pointer(h->first,
		(struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK));
}

static inline struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h)
{
	return (struct hlist_bl_node *)
		((unsigned long)rcu_dereference_check(h->first,
			hlist_bl_is_locked(h)) & ~LIST_BL_LOCKMASK);
}

/**
 * hlist_bl_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_bl_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
 * or hlist_bl_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_bl_for_each_entry().
 */
static inline void hlist_bl_del_rcu(struct hlist_bl_node *n)
{
	__hlist_bl_del(n);
	n->pprev = LIST_POISON2;
}

/**
 * hlist_bl_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist_bl,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
 * or hlist_bl_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_bl_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_bl_add_head_rcu(struct hlist_bl_node *n,
					struct hlist_bl_head *h)
{
	struct hlist_bl_node *first;

	/* don't need hlist_bl_first_rcu because we're under lock */
	first = hlist_bl_first(h);

	n->next = first;
	if (first)
		first->pprev = &n->next;
	n->pprev = &h->first;

	/* need _rcu because we can have concurrent lock free readers */
	hlist_bl_set_first_rcu(h, n);
}

/**
 * hlist_bl_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_bl_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_bl_node within the struct.
 */
#define hlist_bl_for_each_entry_rcu(tpos, pos, head, member)		\
	for (pos = hlist_bl_first_rcu(head);				\
		pos &&							\
		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1; }); \
		pos = rcu_dereference_raw(pos->next))

#endif
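/*
 * [Editor's note] Usage sketch, not part of rculist_bl.h: a lock-free
 * reader walking an hlist_bl under rcu_read_lock(), as the comments
 * above require. "struct demo_entry" and demo_contains() are
 * hypothetical names introduced only for this illustration.
 */
struct demo_entry {
	struct hlist_bl_node node;
	int key;
};

static bool demo_contains(struct hlist_bl_head *head, int key)
{
	struct demo_entry *e;
	struct hlist_bl_node *pos;
	bool found = false;

	/* the traversal primitive must be guarded by rcu_read_lock() */
	rcu_read_lock();
	hlist_bl_for_each_entry_rcu(e, pos, head, node) {
		if (e->key == key) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}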
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	MPLS GSO Support
 *
 *	Authors: Simon Horman (horms@verge.net.au)
 *
 *	Based on: GSO portions of net/ipv4/gre.c
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/err.h>
#include <linux/module.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/gso.h>
#include <net/mpls.h>

static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	u16 mac_offset = skb->mac_header;
	netdev_features_t mpls_features;
	u16 mac_len = skb->mac_len;
	__be16 mpls_protocol;
	unsigned int mpls_hlen;

	if (!skb_inner_network_header_was_set(skb))
		goto out;

	skb_reset_network_header(skb);
	mpls_hlen = skb_inner_network_header(skb) - skb_network_header(skb);
	if (unlikely(!mpls_hlen || mpls_hlen % MPLS_HLEN))
		goto out;
	if (unlikely(!pskb_may_pull(skb, mpls_hlen)))
		goto out;

	/* Setup inner SKB. */
	mpls_protocol = skb->protocol;
	skb->protocol = skb->inner_protocol;

	__skb_pull(skb, mpls_hlen);

	skb->mac_len = 0;
	skb_reset_mac_header(skb);

	/* Segment inner packet. */
	mpls_features = skb->dev->mpls_features & features;
	segs = skb_mac_gso_segment(skb, mpls_features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, mpls_protocol, mpls_hlen, mac_offset,
				     mac_len);
		goto out;
	}
	skb = segs;

	mpls_hlen += mac_len;
	do {
		skb->mac_len = mac_len;
		skb->protocol = mpls_protocol;

		skb_reset_inner_network_header(skb);

		__skb_push(skb, mpls_hlen);

		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
	} while ((skb = skb->next));

out:
	return segs;
}

static struct packet_offload mpls_mc_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_MPLS_MC),
	.priority = 15,
	.callbacks = {
		.gso_segment = mpls_gso_segment,
	},
};

static struct packet_offload mpls_uc_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_MPLS_UC),
	.priority = 15,
	.callbacks = {
		.gso_segment = mpls_gso_segment,
	},
};

static int __init mpls_gso_init(void)
{
	pr_info("MPLS GSO support\n");

	dev_add_offload(&mpls_uc_offload);
	dev_add_offload(&mpls_mc_offload);

	return 0;
}

static void __exit mpls_gso_exit(void)
{
	dev_remove_offload(&mpls_uc_offload);
	dev_remove_offload(&mpls_mc_offload);
}

module_init(mpls_gso_init);
module_exit(mpls_gso_exit);

MODULE_DESCRIPTION("MPLS GSO support");
MODULE_AUTHOR("Simon Horman <horms@verge.net.au>");
MODULE_LICENSE("GPL");
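/*
 * [Editor's note] Pattern sketch, not part of mpls_gso.c: registering a
 * GSO callback for another ethertype, mirroring the mpls_uc/mpls_mc
 * registrations above. The "demo" names are hypothetical; 0x88b5 is the
 * IEEE 802 local-experimental ethertype, used here only as a safe
 * placeholder.
 */
static struct sk_buff *demo_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	/* a real callback would split skb into MTU-sized segments */
	return ERR_PTR(-EINVAL);
}

static struct packet_offload demo_offload __read_mostly = {
	.type = cpu_to_be16(0x88b5),
	.priority = 15,
	.callbacks = {
		.gso_segment = demo_gso_segment,
	},
};

static int __init demo_offload_init(void)
{
	dev_add_offload(&demo_offload);
	return 0;
}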
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FIB_LOOKUP_H
#define _FIB_LOOKUP_H

#include <linux/types.h>
#include <linux/list.h>
#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>

struct fib_alias {
	struct hlist_node	fa_list;
	struct fib_info		*fa_info;
	dscp_t			fa_dscp;
	u8			fa_type;
	u8			fa_state;
	u8			fa_slen;
	u32			tb_id;
	s16			fa_default;
	u8			offload;
	u8			trap;
	u8			offload_failed;
	struct rcu_head		rcu;
};

#define FA_S_ACCESSED	0x01

/* Don't write on fa_state unless needed, to keep it shared on all cpus */
static inline void fib_alias_accessed(struct fib_alias *fa)
{
	if (!(fa->fa_state & FA_S_ACCESSED))
		fa->fa_state |= FA_S_ACCESSED;
}

/* Exported by fib_semantics.c */
void fib_release_info(struct fib_info *);
struct fib_info *fib_create_info(struct fib_config *cfg,
				 struct netlink_ext_ack *extack);
int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
		 struct netlink_ext_ack *extack);
bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi);
int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
		  const struct fib_rt_info *fri, unsigned int flags);
void rtmsg_fib(int event, __be32 key, struct fib_alias *fa, int dst_len,
	       u32 tb_id, const struct nl_info *info, unsigned int nlm_flags);
size_t fib_nlmsg_size(struct fib_info *fi);

static inline void fib_result_assign(struct fib_result *res,
				     struct fib_info *fi)
{
	/* we used to play games with refcounts, but we now use RCU */
	res->fi = fi;
	res->nhc = fib_info_nhc(fi, 0);
}

struct fib_prop {
	int	error;
	u8	scope;
};

extern const struct fib_prop fib_props[RTN_MAX + 1];

#endif /* _FIB_LOOKUP_H */
// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the symbol table type.
 *
 * Author : Stephen Smalley, <stephen.smalley.work@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "symtab.h"

static unsigned int symhash(const void *key)
{
	/*
	 * djb2a
	 * Public domain from cdb v0.75
	 */
	unsigned int hash = 5381;
	unsigned char c;

	while ((c = *(const unsigned char *)key++))
		hash = ((hash << 5) + hash) ^ c;

	return hash;
}

static int symcmp(const void *key1, const void *key2)
{
	const char *keyp1, *keyp2;

	keyp1 = key1;
	keyp2 = key2;
	return strcmp(keyp1, keyp2);
}

static const struct hashtab_key_params symtab_key_params = {
	.hash = symhash,
	.cmp = symcmp,
};

int symtab_init(struct symtab *s, u32 size)
{
	s->nprim = 0;
	return hashtab_init(&s->table, size);
}

int symtab_insert(struct symtab *s, char *name, void *datum)
{
	return hashtab_insert(&s->table, name, datum, symtab_key_params);
}

void *symtab_search(struct symtab *s, const char *name)
{
	return hashtab_search(&s->table, name, symtab_key_params);
}
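/*
 * [Editor's note] Usage sketch, not part of symtab.c: exercising the
 * symtab API above. The table size, key and datum are hypothetical;
 * keys are hashed with the djb2a function shown earlier.
 */
static int demo_symtab(void)
{
	static char name[] = "init_t";
	static int datum = 42;
	struct symtab s;
	int rc;

	rc = symtab_init(&s, 256);	/* 256 hash buckets */
	if (rc)
		return rc;

	rc = symtab_insert(&s, name, &datum);
	if (rc)
		return rc;

	/* a lookup with an equal string key returns the stored datum */
	return symtab_search(&s, "init_t") == &datum ? 0 : -ENOENT;
}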
/*
 *  Route Plug-In
 *  Copyright (c) 2000 by Abramo Bagnara <abramo@alsa-project.org>
 *
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Library General Public License as
 *   published by the Free Software Foundation; either version 2 of
 *   the License, or (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU Library General Public License for more details.
 *
 *   You should have received a copy of the GNU Library General Public
 *   License along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/time.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include "pcm_plugin.h"

static void zero_areas(struct snd_pcm_plugin_channel *dvp, int ndsts,
		       snd_pcm_uframes_t frames, snd_pcm_format_t format)
{
	int dst = 0;

	for (; dst < ndsts; ++dst) {
		if (dvp->wanted)
			snd_pcm_area_silence(&dvp->area, 0, frames, format);
		dvp->enabled = 0;
		dvp++;
	}
}

static inline void copy_area(const struct snd_pcm_plugin_channel *src_channel,
			     struct snd_pcm_plugin_channel *dst_channel,
			     snd_pcm_uframes_t frames, snd_pcm_format_t format)
{
	dst_channel->enabled = 1;
	snd_pcm_area_copy(&src_channel->area, 0, &dst_channel->area, 0,
			  frames, format);
}

static snd_pcm_sframes_t route_transfer(struct snd_pcm_plugin *plugin,
					const struct snd_pcm_plugin_channel *src_channels,
					struct snd_pcm_plugin_channel *dst_channels,
					snd_pcm_uframes_t frames)
{
	int nsrcs, ndsts, dst;
	struct snd_pcm_plugin_channel *dvp;
	snd_pcm_format_t format;

	if (snd_BUG_ON(!plugin || !src_channels || !dst_channels))
		return -ENXIO;
	if (frames == 0)
		return 0;
	if (frames > dst_channels[0].frames)
		frames = dst_channels[0].frames;

	nsrcs = plugin->src_format.channels;
	ndsts = plugin->dst_format.channels;

	format = plugin->dst_format.format;
	dvp = dst_channels;
	if (nsrcs <= 1) {
		/* expand to all channels */
		for (dst = 0; dst < ndsts; ++dst) {
			copy_area(src_channels, dvp, frames, format);
			dvp++;
		}
		return frames;
	}

	for (dst = 0; dst < ndsts && dst < nsrcs; ++dst) {
		copy_area(src_channels, dvp, frames, format);
		dvp++;
		src_channels++;
	}
	if (dst < ndsts)
		zero_areas(dvp, ndsts - dst, frames, format);
	return frames;
}

int snd_pcm_plugin_build_route(struct snd_pcm_substream *plug,
			       struct snd_pcm_plugin_format *src_format,
			       struct snd_pcm_plugin_format *dst_format,
			       struct snd_pcm_plugin **r_plugin)
{
	struct snd_pcm_plugin *plugin;
	int err;

	if (snd_BUG_ON(!r_plugin))
		return -ENXIO;
	*r_plugin = NULL;
	if (snd_BUG_ON(src_format->rate != dst_format->rate))
		return -ENXIO;
	if (snd_BUG_ON(src_format->format != dst_format->format))
		return -ENXIO;

	err = snd_pcm_plugin_build(plug, "route conversion",
				   src_format, dst_format, 0, &plugin);
	if (err < 0)
		return err;
	plugin->transfer = route_transfer;
	*r_plugin = plugin;
	return 0;
}
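/*
 * [Editor's note] Usage sketch, not part of the route plug-in above:
 * requesting a route plugin that fans a mono stream out to stereo. The
 * format values are hypothetical; rate and sample format must match
 * between source and destination, as snd_pcm_plugin_build_route()
 * enforces.
 */
static int demo_build_route(struct snd_pcm_substream *plug,
			    struct snd_pcm_plugin **r_plugin)
{
	struct snd_pcm_plugin_format src = {
		.format = SNDRV_PCM_FORMAT_S16_LE,
		.rate = 48000,
		.channels = 1,
	};
	struct snd_pcm_plugin_format dst = src;

	/* with one source channel, route_transfer() copies channel 0
	 * to every destination channel
	 */
	dst.channels = 2;
	return snd_pcm_plugin_build_route(plug, &src, &dst, r_plugin);
}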
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for USB Windows Media Center Ed. eHome Infrared Transceivers
 *
 * Copyright (c) 2010-2011, Jarod Wilson <jarod@redhat.com>
 *
 * Based on the original lirc_mceusb and lirc_mceusb2 drivers, by Dan
 * Conti, Martin Blatter and Daniel Melander, the latter of which was
 * in turn also based on the lirc_atiusb driver by Paul Miller. The
 * two mce drivers were merged into one by Jarod Wilson, with transmit
 * support for the 1st-gen device added primarily by Patrick Calhoun,
 * with a few tweaks by Jarod. Debugging improvements and proper
 * support for what appears to be 3rd-gen hardware added by Jarod.
 * Initial port from lirc driver to ir-core driver by Jarod, based
 * partially on a port to an earlier proposed IR infrastructure by
 * Jon Smirl, which included enhancements and simplifications to the
 * incoming IR buffer parsing routines.
 *
 * Updated in July of 2011 with the aid of Microsoft's official
 * remote/transceiver requirements and specification document, found at
 * download.microsoft.com, title
 * Windows-Media-Center-RC-IR-Collection-Green-Button-Specification-03-08-2011-V2.pdf
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <media/rc-core.h>

#define DRIVER_VERSION	"1.95"
#define DRIVER_AUTHOR	"Jarod Wilson <jarod@redhat.com>"
#define DRIVER_DESC	"Windows Media Center Ed.
eHome Infrared Transceiver " \ "device driver" #define DRIVER_NAME "mceusb" #define USB_TX_TIMEOUT 1000 /* in milliseconds */ #define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */ #define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */ /* MCE constants */ #define MCE_IRBUF_SIZE 128 /* TX IR buffer length */ #define MCE_TIME_UNIT 50 /* Approx 50us resolution */ #define MCE_PACKET_SIZE 31 /* Max length of packet (with header) */ #define MCE_IRDATA_HEADER (0x80 + MCE_PACKET_SIZE - 1) /* Actual format is 0x80 + num_bytes */ #define MCE_IRDATA_TRAILER 0x80 /* End of IR data */ #define MCE_MAX_CHANNELS 2 /* Two transmitters, hardware dependent? */ #define MCE_DEFAULT_TX_MASK 0x03 /* Vals: TX1=0x01, TX2=0x02, ALL=0x03 */ #define MCE_PULSE_BIT 0x80 /* Pulse bit, MSB set == PULSE else SPACE */ #define MCE_PULSE_MASK 0x7f /* Pulse mask */ #define MCE_MAX_PULSE_LENGTH 0x7f /* Longest transmittable pulse symbol */ /* * The interface between the host and the IR hardware is command-response * based. All commands and responses have a consistent format, where a lead * byte always identifies the type of data following it. The lead byte has * a port value in the 3 highest bits and a length value in the 5 lowest * bits. * * The length field is overloaded, with a value of 11111 indicating that the * following byte is a command or response code, and the length of the entire * message is determined by the code. If the length field is not 11111, then * it specifies the number of bytes of port data that follow. */ #define MCE_CMD 0x1f #define MCE_PORT_IR 0x4 /* (0x4 << 5) | MCE_CMD = 0x9f */ #define MCE_PORT_SYS 0x7 /* (0x7 << 5) | MCE_CMD = 0xff */ #define MCE_PORT_SER 0x6 /* 0xc0 through 0xdf flush & 0x1f bytes */ #define MCE_PORT_MASK 0xe0 /* Mask out command bits */ /* Command port headers */ #define MCE_CMD_PORT_IR 0x9f /* IR-related cmd/rsp */ #define MCE_CMD_PORT_SYS 0xff /* System (non-IR) device cmd/rsp */ /* Commands that set device state (2-4 bytes in length) */ #define MCE_CMD_RESET 0xfe /* Reset device, 2 bytes */ #define MCE_CMD_RESUME 0xaa /* Resume device after error, 2 bytes */ #define MCE_CMD_SETIRCFS 0x06 /* Set tx carrier, 4 bytes */ #define MCE_CMD_SETIRTIMEOUT 0x0c /* Set timeout, 4 bytes */ #define MCE_CMD_SETIRTXPORTS 0x08 /* Set tx ports, 3 bytes */ #define MCE_CMD_SETIRRXPORTEN 0x14 /* Set rx ports, 3 bytes */ #define MCE_CMD_FLASHLED 0x23 /* Flash receiver LED, 2 bytes */ /* Commands that query device state (all 2 bytes, unless noted) */ #define MCE_CMD_GETIRCFS 0x07 /* Get carrier */ #define MCE_CMD_GETIRTIMEOUT 0x0d /* Get timeout */ #define MCE_CMD_GETIRTXPORTS 0x13 /* Get tx ports */ #define MCE_CMD_GETIRRXPORTEN 0x15 /* Get rx ports */ #define MCE_CMD_GETPORTSTATUS 0x11 /* Get tx port status, 3 bytes */ #define MCE_CMD_GETIRNUMPORTS 0x16 /* Get number of ports */ #define MCE_CMD_GETWAKESOURCE 0x17 /* Get wake source */ #define MCE_CMD_GETEMVER 0x22 /* Get emulator interface version */ #define MCE_CMD_GETDEVDETAILS 0x21 /* Get device details (em ver2 only) */ #define MCE_CMD_GETWAKESUPPORT 0x20 /* Get wake details (em ver2 only) */ #define MCE_CMD_GETWAKEVERSION 0x18 /* Get wake pattern (em ver2 only) */ /* Misc commands */ #define MCE_CMD_NOP 0xff /* No operation */ /* Responses to commands (non-error cases) */ #define MCE_RSP_EQIRCFS 0x06 /* tx carrier, 4 bytes */ #define MCE_RSP_EQIRTIMEOUT 0x0c /* rx timeout, 4 bytes */ #define MCE_RSP_GETWAKESOURCE 0x17 /* wake source, 3 bytes */ #define MCE_RSP_EQIRTXPORTS 0x08 /* tx port mask, 3 bytes */ #define 
MCE_RSP_EQIRRXPORTEN 0x14 /* rx port mask, 3 bytes */ #define MCE_RSP_GETPORTSTATUS 0x11 /* tx port status, 7 bytes */ #define MCE_RSP_EQIRRXCFCNT 0x15 /* rx carrier count, 4 bytes */ #define MCE_RSP_EQIRNUMPORTS 0x16 /* number of ports, 4 bytes */ #define MCE_RSP_EQWAKESUPPORT 0x20 /* wake capabilities, 3 bytes */ #define MCE_RSP_EQWAKEVERSION 0x18 /* wake pattern details, 6 bytes */ #define MCE_RSP_EQDEVDETAILS 0x21 /* device capabilities, 3 bytes */ #define MCE_RSP_EQEMVER 0x22 /* emulator interface ver, 3 bytes */ #define MCE_RSP_FLASHLED 0x23 /* success flashing LED, 2 bytes */ /* Responses to error cases, must send MCE_CMD_RESUME to clear them */ #define MCE_RSP_CMD_ILLEGAL 0xfe /* illegal command for port, 2 bytes */ #define MCE_RSP_TX_TIMEOUT 0x81 /* tx timed out, 2 bytes */ /* Misc commands/responses not defined in the MCE remote/transceiver spec */ #define MCE_CMD_SIG_END 0x01 /* End of signal */ #define MCE_CMD_PING 0x03 /* Ping device */ #define MCE_CMD_UNKNOWN 0x04 /* Unknown */ #define MCE_CMD_UNKNOWN2 0x05 /* Unknown */ #define MCE_CMD_UNKNOWN3 0x09 /* Unknown */ #define MCE_CMD_UNKNOWN4 0x0a /* Unknown */ #define MCE_CMD_G_REVISION 0x0b /* Get hw/sw revision */ #define MCE_CMD_UNKNOWN5 0x0e /* Unknown */ #define MCE_CMD_UNKNOWN6 0x0f /* Unknown */ #define MCE_CMD_UNKNOWN8 0x19 /* Unknown */ #define MCE_CMD_UNKNOWN9 0x1b /* Unknown */ #define MCE_CMD_NULL 0x00 /* These show up various places... */ /* if buf[i] & MCE_PORT_MASK == 0x80 and buf[i] != MCE_CMD_PORT_IR, * then we're looking at a raw IR data sample */ #define MCE_COMMAND_IRDATA 0x80 #define MCE_PACKET_LENGTH_MASK 0x1f /* Packet length mask */ #define VENDOR_PHILIPS 0x0471 #define VENDOR_SMK 0x0609 #define VENDOR_TATUNG 0x1460 #define VENDOR_GATEWAY 0x107b #define VENDOR_SHUTTLE 0x1308 #define VENDOR_SHUTTLE2 0x051c #define VENDOR_MITSUMI 0x03ee #define VENDOR_TOPSEED 0x1784 #define VENDOR_RICAVISION 0x179d #define VENDOR_ITRON 0x195d #define VENDOR_FIC 0x1509 #define VENDOR_LG 0x043e #define VENDOR_MICROSOFT 0x045e #define VENDOR_FORMOSA 0x147a #define VENDOR_FINTEK 0x1934 #define VENDOR_PINNACLE 0x2304 #define VENDOR_ECS 0x1019 #define VENDOR_WISTRON 0x0fb8 #define VENDOR_COMPRO 0x185b #define VENDOR_NORTHSTAR 0x04eb #define VENDOR_REALTEK 0x0bda #define VENDOR_TIVO 0x105a #define VENDOR_CONEXANT 0x0572 #define VENDOR_TWISTEDMELON 0x2596 #define VENDOR_HAUPPAUGE 0x2040 #define VENDOR_PCTV 0x2013 #define VENDOR_ADAPTEC 0x03f3 enum mceusb_model_type { MCE_GEN2 = 0, /* Most boards */ MCE_GEN1, MCE_GEN3, MCE_GEN3_BROKEN_IRTIMEOUT, MCE_GEN2_TX_INV, MCE_GEN2_TX_INV_RX_GOOD, POLARIS_EVK, CX_HYBRID_TV, MULTIFUNCTION, TIVO_KIT, MCE_GEN2_NO_TX, HAUPPAUGE_CX_HYBRID_TV, EVROMEDIA_FULL_HYBRID_FULLHD, ASTROMETA_T2HYBRID, }; struct mceusb_model { u32 mce_gen1:1; u32 mce_gen2:1; u32 mce_gen3:1; u32 tx_mask_normal:1; u32 no_tx:1; u32 broken_irtimeout:1; /* * 2nd IR receiver (short-range, wideband) for learning mode: * 0, absent 2nd receiver (rx2) * 1, rx2 present * 2, rx2 which under counts IR carrier cycles */ u32 rx2; int ir_intfnum; const char *rc_map; /* Allow specify a per-board map */ const char *name; /* per-board name */ }; static const struct mceusb_model mceusb_model[] = { [MCE_GEN1] = { .mce_gen1 = 1, .tx_mask_normal = 1, .rx2 = 2, }, [MCE_GEN2] = { .mce_gen2 = 1, .rx2 = 2, }, [MCE_GEN2_NO_TX] = { .mce_gen2 = 1, .no_tx = 1, }, [MCE_GEN2_TX_INV] = { .mce_gen2 = 1, .tx_mask_normal = 1, .rx2 = 1, }, [MCE_GEN2_TX_INV_RX_GOOD] = { .mce_gen2 = 1, .tx_mask_normal = 1, .rx2 = 2, }, [MCE_GEN3] = { .mce_gen3 = 1, .tx_mask_normal 
= 1, .rx2 = 2, }, [MCE_GEN3_BROKEN_IRTIMEOUT] = { .mce_gen3 = 1, .tx_mask_normal = 1, .rx2 = 2, .broken_irtimeout = 1 }, [POLARIS_EVK] = { /* * In fact, the EVK is shipped without * remotes, but we should have something handy, * to allow testing it */ .name = "Conexant Hybrid TV (cx231xx) MCE IR", .rx2 = 2, }, [CX_HYBRID_TV] = { .no_tx = 1, /* tx isn't wired up at all */ .name = "Conexant Hybrid TV (cx231xx) MCE IR", }, [HAUPPAUGE_CX_HYBRID_TV] = { .no_tx = 1, /* eeprom says it has no tx */ .name = "Conexant Hybrid TV (cx231xx) MCE IR no TX", }, [MULTIFUNCTION] = { .mce_gen2 = 1, .ir_intfnum = 2, .rx2 = 2, }, [TIVO_KIT] = { .mce_gen2 = 1, .rc_map = RC_MAP_TIVO, .rx2 = 2, }, [EVROMEDIA_FULL_HYBRID_FULLHD] = { .name = "Evromedia USB Full Hybrid Full HD", .no_tx = 1, .rc_map = RC_MAP_MSI_DIGIVOX_III, }, [ASTROMETA_T2HYBRID] = { .name = "Astrometa T2Hybrid", .no_tx = 1, .rc_map = RC_MAP_ASTROMETA_T2HYBRID, } }; static const struct usb_device_id mceusb_dev_table[] = { /* Original Microsoft MCE IR Transceiver (often HP-branded) */ { USB_DEVICE(VENDOR_MICROSOFT, 0x006d), .driver_info = MCE_GEN1 }, /* Philips Infrared Transceiver - Sahara branded */ { USB_DEVICE(VENDOR_PHILIPS, 0x0608) }, /* Philips Infrared Transceiver - HP branded */ { USB_DEVICE(VENDOR_PHILIPS, 0x060c), .driver_info = MCE_GEN2_TX_INV }, /* Philips SRM5100 */ { USB_DEVICE(VENDOR_PHILIPS, 0x060d) }, /* Philips Infrared Transceiver - Omaura */ { USB_DEVICE(VENDOR_PHILIPS, 0x060f) }, /* Philips Infrared Transceiver - Spinel plus */ { USB_DEVICE(VENDOR_PHILIPS, 0x0613) }, /* Philips eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_PHILIPS, 0x0815) }, /* Philips/Spinel plus IR transceiver for ASUS */ { USB_DEVICE(VENDOR_PHILIPS, 0x206c) }, /* Philips/Spinel plus IR transceiver for ASUS */ { USB_DEVICE(VENDOR_PHILIPS, 0x2088) }, /* Philips IR transceiver (Dell branded) */ { USB_DEVICE(VENDOR_PHILIPS, 0x2093), .driver_info = MCE_GEN2_TX_INV }, /* Realtek MCE IR Receiver and card reader */ { USB_DEVICE(VENDOR_REALTEK, 0x0161), .driver_info = MULTIFUNCTION }, /* SMK/Toshiba G83C0004D410 */ { USB_DEVICE(VENDOR_SMK, 0x031d), .driver_info = MCE_GEN2_TX_INV_RX_GOOD }, /* SMK eHome Infrared Transceiver (Sony VAIO) */ { USB_DEVICE(VENDOR_SMK, 0x0322), .driver_info = MCE_GEN2_TX_INV }, /* bundled with Hauppauge PVR-150 */ { USB_DEVICE(VENDOR_SMK, 0x0334), .driver_info = MCE_GEN2_TX_INV }, /* SMK eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_SMK, 0x0338) }, /* SMK/I-O Data GV-MC7/RCKIT Receiver */ { USB_DEVICE(VENDOR_SMK, 0x0353), .driver_info = MCE_GEN2_NO_TX }, /* SMK RXX6000 Infrared Receiver */ { USB_DEVICE(VENDOR_SMK, 0x0357), .driver_info = MCE_GEN2_NO_TX }, /* Tatung eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_TATUNG, 0x9150) }, /* Shuttle eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_SHUTTLE, 0xc001) }, /* Shuttle eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_SHUTTLE2, 0xc001) }, /* Gateway eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_GATEWAY, 0x3009) }, /* Mitsumi */ { USB_DEVICE(VENDOR_MITSUMI, 0x2501) }, /* Topseed eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_TOPSEED, 0x0001), .driver_info = MCE_GEN2_TX_INV }, /* Topseed HP eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_TOPSEED, 0x0006), .driver_info = MCE_GEN2_TX_INV }, /* Topseed eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_TOPSEED, 0x0007), .driver_info = MCE_GEN2_TX_INV }, /* Topseed eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_TOPSEED, 0x0008), .driver_info = MCE_GEN3 }, /* Topseed eHome Infrared Transceiver */ { 
USB_DEVICE(VENDOR_TOPSEED, 0x000a), .driver_info = MCE_GEN2_TX_INV }, /* Topseed eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_TOPSEED, 0x0011), .driver_info = MCE_GEN3_BROKEN_IRTIMEOUT }, /* Ricavision internal Infrared Transceiver */ { USB_DEVICE(VENDOR_RICAVISION, 0x0010) }, /* Itron ione Libra Q-11 */ { USB_DEVICE(VENDOR_ITRON, 0x7002) }, /* FIC eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_FIC, 0x9242) }, /* LG eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_LG, 0x9803) }, /* Microsoft MCE Infrared Transceiver */ { USB_DEVICE(VENDOR_MICROSOFT, 0x00a0) }, /* Formosa eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_FORMOSA, 0xe015) }, /* Formosa21 / eHome Infrared Receiver */ { USB_DEVICE(VENDOR_FORMOSA, 0xe016) }, /* Formosa aim / Trust MCE Infrared Receiver */ { USB_DEVICE(VENDOR_FORMOSA, 0xe017), .driver_info = MCE_GEN2_NO_TX }, /* Formosa Industrial Computing / Beanbag Emulation Device */ { USB_DEVICE(VENDOR_FORMOSA, 0xe018) }, /* Formosa21 / eHome Infrared Receiver */ { USB_DEVICE(VENDOR_FORMOSA, 0xe03a) }, /* Formosa Industrial Computing AIM IR605/A */ { USB_DEVICE(VENDOR_FORMOSA, 0xe03c) }, /* Formosa Industrial Computing */ { USB_DEVICE(VENDOR_FORMOSA, 0xe03e) }, /* Formosa Industrial Computing */ { USB_DEVICE(VENDOR_FORMOSA, 0xe042) }, /* Fintek eHome Infrared Transceiver (HP branded) */ { USB_DEVICE(VENDOR_FINTEK, 0x5168), .driver_info = MCE_GEN2_TX_INV }, /* Fintek eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_FINTEK, 0x0602) }, /* Fintek eHome Infrared Transceiver (in the AOpen MP45) */ { USB_DEVICE(VENDOR_FINTEK, 0x0702) }, /* Pinnacle Remote Kit */ { USB_DEVICE(VENDOR_PINNACLE, 0x0225), .driver_info = MCE_GEN3 }, /* Elitegroup Computer Systems IR */ { USB_DEVICE(VENDOR_ECS, 0x0f38) }, /* Wistron Corp. eHome Infrared Receiver */ { USB_DEVICE(VENDOR_WISTRON, 0x0002) }, /* Compro K100 */ { USB_DEVICE(VENDOR_COMPRO, 0x3020) }, /* Compro K100 v2 */ { USB_DEVICE(VENDOR_COMPRO, 0x3082) }, /* Northstar Systems, Inc. eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_NORTHSTAR, 0xe004) }, /* TiVo PC IR Receiver */ { USB_DEVICE(VENDOR_TIVO, 0x2000), .driver_info = TIVO_KIT }, /* Conexant Hybrid TV "Shelby" Polaris SDK */ { USB_DEVICE(VENDOR_CONEXANT, 0x58a1), .driver_info = POLARIS_EVK }, /* Conexant Hybrid TV RDU253S Polaris */ { USB_DEVICE(VENDOR_CONEXANT, 0x58a5), .driver_info = CX_HYBRID_TV }, /* Twisted Melon Inc. - Manta Mini Receiver */ { USB_DEVICE(VENDOR_TWISTEDMELON, 0x8008) }, /* Twisted Melon Inc. - Manta Pico Receiver */ { USB_DEVICE(VENDOR_TWISTEDMELON, 0x8016) }, /* Twisted Melon Inc. 
- Manta Transceiver */ { USB_DEVICE(VENDOR_TWISTEDMELON, 0x8042) }, /* Hauppauge WINTV-HVR-HVR 930C-HD - based on cx231xx */ { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb130), .driver_info = HAUPPAUGE_CX_HYBRID_TV }, { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb131), .driver_info = HAUPPAUGE_CX_HYBRID_TV }, { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb138), .driver_info = HAUPPAUGE_CX_HYBRID_TV }, { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb139), .driver_info = HAUPPAUGE_CX_HYBRID_TV }, /* Hauppauge WinTV-HVR-935C - based on cx231xx */ { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb151), .driver_info = HAUPPAUGE_CX_HYBRID_TV }, /* Hauppauge WinTV-HVR-955Q - based on cx231xx */ { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb123), .driver_info = HAUPPAUGE_CX_HYBRID_TV }, /* Hauppauge WinTV-HVR-975 - based on cx231xx */ { USB_DEVICE(VENDOR_HAUPPAUGE, 0xb150), .driver_info = HAUPPAUGE_CX_HYBRID_TV }, { USB_DEVICE(VENDOR_PCTV, 0x0259), .driver_info = HAUPPAUGE_CX_HYBRID_TV }, { USB_DEVICE(VENDOR_PCTV, 0x025e), .driver_info = HAUPPAUGE_CX_HYBRID_TV }, /* Adaptec / HP eHome Receiver */ { USB_DEVICE(VENDOR_ADAPTEC, 0x0094) }, /* Evromedia USB Full Hybrid Full HD */ { USB_DEVICE(0x1b80, 0xd3b2), .driver_info = EVROMEDIA_FULL_HYBRID_FULLHD }, /* Astrometa T2hybrid */ { USB_DEVICE(0x15f4, 0x0135), .driver_info = ASTROMETA_T2HYBRID }, /* Terminating entry */ { } }; /* data structure for each usb transceiver */ struct mceusb_dev { /* ir-core bits */ struct rc_dev *rc; /* optional features we can enable */ bool carrier_report_enabled; bool wideband_rx_enabled; /* aka learning mode, short-range rx */ /* core device bits */ struct device *dev; /* usb */ struct usb_device *usbdev; struct usb_interface *usbintf; struct urb *urb_in; unsigned int pipe_in; struct usb_endpoint_descriptor *usb_ep_out; unsigned int pipe_out; /* buffers and dma */ unsigned char *buf_in; unsigned int len_in; dma_addr_t dma_in; enum { CMD_HEADER = 0, SUBCMD, CMD_DATA, PARSE_IRDATA, } parser_state; u8 cmd, rem; /* Remaining IR data bytes in packet */ struct { u32 connected:1; u32 tx_mask_normal:1; u32 microsoft_gen1:1; u32 no_tx:1; u32 rx2; } flags; /* transmit support */ u32 carrier; unsigned char tx_mask; char phys[64]; enum mceusb_model_type model; bool need_reset; /* flag to issue a device resume cmd */ u8 emver; /* emulator interface version */ u8 num_txports; /* number of transmit ports */ u8 num_rxports; /* number of receive sensors */ u8 txports_cabled; /* bitmask of transmitters with cable */ u8 rxports_active; /* bitmask of active receive sensors */ bool learning_active; /* wideband rx is active */ /* receiver carrier frequency detection support */ u32 pulse_tunit; /* IR pulse "on" cumulative time units */ u32 pulse_count; /* pulse "on" count in measurement interval */ /* * support for async error handler mceusb_deferred_kevent() * where usb_clear_halt(), usb_reset_configuration(), * usb_reset_device(), etc. 
must be done in process context */ struct work_struct kevent; unsigned long kevent_flags; # define EVENT_TX_HALT 0 # define EVENT_RX_HALT 1 # define EVENT_RST_PEND 31 }; /* MCE Device Command Strings, generally a port and command pair */ static char DEVICE_RESUME[] = {MCE_CMD_NULL, MCE_CMD_PORT_SYS, MCE_CMD_RESUME}; static char GET_REVISION[] = {MCE_CMD_PORT_SYS, MCE_CMD_G_REVISION}; static char GET_EMVER[] = {MCE_CMD_PORT_SYS, MCE_CMD_GETEMVER}; static char GET_WAKEVERSION[] = {MCE_CMD_PORT_SYS, MCE_CMD_GETWAKEVERSION}; static char FLASH_LED[] = {MCE_CMD_PORT_SYS, MCE_CMD_FLASHLED}; static char GET_UNKNOWN2[] = {MCE_CMD_PORT_IR, MCE_CMD_UNKNOWN2}; static char GET_CARRIER_FREQ[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRCFS}; static char GET_RX_TIMEOUT[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRTIMEOUT}; static char GET_NUM_PORTS[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRNUMPORTS}; static char GET_TX_BITMASK[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRTXPORTS}; static char GET_RX_SENSOR[] = {MCE_CMD_PORT_IR, MCE_CMD_GETIRRXPORTEN}; /* sub in desired values in lower byte or bytes for full command */ /* FIXME: make use of these for transmit. static char SET_CARRIER_FREQ[] = {MCE_CMD_PORT_IR, MCE_CMD_SETIRCFS, 0x00, 0x00}; static char SET_TX_BITMASK[] = {MCE_CMD_PORT_IR, MCE_CMD_SETIRTXPORTS, 0x00}; static char SET_RX_TIMEOUT[] = {MCE_CMD_PORT_IR, MCE_CMD_SETIRTIMEOUT, 0x00, 0x00}; static char SET_RX_SENSOR[] = {MCE_CMD_PORT_IR, MCE_RSP_EQIRRXPORTEN, 0x00}; */ static int mceusb_cmd_datasize(u8 cmd, u8 subcmd) { int datasize = 0; switch (cmd) { case MCE_CMD_NULL: if (subcmd == MCE_CMD_PORT_SYS) datasize = 1; break; case MCE_CMD_PORT_SYS: switch (subcmd) { case MCE_RSP_GETPORTSTATUS: datasize = 5; break; case MCE_RSP_EQWAKEVERSION: datasize = 4; break; case MCE_CMD_G_REVISION: datasize = 4; break; case MCE_RSP_EQWAKESUPPORT: case MCE_RSP_GETWAKESOURCE: case MCE_RSP_EQDEVDETAILS: case MCE_RSP_EQEMVER: datasize = 1; break; } break; case MCE_CMD_PORT_IR: switch (subcmd) { case MCE_CMD_UNKNOWN: case MCE_RSP_EQIRCFS: case MCE_RSP_EQIRTIMEOUT: case MCE_RSP_EQIRRXCFCNT: case MCE_RSP_EQIRNUMPORTS: datasize = 2; break; case MCE_CMD_SIG_END: case MCE_RSP_EQIRTXPORTS: case MCE_RSP_EQIRRXPORTEN: datasize = 1; break; } } return datasize; } static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len, int offset, int len, bool out) { #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) char *inout; u8 cmd, subcmd, *data; struct device *dev = ir->dev; u32 carrier, period; if (offset < 0 || offset >= buf_len) return; dev_dbg(dev, "%cx data[%d]: %*ph (len=%d sz=%d)", (out ? 't' : 'r'), offset, min(len, buf_len - offset), buf + offset, len, buf_len); inout = out ? "Request" : "Got"; cmd = buf[offset]; subcmd = (offset + 1 < buf_len) ? buf[offset + 1] : 0; data = &buf[offset] + 2; /* Trace meaningless 0xb1 0x60 header bytes on original receiver */ if (ir->flags.microsoft_gen1 && !out && !offset) { dev_dbg(dev, "MCE gen 1 header"); return; } /* Trace IR data header or trailer */ if (cmd != MCE_CMD_PORT_IR && (cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA) { if (cmd == MCE_IRDATA_TRAILER) dev_dbg(dev, "End of raw IR data"); else dev_dbg(dev, "Raw IR data, %d pulse/space samples", cmd & MCE_PACKET_LENGTH_MASK); return; } /* Unexpected end of buffer? 
*/ if (offset + len > buf_len) return; /* Decode MCE command/response */ switch (cmd) { case MCE_CMD_NULL: if (subcmd == MCE_CMD_NULL) break; if ((subcmd == MCE_CMD_PORT_SYS) && (data[0] == MCE_CMD_RESUME)) dev_dbg(dev, "Device resume requested"); else dev_dbg(dev, "Unknown command 0x%02x 0x%02x", cmd, subcmd); break; case MCE_CMD_PORT_SYS: switch (subcmd) { case MCE_RSP_EQEMVER: if (!out) dev_dbg(dev, "Emulator interface version %x", data[0]); break; case MCE_CMD_G_REVISION: if (len == 2) dev_dbg(dev, "Get hw/sw rev?"); else dev_dbg(dev, "hw/sw rev %4ph", &buf[offset + 2]); break; case MCE_CMD_RESUME: dev_dbg(dev, "Device resume requested"); break; case MCE_RSP_CMD_ILLEGAL: dev_dbg(dev, "Illegal PORT_SYS command"); break; case MCE_RSP_EQWAKEVERSION: if (!out) dev_dbg(dev, "Wake version, proto: 0x%02x, payload: 0x%02x, address: 0x%02x, version: 0x%02x", data[0], data[1], data[2], data[3]); break; case MCE_RSP_GETPORTSTATUS: if (!out) /* We use data1 + 1 here, to match hw labels */ dev_dbg(dev, "TX port %d: blaster is%s connected", data[0] + 1, data[3] ? " not" : ""); break; case MCE_CMD_FLASHLED: dev_dbg(dev, "Attempting to flash LED"); break; default: dev_dbg(dev, "Unknown command 0x%02x 0x%02x", cmd, subcmd); break; } break; case MCE_CMD_PORT_IR: switch (subcmd) { case MCE_CMD_SIG_END: dev_dbg(dev, "End of signal"); break; case MCE_CMD_PING: dev_dbg(dev, "Ping"); break; case MCE_CMD_UNKNOWN: dev_dbg(dev, "Resp to 9f 05 of 0x%02x 0x%02x", data[0], data[1]); break; case MCE_RSP_EQIRCFS: if (!data[0] && !data[1]) { dev_dbg(dev, "%s: no carrier", inout); break; } // prescaler should make sense if (data[0] > 8) break; period = DIV_ROUND_CLOSEST((1U << data[0] * 2) * (data[1] + 1), 10); if (!period) break; carrier = USEC_PER_SEC / period; dev_dbg(dev, "%s carrier of %u Hz (period %uus)", inout, carrier, period); break; case MCE_CMD_GETIRCFS: dev_dbg(dev, "Get carrier mode and freq"); break; case MCE_RSP_EQIRTXPORTS: dev_dbg(dev, "%s transmit blaster mask of 0x%02x", inout, data[0]); break; case MCE_RSP_EQIRTIMEOUT: /* value is in units of 50us, so x*50/1000 ms */ period = ((data[0] << 8) | data[1]) * MCE_TIME_UNIT / 1000; dev_dbg(dev, "%s receive timeout of %d ms", inout, period); break; case MCE_CMD_GETIRTIMEOUT: dev_dbg(dev, "Get receive timeout"); break; case MCE_CMD_GETIRTXPORTS: dev_dbg(dev, "Get transmit blaster mask"); break; case MCE_RSP_EQIRRXPORTEN: dev_dbg(dev, "%s %s-range receive sensor in use", inout, data[0] == 0x02 ? "short" : "long"); break; case MCE_CMD_GETIRRXPORTEN: /* aka MCE_RSP_EQIRRXCFCNT */ if (out) dev_dbg(dev, "Get receive sensor"); else dev_dbg(dev, "RX carrier cycle count: %d", ((data[0] << 8) | data[1])); break; case MCE_RSP_EQIRNUMPORTS: if (out) break; dev_dbg(dev, "Num TX ports: %x, num RX ports: %x", data[0], data[1]); break; case MCE_RSP_CMD_ILLEGAL: dev_dbg(dev, "Illegal PORT_IR command"); break; case MCE_RSP_TX_TIMEOUT: dev_dbg(dev, "IR TX timeout (TX buffer underrun)"); break; default: dev_dbg(dev, "Unknown command 0x%02x 0x%02x", cmd, subcmd); break; } break; default: break; } #endif } /* * Schedule work that can't be done in interrupt handlers * (mceusb_dev_recv() and mce_write_callback()) nor BH work. * Invokes mceusb_deferred_kevent() for recovering from * error events specified by the kevent bit field. 
*/ static void mceusb_defer_kevent(struct mceusb_dev *ir, int kevent) { set_bit(kevent, &ir->kevent_flags); if (test_bit(EVENT_RST_PEND, &ir->kevent_flags)) { dev_dbg(ir->dev, "kevent %d dropped pending USB Reset Device", kevent); return; } if (!schedule_work(&ir->kevent)) dev_dbg(ir->dev, "kevent %d already scheduled", kevent); else dev_dbg(ir->dev, "kevent %d scheduled", kevent); } static void mce_write_callback(struct urb *urb) { if (!urb) return; complete(urb->context); } /* * Write (TX/send) data to MCE device USB endpoint out. * Used for IR blaster TX and MCE device commands. * * Return: The number of bytes written (> 0) or errno (< 0). */ static int mce_write(struct mceusb_dev *ir, u8 *data, int size) { int ret; struct urb *urb; struct device *dev = ir->dev; unsigned char *buf_out; struct completion tx_done; unsigned long expire; unsigned long ret_wait; mceusb_dev_printdata(ir, data, size, 0, size, true); urb = usb_alloc_urb(0, GFP_KERNEL); if (unlikely(!urb)) { dev_err(dev, "Error: mce write couldn't allocate urb"); return -ENOMEM; } buf_out = kmalloc(size, GFP_KERNEL); if (!buf_out) { usb_free_urb(urb); return -ENOMEM; } init_completion(&tx_done); /* outbound data */ if (usb_endpoint_xfer_int(ir->usb_ep_out)) usb_fill_int_urb(urb, ir->usbdev, ir->pipe_out, buf_out, size, mce_write_callback, &tx_done, ir->usb_ep_out->bInterval); else usb_fill_bulk_urb(urb, ir->usbdev, ir->pipe_out, buf_out, size, mce_write_callback, &tx_done); memcpy(buf_out, data, size); ret = usb_submit_urb(urb, GFP_KERNEL); if (ret) { dev_err(dev, "Error: mce write submit urb error = %d", ret); kfree(buf_out); usb_free_urb(urb); return ret; } expire = msecs_to_jiffies(USB_TX_TIMEOUT); ret_wait = wait_for_completion_timeout(&tx_done, expire); if (!ret_wait) { dev_err(dev, "Error: mce write timed out (expire = %lu (%dms))", expire, USB_TX_TIMEOUT); usb_kill_urb(urb); ret = (urb->status == -ENOENT ? -ETIMEDOUT : urb->status); } else { ret = urb->status; } if (ret >= 0) ret = urb->actual_length; /* bytes written */ switch (urb->status) { /* success */ case 0: break; case -ECONNRESET: case -ENOENT: case -EILSEQ: case -ESHUTDOWN: break; case -EPIPE: dev_err(ir->dev, "Error: mce write urb status = %d (TX HALT)", urb->status); mceusb_defer_kevent(ir, EVENT_TX_HALT); break; default: dev_err(ir->dev, "Error: mce write urb status = %d", urb->status); break; } dev_dbg(dev, "tx done status = %d (wait = %lu, expire = %lu (%dms), urb->actual_length = %d, urb->status = %d)", ret, ret_wait, expire, USB_TX_TIMEOUT, urb->actual_length, urb->status); kfree(buf_out); usb_free_urb(urb); return ret; } static void mce_command_out(struct mceusb_dev *ir, u8 *data, int size) { int rsize = sizeof(DEVICE_RESUME); if (ir->need_reset) { ir->need_reset = false; mce_write(ir, DEVICE_RESUME, rsize); msleep(10); } mce_write(ir, data, size); msleep(10); } /* * Transmit IR out the MCE device IR blaster port(s). * * Convert IR pulse/space sequence from LIRC to MCE format. * Break up a long IR sequence into multiple parts (MCE IR data packets). * * u32 txbuf[] consists of IR pulse, space, ..., and pulse times in usec. * Pulses and spaces are implicit by their position. * The first IR sample, txbuf[0], is always a pulse. * * u8 irbuf[] consists of multiple IR data packets for the MCE device. * A packet is 1 u8 MCE_IRDATA_HEADER and up to 30 u8 IR samples. * An IR sample is 1-bit pulse/space flag with 7-bit time * in MCE time units (50usec). * * Return: The number of IR samples sent (> 0) or errno (< 0). 
*/ static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count) { struct mceusb_dev *ir = dev->priv; u8 cmdbuf[3] = { MCE_CMD_PORT_IR, MCE_CMD_SETIRTXPORTS, 0x00 }; u8 irbuf[MCE_IRBUF_SIZE]; int ircount = 0; unsigned int irsample; int i, length, ret; /* Send the set TX ports command */ cmdbuf[2] = ir->tx_mask; mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); /* Generate mce IR data packet */ for (i = 0; i < count; i++) { irsample = txbuf[i] / MCE_TIME_UNIT; /* loop to support long pulses/spaces > 6350us (127*50us) */ while (irsample > 0) { /* Insert IR header every 30th entry */ if (ircount % MCE_PACKET_SIZE == 0) { /* Room for IR header and one IR sample? */ if (ircount >= MCE_IRBUF_SIZE - 1) { /* Send near full buffer */ ret = mce_write(ir, irbuf, ircount); if (ret < 0) return ret; ircount = 0; } irbuf[ircount++] = MCE_IRDATA_HEADER; } /* Insert IR sample */ if (irsample <= MCE_MAX_PULSE_LENGTH) { irbuf[ircount] = irsample; irsample = 0; } else { irbuf[ircount] = MCE_MAX_PULSE_LENGTH; irsample -= MCE_MAX_PULSE_LENGTH; } /* * Even i = IR pulse * Odd i = IR space */ irbuf[ircount] |= (i & 1 ? 0 : MCE_PULSE_BIT); ircount++; /* IR buffer full? */ if (ircount >= MCE_IRBUF_SIZE) { /* Fix packet length in last header */ length = ircount % MCE_PACKET_SIZE; if (length > 0) irbuf[ircount - length] -= MCE_PACKET_SIZE - length; /* Send full buffer */ ret = mce_write(ir, irbuf, ircount); if (ret < 0) return ret; ircount = 0; } } } /* after for loop, 0 <= ircount < MCE_IRBUF_SIZE */ /* Fix packet length in last header */ length = ircount % MCE_PACKET_SIZE; if (length > 0) irbuf[ircount - length] -= MCE_PACKET_SIZE - length; /* Append IR trailer (0x80) to final partial (or empty) IR buffer */ irbuf[ircount++] = MCE_IRDATA_TRAILER; /* Send final buffer */ ret = mce_write(ir, irbuf, ircount); if (ret < 0) return ret; return count; } /* Sets active IR outputs -- mce devices typically have two */ static int mceusb_set_tx_mask(struct rc_dev *dev, u32 mask) { struct mceusb_dev *ir = dev->priv; /* return number of transmitters */ int emitters = ir->num_txports ? ir->num_txports : 2; if (mask >= (1 << emitters)) return emitters; if (ir->flags.tx_mask_normal) ir->tx_mask = mask; else ir->tx_mask = (mask != MCE_DEFAULT_TX_MASK ? 
mask ^ MCE_DEFAULT_TX_MASK : mask) << 1; return 0; } /* Sets the send carrier frequency and mode */ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier) { struct mceusb_dev *ir = dev->priv; int clk = 10000000; int prescaler = 0, divisor = 0; unsigned char cmdbuf[4] = { MCE_CMD_PORT_IR, MCE_CMD_SETIRCFS, 0x00, 0x00 }; /* Carrier has changed */ if (ir->carrier != carrier) { if (carrier == 0) { ir->carrier = carrier; cmdbuf[2] = MCE_CMD_SIG_END; cmdbuf[3] = MCE_IRDATA_TRAILER; dev_dbg(ir->dev, "disabling carrier modulation"); mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); return 0; } for (prescaler = 0; prescaler < 4; ++prescaler) { divisor = (clk >> (2 * prescaler)) / carrier; if (divisor <= 0xff) { ir->carrier = carrier; cmdbuf[2] = prescaler; cmdbuf[3] = divisor; dev_dbg(ir->dev, "requesting %u HZ carrier", carrier); /* Transmit new carrier to mce device */ mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); return 0; } } return -EINVAL; } return 0; } static int mceusb_set_timeout(struct rc_dev *dev, unsigned int timeout) { u8 cmdbuf[4] = { MCE_CMD_PORT_IR, MCE_CMD_SETIRTIMEOUT, 0, 0 }; struct mceusb_dev *ir = dev->priv; unsigned int units; units = DIV_ROUND_UP(timeout, MCE_TIME_UNIT); cmdbuf[2] = units >> 8; cmdbuf[3] = units; mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); /* get receiver timeout value */ mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT)); return 0; } /* * Select or deselect the 2nd receiver port. * Second receiver is learning mode, wide-band, short-range receiver. * Only one receiver (long or short range) may be active at a time. */ static int mceusb_set_rx_wideband(struct rc_dev *dev, int enable) { struct mceusb_dev *ir = dev->priv; unsigned char cmdbuf[3] = { MCE_CMD_PORT_IR, MCE_CMD_SETIRRXPORTEN, 0x00 }; dev_dbg(ir->dev, "select %s-range receive sensor", enable ? "short" : "long"); if (enable) { ir->wideband_rx_enabled = true; cmdbuf[2] = 2; /* port 2 is short range receiver */ } else { ir->wideband_rx_enabled = false; cmdbuf[2] = 1; /* port 1 is long range receiver */ } mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); /* response from device sets ir->learning_active */ return 0; } /* * Enable/disable receiver carrier frequency pass through reporting. * Only the short-range receiver has carrier frequency measuring capability. * Implicitly select this receiver when enabling carrier frequency reporting. */ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable) { struct mceusb_dev *ir = dev->priv; unsigned char cmdbuf[3] = { MCE_CMD_PORT_IR, MCE_CMD_SETIRRXPORTEN, 0x00 }; dev_dbg(ir->dev, "%s short-range receiver carrier reporting", enable ? "enable" : "disable"); if (enable) { ir->carrier_report_enabled = true; if (!ir->learning_active) { cmdbuf[2] = 2; /* port 2 is short range receiver */ mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); } } else { ir->carrier_report_enabled = false; /* * Revert to normal (long-range) receiver only if the * wideband (short-range) receiver wasn't explicitly * enabled. */ if (ir->learning_active && !ir->wideband_rx_enabled) { cmdbuf[2] = 1; /* port 1 is long range receiver */ mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); } } return 0; } /* * Handle PORT_SYS/IR command response received from the MCE device. * * Assumes single response with all its data (not truncated) * in buf_in[]. The response itself determines its total length * (mceusb_cmd_datasize() + 2) and hence the minimum size of buf_in[]. 
* * We don't do anything but print debug spew for many of the command bits * we receive from the hardware, but some of them are useful information * we want to store so that we can use them. */ static void mceusb_handle_command(struct mceusb_dev *ir, u8 *buf_in) { u8 cmd = buf_in[0]; u8 subcmd = buf_in[1]; u8 *hi = &buf_in[2]; /* read only when required */ u8 *lo = &buf_in[3]; /* read only when required */ struct ir_raw_event rawir = {}; u32 carrier_cycles; u32 cycles_fix; if (cmd == MCE_CMD_PORT_SYS) { switch (subcmd) { /* the one and only 5-byte return value command */ case MCE_RSP_GETPORTSTATUS: if (buf_in[5] == 0 && *hi < 8) ir->txports_cabled |= 1 << *hi; break; /* 1-byte return value commands */ case MCE_RSP_EQEMVER: ir->emver = *hi; break; /* No return value commands */ case MCE_RSP_CMD_ILLEGAL: ir->need_reset = true; break; default: break; } return; } if (cmd != MCE_CMD_PORT_IR) return; switch (subcmd) { /* 2-byte return value commands */ case MCE_RSP_EQIRTIMEOUT: ir->rc->timeout = (*hi << 8 | *lo) * MCE_TIME_UNIT; break; case MCE_RSP_EQIRNUMPORTS: ir->num_txports = *hi; ir->num_rxports = *lo; break; case MCE_RSP_EQIRRXCFCNT: /* * The carrier cycle counter can overflow and wrap around * without notice from the device. So frequency measurement * will be inaccurate with long duration IR. * * The long-range (non learning) receiver always reports * zero count so we always ignore its report. */ if (ir->carrier_report_enabled && ir->learning_active && ir->pulse_tunit > 0) { carrier_cycles = (*hi << 8 | *lo); /* * Adjust carrier cycle count by adding * 1 missed count per pulse "on" */ cycles_fix = ir->flags.rx2 == 2 ? ir->pulse_count : 0; rawir.carrier_report = 1; rawir.carrier = (1000000u / MCE_TIME_UNIT) * (carrier_cycles + cycles_fix) / ir->pulse_tunit; dev_dbg(ir->dev, "RX carrier frequency %u Hz (pulse count = %u, cycles = %u, duration = %u, rx2 = %u)", rawir.carrier, ir->pulse_count, carrier_cycles, ir->pulse_tunit, ir->flags.rx2); ir_raw_event_store(ir->rc, &rawir); } break; /* 1-byte return value commands */ case MCE_RSP_EQIRTXPORTS: ir->tx_mask = *hi; break; case MCE_RSP_EQIRRXPORTEN: ir->learning_active = ((*hi & 0x02) == 0x02); if (ir->rxports_active != *hi) { dev_info(ir->dev, "%s-range (0x%x) receiver active", ir->learning_active ? "short" : "long", *hi); ir->rxports_active = *hi; } break; /* No return value commands */ case MCE_RSP_CMD_ILLEGAL: case MCE_RSP_TX_TIMEOUT: ir->need_reset = true; break; default: break; } } static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len) { struct ir_raw_event rawir = {}; bool event = false; int i = 0; /* skip meaningless 0xb1 0x60 header bytes on orig receiver */ if (ir->flags.microsoft_gen1) i = 2; /* if there's no data, just return now */ if (buf_len <= i) return; for (; i < buf_len; i++) { switch (ir->parser_state) { case SUBCMD: ir->rem = mceusb_cmd_datasize(ir->cmd, ir->buf_in[i]); mceusb_dev_printdata(ir, ir->buf_in, buf_len, i - 1, ir->rem + 2, false); if (i + ir->rem < buf_len) mceusb_handle_command(ir, &ir->buf_in[i - 1]); ir->parser_state = CMD_DATA; break; case PARSE_IRDATA: ir->rem--; rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0); rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK); if (unlikely(!rawir.duration)) { dev_dbg(ir->dev, "nonsensical irdata %02x with duration 0", ir->buf_in[i]); break; } if (rawir.pulse) { ir->pulse_tunit += rawir.duration; ir->pulse_count++; } rawir.duration *= MCE_TIME_UNIT; dev_dbg(ir->dev, "Storing %s %u us (%02x)", rawir.pulse ? 
"pulse" : "space", rawir.duration, ir->buf_in[i]); if (ir_raw_event_store_with_filter(ir->rc, &rawir)) event = true; break; case CMD_DATA: ir->rem--; break; case CMD_HEADER: ir->cmd = ir->buf_in[i]; if ((ir->cmd == MCE_CMD_PORT_IR) || ((ir->cmd & MCE_PORT_MASK) != MCE_COMMAND_IRDATA)) { /* * got PORT_SYS, PORT_IR, or unknown * command response prefix */ ir->parser_state = SUBCMD; continue; } /* * got IR data prefix (0x80 + num_bytes) * decode MCE packets of the form {0x83, AA, BB, CC} * IR data packets can span USB messages */ ir->rem = (ir->cmd & MCE_PACKET_LENGTH_MASK); mceusb_dev_printdata(ir, ir->buf_in, buf_len, i, ir->rem + 1, false); if (ir->rem) { ir->parser_state = PARSE_IRDATA; } else { struct ir_raw_event ev = { .timeout = 1, .duration = ir->rc->timeout }; if (ir_raw_event_store_with_filter(ir->rc, &ev)) event = true; ir->pulse_tunit = 0; ir->pulse_count = 0; } break; } if (ir->parser_state != CMD_HEADER && !ir->rem) ir->parser_state = CMD_HEADER; } /* * Accept IR data spanning multiple rx buffers. * Reject MCE command response spanning multiple rx buffers. */ if (ir->parser_state != PARSE_IRDATA || !ir->rem) ir->parser_state = CMD_HEADER; if (event) { dev_dbg(ir->dev, "processed IR data"); ir_raw_event_handle(ir->rc); } } static void mceusb_dev_recv(struct urb *urb) { struct mceusb_dev *ir; if (!urb) return; ir = urb->context; if (!ir) { usb_unlink_urb(urb); return; } switch (urb->status) { /* success */ case 0: mceusb_process_ir_data(ir, urb->actual_length); break; case -ECONNRESET: case -ENOENT: case -EILSEQ: case -EPROTO: case -ESHUTDOWN: usb_unlink_urb(urb); return; case -EPIPE: dev_err(ir->dev, "Error: urb status = %d (RX HALT)", urb->status); mceusb_defer_kevent(ir, EVENT_RX_HALT); return; default: dev_err(ir->dev, "Error: urb status = %d", urb->status); break; } usb_submit_urb(urb, GFP_ATOMIC); } static void mceusb_get_emulator_version(struct mceusb_dev *ir) { /* If we get no reply or an illegal command reply, its ver 1, says MS */ ir->emver = 1; mce_command_out(ir, GET_EMVER, sizeof(GET_EMVER)); } static void mceusb_gen1_init(struct mceusb_dev *ir) { int ret; struct device *dev = ir->dev; char data[USB_CTRL_MSG_SZ]; /* * This is a strange one. Windows issues a set address to the device * on the receive control pipe and expect a certain value pair back */ ret = usb_control_msg_recv(ir->usbdev, 0, USB_REQ_SET_ADDRESS, USB_DIR_IN | USB_TYPE_VENDOR, 0, 0, data, USB_CTRL_MSG_SZ, 3000, GFP_KERNEL); dev_dbg(dev, "set address - ret = %d", ret); dev_dbg(dev, "set address - data[0] = %d, data[1] = %d", data[0], data[1]); /* set feature: bit rate 38400 bps */ ret = usb_control_msg_send(ir->usbdev, 0, USB_REQ_SET_FEATURE, USB_TYPE_VENDOR, 0xc04e, 0x0000, NULL, 0, 3000, GFP_KERNEL); dev_dbg(dev, "set feature - ret = %d", ret); /* bRequest 4: set char length to 8 bits */ ret = usb_control_msg_send(ir->usbdev, 0, 4, USB_TYPE_VENDOR, 0x0808, 0x0000, NULL, 0, 3000, GFP_KERNEL); dev_dbg(dev, "set char length - retB = %d", ret); /* bRequest 2: set handshaking to use DTR/DSR */ ret = usb_control_msg_send(ir->usbdev, 0, 2, USB_TYPE_VENDOR, 0x0000, 0x0100, NULL, 0, 3000, GFP_KERNEL); dev_dbg(dev, "set handshake - retC = %d", ret); /* device resume */ mce_command_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME)); /* get hw/sw revision? 
*/ mce_command_out(ir, GET_REVISION, sizeof(GET_REVISION)); } static void mceusb_gen2_init(struct mceusb_dev *ir) { /* device resume */ mce_command_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME)); /* get wake version (protocol, key, address) */ mce_command_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION)); /* unknown what this one actually returns... */ mce_command_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2)); } static void mceusb_get_parameters(struct mceusb_dev *ir) { int i; unsigned char cmdbuf[3] = { MCE_CMD_PORT_SYS, MCE_CMD_GETPORTSTATUS, 0x00 }; /* defaults, if the hardware doesn't support querying */ ir->num_txports = 2; ir->num_rxports = 2; /* get number of tx and rx ports */ mce_command_out(ir, GET_NUM_PORTS, sizeof(GET_NUM_PORTS)); /* get the carrier and frequency */ mce_command_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ)); if (ir->num_txports && !ir->flags.no_tx) /* get the transmitter bitmask */ mce_command_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK)); /* get receiver timeout value */ mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT)); /* get receiver sensor setting */ mce_command_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR)); for (i = 0; i < ir->num_txports; i++) { cmdbuf[2] = i; mce_command_out(ir, cmdbuf, sizeof(cmdbuf)); } } static void mceusb_flash_led(struct mceusb_dev *ir) { if (ir->emver < 2) return; mce_command_out(ir, FLASH_LED, sizeof(FLASH_LED)); } /* * Workqueue function * for resetting or recovering device after occurrence of error events * specified in ir->kevent bit field. * Function runs (via schedule_work()) in non-interrupt context, for * calls here (such as usb_clear_halt()) requiring non-interrupt context. */ static void mceusb_deferred_kevent(struct work_struct *work) { struct mceusb_dev *ir = container_of(work, struct mceusb_dev, kevent); int status; dev_err(ir->dev, "kevent handler called (flags 0x%lx)", ir->kevent_flags); if (test_bit(EVENT_RST_PEND, &ir->kevent_flags)) { dev_err(ir->dev, "kevent handler canceled pending USB Reset Device"); return; } if (test_bit(EVENT_RX_HALT, &ir->kevent_flags)) { usb_unlink_urb(ir->urb_in); status = usb_clear_halt(ir->usbdev, ir->pipe_in); dev_err(ir->dev, "rx clear halt status = %d", status); if (status < 0) { /* * Unable to clear RX halt/stall. * Will need to call usb_reset_device(). */ dev_err(ir->dev, "stuck RX HALT state requires USB Reset Device to clear"); usb_queue_reset_device(ir->usbintf); set_bit(EVENT_RST_PEND, &ir->kevent_flags); clear_bit(EVENT_RX_HALT, &ir->kevent_flags); /* Cancel all other error events and handlers */ clear_bit(EVENT_TX_HALT, &ir->kevent_flags); return; } clear_bit(EVENT_RX_HALT, &ir->kevent_flags); status = usb_submit_urb(ir->urb_in, GFP_KERNEL); if (status < 0) { dev_err(ir->dev, "rx unhalt submit urb error = %d", status); } } if (test_bit(EVENT_TX_HALT, &ir->kevent_flags)) { status = usb_clear_halt(ir->usbdev, ir->pipe_out); dev_err(ir->dev, "tx clear halt status = %d", status); if (status < 0) { /* * Unable to clear TX halt/stall. * Will need to call usb_reset_device(). 
*/ dev_err(ir->dev, "stuck TX HALT state requires USB Reset Device to clear"); usb_queue_reset_device(ir->usbintf); set_bit(EVENT_RST_PEND, &ir->kevent_flags); clear_bit(EVENT_TX_HALT, &ir->kevent_flags); /* Cancel all other error events and handlers */ clear_bit(EVENT_RX_HALT, &ir->kevent_flags); return; } clear_bit(EVENT_TX_HALT, &ir->kevent_flags); } } static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir) { struct usb_device *udev = ir->usbdev; struct device *dev = ir->dev; struct rc_dev *rc; int ret; rc = rc_allocate_device(RC_DRIVER_IR_RAW); if (!rc) { dev_err(dev, "remote dev allocation failed"); goto out; } usb_make_path(ir->usbdev, ir->phys, sizeof(ir->phys)); rc->device_name = mceusb_model[ir->model].name ? : "Media Center Ed. eHome Infrared Remote Transceiver"; rc->input_phys = ir->phys; usb_to_input_id(ir->usbdev, &rc->input_id); rc->dev.parent = dev; rc->priv = ir; rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER; rc->rx_resolution = MCE_TIME_UNIT; rc->min_timeout = MCE_TIME_UNIT; rc->timeout = MS_TO_US(100); if (!mceusb_model[ir->model].broken_irtimeout) { rc->s_timeout = mceusb_set_timeout; rc->max_timeout = 10 * IR_DEFAULT_TIMEOUT; } else { /* * If we can't set the timeout using CMD_SETIRTIMEOUT, we can * rely on software timeouts for timeouts < 100ms. */ rc->max_timeout = rc->timeout; } if (!ir->flags.no_tx) { rc->s_tx_mask = mceusb_set_tx_mask; rc->s_tx_carrier = mceusb_set_tx_carrier; rc->tx_ir = mceusb_tx_ir; } if (ir->flags.rx2 > 0) { rc->s_wideband_receiver = mceusb_set_rx_wideband; rc->s_carrier_report = mceusb_set_rx_carrier_report; } rc->driver_name = DRIVER_NAME; switch (le16_to_cpu(udev->descriptor.idVendor)) { case VENDOR_HAUPPAUGE: rc->map_name = RC_MAP_HAUPPAUGE; break; case VENDOR_PCTV: rc->map_name = RC_MAP_PINNACLE_PCTV_HD; break; default: rc->map_name = RC_MAP_RC6_MCE; } if (mceusb_model[ir->model].rc_map) rc->map_name = mceusb_model[ir->model].rc_map; ret = rc_register_device(rc); if (ret < 0) { dev_err(dev, "remote dev registration failed"); goto out; } return rc; out: rc_free_device(rc); return NULL; } static int mceusb_dev_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct usb_host_interface *idesc; struct usb_endpoint_descriptor *ep = NULL; struct usb_endpoint_descriptor *ep_in = NULL; struct usb_endpoint_descriptor *ep_out = NULL; struct mceusb_dev *ir = NULL; int pipe, maxp, i, res; char buf[63], name[128] = ""; enum mceusb_model_type model = id->driver_info; bool is_gen3; bool is_microsoft_gen1; bool tx_mask_normal; int ir_intfnum; dev_dbg(&intf->dev, "%s called", __func__); idesc = intf->cur_altsetting; is_gen3 = mceusb_model[model].mce_gen3; is_microsoft_gen1 = mceusb_model[model].mce_gen1; tx_mask_normal = mceusb_model[model].tx_mask_normal; ir_intfnum = mceusb_model[model].ir_intfnum; /* There are multi-function devices with non-IR interfaces */ if (idesc->desc.bInterfaceNumber != ir_intfnum) return -ENODEV; /* step through the endpoints to find first bulk in and out endpoint */ for (i = 0; i < idesc->desc.bNumEndpoints; ++i) { ep = &idesc->endpoint[i].desc; if (ep_in == NULL) { if (usb_endpoint_is_bulk_in(ep)) { ep_in = ep; dev_dbg(&intf->dev, "acceptable bulk inbound endpoint found\n"); } else if (usb_endpoint_is_int_in(ep)) { ep_in = ep; ep_in->bInterval = 1; dev_dbg(&intf->dev, "acceptable interrupt inbound endpoint found\n"); } } if (ep_out == NULL) { if (usb_endpoint_is_bulk_out(ep)) { ep_out = ep; dev_dbg(&intf->dev, "acceptable bulk outbound endpoint 
found\n"); } else if (usb_endpoint_is_int_out(ep)) { ep_out = ep; ep_out->bInterval = 1; dev_dbg(&intf->dev, "acceptable interrupt outbound endpoint found\n"); } } } if (!ep_in || !ep_out) { dev_dbg(&intf->dev, "required endpoints not found\n"); return -ENODEV; } if (usb_endpoint_xfer_int(ep_in)) pipe = usb_rcvintpipe(dev, ep_in->bEndpointAddress); else pipe = usb_rcvbulkpipe(dev, ep_in->bEndpointAddress); maxp = usb_maxpacket(dev, pipe); ir = kzalloc(sizeof(struct mceusb_dev), GFP_KERNEL); if (!ir) goto mem_alloc_fail; ir->pipe_in = pipe; ir->buf_in = usb_alloc_coherent(dev, maxp, GFP_KERNEL, &ir->dma_in); if (!ir->buf_in) goto buf_in_alloc_fail; ir->urb_in = usb_alloc_urb(0, GFP_KERNEL); if (!ir->urb_in) goto urb_in_alloc_fail; ir->usbintf = intf; ir->usbdev = usb_get_dev(dev); ir->dev = &intf->dev; ir->len_in = maxp; ir->flags.microsoft_gen1 = is_microsoft_gen1; ir->flags.tx_mask_normal = tx_mask_normal; ir->flags.no_tx = mceusb_model[model].no_tx; ir->flags.rx2 = mceusb_model[model].rx2; ir->model = model; /* Saving usb interface data for use by the transmitter routine */ ir->usb_ep_out = ep_out; if (usb_endpoint_xfer_int(ep_out)) ir->pipe_out = usb_sndintpipe(ir->usbdev, ep_out->bEndpointAddress); else ir->pipe_out = usb_sndbulkpipe(ir->usbdev, ep_out->bEndpointAddress); if (dev->descriptor.iManufacturer && usb_string(dev, dev->descriptor.iManufacturer, buf, sizeof(buf)) > 0) strscpy(name, buf, sizeof(name)); if (dev->descriptor.iProduct && usb_string(dev, dev->descriptor.iProduct, buf, sizeof(buf)) > 0) snprintf(name + strlen(name), sizeof(name) - strlen(name), " %s", buf); /* * Initialize async USB error handler before registering * or activating any mceusb RX and TX functions */ INIT_WORK(&ir->kevent, mceusb_deferred_kevent); ir->rc = mceusb_init_rc_dev(ir); if (!ir->rc) goto rc_dev_fail; /* wire up inbound data handler */ if (usb_endpoint_xfer_int(ep_in)) usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp, mceusb_dev_recv, ir, ep_in->bInterval); else usb_fill_bulk_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp, mceusb_dev_recv, ir); ir->urb_in->transfer_dma = ir->dma_in; ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; /* flush buffers on the device */ dev_dbg(&intf->dev, "Flushing receive buffers"); res = usb_submit_urb(ir->urb_in, GFP_KERNEL); if (res) dev_err(&intf->dev, "failed to flush buffers: %d", res); /* figure out which firmware/emulator version this hardware has */ mceusb_get_emulator_version(ir); /* initialize device */ if (ir->flags.microsoft_gen1) mceusb_gen1_init(ir); else if (!is_gen3) mceusb_gen2_init(ir); mceusb_get_parameters(ir); mceusb_flash_led(ir); if (!ir->flags.no_tx) mceusb_set_tx_mask(ir->rc, MCE_DEFAULT_TX_MASK); usb_set_intfdata(intf, ir); /* enable wake via this device */ device_set_wakeup_capable(ir->dev, true); device_set_wakeup_enable(ir->dev, true); dev_info(&intf->dev, "Registered %s with mce emulator interface version %x", name, ir->emver); dev_info(&intf->dev, "%x tx ports (0x%x cabled) and %x rx sensors (0x%x active)", ir->num_txports, ir->txports_cabled, ir->num_rxports, ir->rxports_active); return 0; /* Error-handling path */ rc_dev_fail: cancel_work_sync(&ir->kevent); usb_put_dev(ir->usbdev); usb_kill_urb(ir->urb_in); usb_free_urb(ir->urb_in); urb_in_alloc_fail: usb_free_coherent(dev, maxp, ir->buf_in, ir->dma_in); buf_in_alloc_fail: kfree(ir); mem_alloc_fail: dev_err(&intf->dev, "%s: device setup failed!", __func__); return -ENOMEM; } static void mceusb_dev_disconnect(struct usb_interface *intf) { struct usb_device *dev = 
interface_to_usbdev(intf);
	struct mceusb_dev *ir = usb_get_intfdata(intf);

	dev_dbg(&intf->dev, "%s called", __func__);

	usb_set_intfdata(intf, NULL);

	if (!ir)
		return;

	ir->usbdev = NULL;
	cancel_work_sync(&ir->kevent);
	rc_unregister_device(ir->rc);
	usb_kill_urb(ir->urb_in);
	usb_free_urb(ir->urb_in);
	usb_free_coherent(dev, ir->len_in, ir->buf_in, ir->dma_in);
	usb_put_dev(dev);

	kfree(ir);
}

static int mceusb_dev_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct mceusb_dev *ir = usb_get_intfdata(intf);

	dev_info(ir->dev, "suspend");
	usb_kill_urb(ir->urb_in);
	return 0;
}

static int mceusb_dev_resume(struct usb_interface *intf)
{
	struct mceusb_dev *ir = usb_get_intfdata(intf);

	dev_info(ir->dev, "resume");
	if (usb_submit_urb(ir->urb_in, GFP_ATOMIC))
		return -EIO;
	return 0;
}

static struct usb_driver mceusb_dev_driver = {
	.name = DRIVER_NAME,
	.probe = mceusb_dev_probe,
	.disconnect = mceusb_dev_disconnect,
	.suspend = mceusb_dev_suspend,
	.resume = mceusb_dev_resume,
	.reset_resume = mceusb_dev_resume,
	.id_table = mceusb_dev_table
};

module_usb_driver(mceusb_dev_driver);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(usb, mceusb_dev_table);
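The command/response framing documented near the top of this driver, a lead byte carrying a 3-bit port in its high bits and a 5-bit length in its low bits, where a length field of 0x1f flags a command/response code and 0x80 + n introduces n raw IR samples at 50 us resolution, can be exercised outside the kernel. The following standalone C sketch decodes a small receive buffer under those assumptions; the ex_* names are hypothetical and only mirror the MCE_* constants defined above, they are not part of the driver.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_MCE_CMD        0x1f	/* low 5 bits all set: next byte is a cmd/rsp code */
#define EX_MCE_PORT_MASK  0xe0	/* high 3 bits of the lead byte select the port */
#define EX_MCE_PULSE_BIT  0x80	/* IR sample: MSB set = pulse, clear = space */
#define EX_MCE_PULSE_MASK 0x7f	/* IR sample: low 7 bits = duration */
#define EX_MCE_TIME_UNIT  50	/* duration resolution, in microseconds */

/* Split a lead byte into port and length; returns true for a cmd/rsp header. */
static bool ex_decode_lead(uint8_t lead, unsigned *port, unsigned *len)
{
	*port = (lead & EX_MCE_PORT_MASK) >> 5;
	*len = lead & EX_MCE_CMD;
	return *len == EX_MCE_CMD;
}

int main(void)
{
	/* 0x83 = 0x80 + 3: IR port (0x4), three raw IR samples follow */
	uint8_t buf[] = { 0x83, 0xa4, 0x26, 0xa4 };
	unsigned port, len, i;

	if (!ex_decode_lead(buf[0], &port, &len))
		printf("lead 0x%02x: port %u, %u IR samples\n",
		       (unsigned)buf[0], port, len);

	for (i = 1; i <= len; i++)
		printf("  %s %u us\n",
		       (buf[i] & EX_MCE_PULSE_BIT) ? "pulse" : "space",
		       (buf[i] & EX_MCE_PULSE_MASK) * EX_MCE_TIME_UNIT);
	return 0;
}

Run against the sample buffer, this prints a 1800 us pulse, a 1900 us space, and another 1800 us pulse, which is the same interpretation mceusb_process_ir_data() applies in its PARSE_IRDATA state.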
// SPDX-License-Identifier: GPL-2.0
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>
#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr,
						   const struct inetpeer_addr *daddr,
						   struct net *net, unsigned int hash);

struct tcp_fastopen_metrics {
	u16	mss;
	u16	syn_loss:10,		/* Recurring Fast Open SYN losses */
		try_exp:2;		/* Request w/ exp.
option (once) */ unsigned long last_syn_loss; /* Last Fast Open SYN loss */ struct tcp_fastopen_cookie cookie; }; /* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility * Kernel only stores RTT and RTTVAR in usec resolution */ #define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2) struct tcp_metrics_block { struct tcp_metrics_block __rcu *tcpm_next; struct net *tcpm_net; struct inetpeer_addr tcpm_saddr; struct inetpeer_addr tcpm_daddr; unsigned long tcpm_stamp; u32 tcpm_lock; u32 tcpm_vals[TCP_METRIC_MAX_KERNEL + 1]; struct tcp_fastopen_metrics tcpm_fastopen; struct rcu_head rcu_head; }; static inline struct net *tm_net(const struct tcp_metrics_block *tm) { /* Paired with the WRITE_ONCE() in tcpm_new() */ return READ_ONCE(tm->tcpm_net); } static bool tcp_metric_locked(struct tcp_metrics_block *tm, enum tcp_metric_index idx) { /* Paired with WRITE_ONCE() in tcpm_suck_dst() */ return READ_ONCE(tm->tcpm_lock) & (1 << idx); } static u32 tcp_metric_get(const struct tcp_metrics_block *tm, enum tcp_metric_index idx) { /* Paired with WRITE_ONCE() in tcp_metric_set() */ return READ_ONCE(tm->tcpm_vals[idx]); } static void tcp_metric_set(struct tcp_metrics_block *tm, enum tcp_metric_index idx, u32 val) { /* Paired with READ_ONCE() in tcp_metric_get() */ WRITE_ONCE(tm->tcpm_vals[idx], val); } static bool addr_same(const struct inetpeer_addr *a, const struct inetpeer_addr *b) { return (a->family == b->family) && !inetpeer_addr_cmp(a, b); } struct tcpm_hash_bucket { struct tcp_metrics_block __rcu *chain; }; static struct tcpm_hash_bucket *tcp_metrics_hash __read_mostly; static unsigned int tcp_metrics_hash_log __read_mostly; static DEFINE_SPINLOCK(tcp_metrics_lock); static DEFINE_SEQLOCK(fastopen_seqlock); static void tcpm_suck_dst(struct tcp_metrics_block *tm, const struct dst_entry *dst, bool fastopen_clear) { u32 msval; u32 val; WRITE_ONCE(tm->tcpm_stamp, jiffies); val = 0; if (dst_metric_locked(dst, RTAX_RTT)) val |= 1 << TCP_METRIC_RTT; if (dst_metric_locked(dst, RTAX_RTTVAR)) val |= 1 << TCP_METRIC_RTTVAR; if (dst_metric_locked(dst, RTAX_SSTHRESH)) val |= 1 << TCP_METRIC_SSTHRESH; if (dst_metric_locked(dst, RTAX_CWND)) val |= 1 << TCP_METRIC_CWND; if (dst_metric_locked(dst, RTAX_REORDERING)) val |= 1 << TCP_METRIC_REORDERING; /* Paired with READ_ONCE() in tcp_metric_locked() */ WRITE_ONCE(tm->tcpm_lock, val); msval = dst_metric_raw(dst, RTAX_RTT); tcp_metric_set(tm, TCP_METRIC_RTT, msval * USEC_PER_MSEC); msval = dst_metric_raw(dst, RTAX_RTTVAR); tcp_metric_set(tm, TCP_METRIC_RTTVAR, msval * USEC_PER_MSEC); tcp_metric_set(tm, TCP_METRIC_SSTHRESH, dst_metric_raw(dst, RTAX_SSTHRESH)); tcp_metric_set(tm, TCP_METRIC_CWND, dst_metric_raw(dst, RTAX_CWND)); tcp_metric_set(tm, TCP_METRIC_REORDERING, dst_metric_raw(dst, RTAX_REORDERING)); if (fastopen_clear) { write_seqlock(&fastopen_seqlock); tm->tcpm_fastopen.mss = 0; tm->tcpm_fastopen.syn_loss = 0; tm->tcpm_fastopen.try_exp = 0; tm->tcpm_fastopen.cookie.exp = false; tm->tcpm_fastopen.cookie.len = 0; write_sequnlock(&fastopen_seqlock); } } #define TCP_METRICS_TIMEOUT (60 * 60 * HZ) static void tcpm_check_stamp(struct tcp_metrics_block *tm, const struct dst_entry *dst) { unsigned long limit; if (!tm) return; limit = READ_ONCE(tm->tcpm_stamp) + TCP_METRICS_TIMEOUT; if (unlikely(time_after(jiffies, limit))) tcpm_suck_dst(tm, dst, false); } #define TCP_METRICS_RECLAIM_DEPTH 5 #define TCP_METRICS_RECLAIM_PTR (struct tcp_metrics_block *) 0x1UL #define deref_locked(p) \ rcu_dereference_protected(p, lockdep_is_held(&tcp_metrics_lock)) static struct 
tcp_metrics_block *tcpm_new(struct dst_entry *dst, struct inetpeer_addr *saddr, struct inetpeer_addr *daddr, unsigned int hash) { struct tcp_metrics_block *tm; struct net *net; bool reclaim = false; spin_lock_bh(&tcp_metrics_lock); net = dev_net_rcu(dst->dev); /* While waiting for the spin-lock the cache might have been populated * with this entry and so we have to check again. */ tm = __tcp_get_metrics(saddr, daddr, net, hash); if (tm == TCP_METRICS_RECLAIM_PTR) { reclaim = true; tm = NULL; } if (tm) { tcpm_check_stamp(tm, dst); goto out_unlock; } if (unlikely(reclaim)) { struct tcp_metrics_block *oldest; oldest = deref_locked(tcp_metrics_hash[hash].chain); for (tm = deref_locked(oldest->tcpm_next); tm; tm = deref_locked(tm->tcpm_next)) { if (time_before(READ_ONCE(tm->tcpm_stamp), READ_ONCE(oldest->tcpm_stamp))) oldest = tm; } tm = oldest; } else { tm = kzalloc(sizeof(*tm), GFP_ATOMIC); if (!tm) goto out_unlock; } /* Paired with the READ_ONCE() in tm_net() */ WRITE_ONCE(tm->tcpm_net, net); tm->tcpm_saddr = *saddr; tm->tcpm_daddr = *daddr; tcpm_suck_dst(tm, dst, reclaim); if (likely(!reclaim)) { tm->tcpm_next = tcp_metrics_hash[hash].chain; rcu_assign_pointer(tcp_metrics_hash[hash].chain, tm); } out_unlock: spin_unlock_bh(&tcp_metrics_lock); return tm; } static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth) { if (tm) return tm; if (depth > TCP_METRICS_RECLAIM_DEPTH) return TCP_METRICS_RECLAIM_PTR; return NULL; } static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *saddr, const struct inetpeer_addr *daddr, struct net *net, unsigned int hash) { struct tcp_metrics_block *tm; int depth = 0; for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm; tm = rcu_dereference(tm->tcpm_next)) { if (addr_same(&tm->tcpm_saddr, saddr) && addr_same(&tm->tcpm_daddr, daddr) && net_eq(tm_net(tm), net)) break; depth++; } return tcp_get_encode(tm, depth); } static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req, struct dst_entry *dst) { struct tcp_metrics_block *tm; struct inetpeer_addr saddr, daddr; unsigned int hash; struct net *net; saddr.family = req->rsk_ops->family; daddr.family = req->rsk_ops->family; switch (daddr.family) { case AF_INET: inetpeer_set_addr_v4(&saddr, inet_rsk(req)->ir_loc_addr); inetpeer_set_addr_v4(&daddr, inet_rsk(req)->ir_rmt_addr); hash = ipv4_addr_hash(inet_rsk(req)->ir_rmt_addr); break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: inetpeer_set_addr_v6(&saddr, &inet_rsk(req)->ir_v6_loc_addr); inetpeer_set_addr_v6(&daddr, &inet_rsk(req)->ir_v6_rmt_addr); hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr); break; #endif default: return NULL; } net = dev_net_rcu(dst->dev); hash ^= net_hash_mix(net); hash = hash_32(hash, tcp_metrics_hash_log); for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm; tm = rcu_dereference(tm->tcpm_next)) { if (addr_same(&tm->tcpm_saddr, &saddr) && addr_same(&tm->tcpm_daddr, &daddr) && net_eq(tm_net(tm), net)) break; } tcpm_check_stamp(tm, dst); return tm; } static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk, struct dst_entry *dst, bool create) { struct tcp_metrics_block *tm; struct inetpeer_addr saddr, daddr; unsigned int hash; struct net *net; if (sk->sk_family == AF_INET) { inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr); inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr); hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr); } #if IS_ENABLED(CONFIG_IPV6) else if (sk->sk_family == AF_INET6) { if (ipv6_addr_v4mapped(&sk->sk_v6_daddr)) { 
inetpeer_set_addr_v4(&saddr, inet_sk(sk)->inet_saddr); inetpeer_set_addr_v4(&daddr, inet_sk(sk)->inet_daddr); hash = ipv4_addr_hash(inet_sk(sk)->inet_daddr); } else { inetpeer_set_addr_v6(&saddr, &sk->sk_v6_rcv_saddr); inetpeer_set_addr_v6(&daddr, &sk->sk_v6_daddr); hash = ipv6_addr_hash(&sk->sk_v6_daddr); } } #endif else return NULL; net = dev_net_rcu(dst->dev); hash ^= net_hash_mix(net); hash = hash_32(hash, tcp_metrics_hash_log); tm = __tcp_get_metrics(&saddr, &daddr, net, hash); if (tm == TCP_METRICS_RECLAIM_PTR) tm = NULL; if (!tm && create) tm = tcpm_new(dst, &saddr, &daddr, hash); else tcpm_check_stamp(tm, dst); return tm; } /* Save metrics learned by this TCP session. This function is called * only, when TCP finishes successfully i.e. when it enters TIME-WAIT * or goes from LAST-ACK to CLOSE. */ void tcp_update_metrics(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); struct dst_entry *dst = __sk_dst_get(sk); struct tcp_sock *tp = tcp_sk(sk); struct net *net = sock_net(sk); struct tcp_metrics_block *tm; unsigned long rtt; u32 val; int m; sk_dst_confirm(sk); if (READ_ONCE(net->ipv4.sysctl_tcp_nometrics_save) || !dst) return; rcu_read_lock(); if (icsk->icsk_backoff || !tp->srtt_us) { /* This session failed to estimate rtt. Why? * Probably, no packets returned in time. Reset our * results. */ tm = tcp_get_metrics(sk, dst, false); if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT)) tcp_metric_set(tm, TCP_METRIC_RTT, 0); goto out_unlock; } else tm = tcp_get_metrics(sk, dst, true); if (!tm) goto out_unlock; rtt = tcp_metric_get(tm, TCP_METRIC_RTT); m = rtt - tp->srtt_us; /* If newly calculated rtt larger than stored one, store new * one. Otherwise, use EWMA. Remember, rtt overestimation is * always better than underestimation. */ if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) { if (m <= 0) rtt = tp->srtt_us; else rtt -= (m >> 3); tcp_metric_set(tm, TCP_METRIC_RTT, rtt); } if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) { unsigned long var; if (m < 0) m = -m; /* Scale deviation to rttvar fixed point */ m >>= 1; if (m < tp->mdev_us) m = tp->mdev_us; var = tcp_metric_get(tm, TCP_METRIC_RTTVAR); if (m >= var) var = m; else var -= (var - m) >> 2; tcp_metric_set(tm, TCP_METRIC_RTTVAR, var); } if (tcp_in_initial_slowstart(tp)) { /* Slow start still did not finish. */ if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) && !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) { val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH); if (val && (tcp_snd_cwnd(tp) >> 1) > val) tcp_metric_set(tm, TCP_METRIC_SSTHRESH, tcp_snd_cwnd(tp) >> 1); } if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) { val = tcp_metric_get(tm, TCP_METRIC_CWND); if (tcp_snd_cwnd(tp) > val) tcp_metric_set(tm, TCP_METRIC_CWND, tcp_snd_cwnd(tp)); } } else if (!tcp_in_slow_start(tp) && icsk->icsk_ca_state == TCP_CA_Open) { /* Cong. avoidance phase, cwnd is reliable. */ if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) && !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) tcp_metric_set(tm, TCP_METRIC_SSTHRESH, max(tcp_snd_cwnd(tp) >> 1, tp->snd_ssthresh)); if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) { val = tcp_metric_get(tm, TCP_METRIC_CWND); tcp_metric_set(tm, TCP_METRIC_CWND, (val + tcp_snd_cwnd(tp)) >> 1); } } else { /* Else slow start did not finish, cwnd is non-sense, * ssthresh may be also invalid. 
*/ if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) { val = tcp_metric_get(tm, TCP_METRIC_CWND); tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_ssthresh) >> 1); } if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) && !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) { val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH); if (val && tp->snd_ssthresh > val) tcp_metric_set(tm, TCP_METRIC_SSTHRESH, tp->snd_ssthresh); } if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) { val = tcp_metric_get(tm, TCP_METRIC_REORDERING); if (val < tp->reordering && tp->reordering != READ_ONCE(net->ipv4.sysctl_tcp_reordering)) tcp_metric_set(tm, TCP_METRIC_REORDERING, tp->reordering); } } WRITE_ONCE(tm->tcpm_stamp, jiffies); out_unlock: rcu_read_unlock(); } /* Initialize metrics on socket. */ void tcp_init_metrics(struct sock *sk) { struct dst_entry *dst = __sk_dst_get(sk); struct tcp_sock *tp = tcp_sk(sk); struct net *net = sock_net(sk); struct tcp_metrics_block *tm; u32 val, crtt = 0; /* cached RTT scaled by 8 */ sk_dst_confirm(sk); /* ssthresh may have been reduced unnecessarily during. * 3WHS. Restore it back to its initial default. */ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; if (!dst) goto reset; rcu_read_lock(); tm = tcp_get_metrics(sk, dst, false); if (!tm) { rcu_read_unlock(); goto reset; } if (tcp_metric_locked(tm, TCP_METRIC_CWND)) tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND); val = READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) ? 0 : tcp_metric_get(tm, TCP_METRIC_SSTHRESH); if (val) { tp->snd_ssthresh = val; if (tp->snd_ssthresh > tp->snd_cwnd_clamp) tp->snd_ssthresh = tp->snd_cwnd_clamp; } val = tcp_metric_get(tm, TCP_METRIC_REORDERING); if (val && tp->reordering != val) tp->reordering = val; crtt = tcp_metric_get(tm, TCP_METRIC_RTT); rcu_read_unlock(); reset: /* The initial RTT measurement from the SYN/SYN-ACK is not ideal * to seed the RTO for later data packets because SYN packets are * small. Use the per-dst cached values to seed the RTO but keep * the RTT estimator variables intact (e.g., srtt, mdev, rttvar). * Later the RTO will be updated immediately upon obtaining the first * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only * influences the first RTO but not later RTT estimation. * * But if RTT is not available from the SYN (due to retransmits or * syn cookies) or the cache, force a conservative 3secs timeout. * * A bit of theory. RTT is time passed after "normal" sized packet * is sent until it is ACKed. In normal circumstances sending small * packets force peer to delay ACKs and calculation is correct too. * The algorithm is adaptive and, provided we follow specs, it * NEVER underestimate RTT. BUT! If peer tries to make some clever * tricks sort of "quick acks" for time long enough to decrease RTT * to low value, and then abruptly stops to do it and starts to delay * ACKs, wait for troubles. */ if (crtt > tp->srtt_us) { /* Set RTO like tcp_rtt_estimator(), but from cached RTT. */ crtt /= 8 * USEC_PER_SEC / HZ; inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk)); } else if (tp->srtt_us == 0) { /* RFC6298: 5.7 We've failed to get a valid RTT sample from * 3WHS. This is most likely due to retransmission, * including spurious one. Reset the RTO back to 3secs * from the more aggressive 1sec to avoid more spurious * retransmission. 
*/ tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK); tp->mdev_us = tp->mdev_max_us = tp->rttvar_us; inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK; } } bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst) { struct tcp_metrics_block *tm; bool ret; if (!dst) return false; rcu_read_lock(); tm = __tcp_get_metrics_req(req, dst); if (tm && tcp_metric_get(tm, TCP_METRIC_RTT)) ret = true; else ret = false; rcu_read_unlock(); return ret; } void tcp_fastopen_cache_get(struct sock *sk, u16 *mss, struct tcp_fastopen_cookie *cookie) { struct tcp_metrics_block *tm; rcu_read_lock(); tm = tcp_get_metrics(sk, __sk_dst_get(sk), false); if (tm) { struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen; unsigned int seq; do { seq = read_seqbegin(&fastopen_seqlock); if (tfom->mss) *mss = tfom->mss; *cookie = tfom->cookie; if (cookie->len <= 0 && tfom->try_exp == 1) cookie->exp = true; } while (read_seqretry(&fastopen_seqlock, seq)); } rcu_read_unlock(); } void tcp_fastopen_cache_set(struct sock *sk, u16 mss, struct tcp_fastopen_cookie *cookie, bool syn_lost, u16 try_exp) { struct dst_entry *dst = __sk_dst_get(sk); struct tcp_metrics_block *tm; if (!dst) return; rcu_read_lock(); tm = tcp_get_metrics(sk, dst, true); if (tm) { struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen; write_seqlock_bh(&fastopen_seqlock); if (mss) tfom->mss = mss; if (cookie && cookie->len > 0) tfom->cookie = *cookie; else if (try_exp > tfom->try_exp && tfom->cookie.len <= 0 && !tfom->cookie.exp) tfom->try_exp = try_exp; if (syn_lost) { ++tfom->syn_loss; tfom->last_syn_loss = jiffies; } else tfom->syn_loss = 0; write_sequnlock_bh(&fastopen_seqlock); } rcu_read_unlock(); } static struct genl_family tcp_metrics_nl_family; static const struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = { [TCP_METRICS_ATTR_ADDR_IPV4] = { .type = NLA_U32, }, [TCP_METRICS_ATTR_ADDR_IPV6] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)), [TCP_METRICS_ATTR_SADDR_IPV4] = { .type = NLA_U32, }, [TCP_METRICS_ATTR_SADDR_IPV6] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)), /* Following attributes are not received for GET/DEL, * we keep them for reference */ #if 0 [TCP_METRICS_ATTR_AGE] = { .type = NLA_MSECS, }, [TCP_METRICS_ATTR_TW_TSVAL] = { .type = NLA_U32, }, [TCP_METRICS_ATTR_TW_TS_STAMP] = { .type = NLA_S32, }, [TCP_METRICS_ATTR_VALS] = { .type = NLA_NESTED, }, [TCP_METRICS_ATTR_FOPEN_MSS] = { .type = NLA_U16, }, [TCP_METRICS_ATTR_FOPEN_SYN_DROPS] = { .type = NLA_U16, }, [TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS] = { .type = NLA_MSECS, }, [TCP_METRICS_ATTR_FOPEN_COOKIE] = { .type = NLA_BINARY, .len = TCP_FASTOPEN_COOKIE_MAX, }, #endif }; /* Add attributes, caller cancels its header on failure */ static int tcp_metrics_fill_info(struct sk_buff *msg, struct tcp_metrics_block *tm) { struct nlattr *nest; int i; switch (tm->tcpm_daddr.family) { case AF_INET: if (nla_put_in_addr(msg, TCP_METRICS_ATTR_ADDR_IPV4, inetpeer_get_addr_v4(&tm->tcpm_daddr)) < 0) goto nla_put_failure; if (nla_put_in_addr(msg, TCP_METRICS_ATTR_SADDR_IPV4, inetpeer_get_addr_v4(&tm->tcpm_saddr)) < 0) goto nla_put_failure; break; case AF_INET6: if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_ADDR_IPV6, inetpeer_get_addr_v6(&tm->tcpm_daddr)) < 0) goto nla_put_failure; if (nla_put_in6_addr(msg, TCP_METRICS_ATTR_SADDR_IPV6, inetpeer_get_addr_v6(&tm->tcpm_saddr)) < 0) goto nla_put_failure; break; default: return -EAFNOSUPPORT; } if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE, jiffies - READ_ONCE(tm->tcpm_stamp), TCP_METRICS_ATTR_PAD) < 0) goto nla_put_failure; { int n = 
0; nest = nla_nest_start_noflag(msg, TCP_METRICS_ATTR_VALS); if (!nest) goto nla_put_failure; for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) { u32 val = tcp_metric_get(tm, i); if (!val) continue; if (i == TCP_METRIC_RTT) { if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1, val) < 0) goto nla_put_failure; n++; val = max(val / 1000, 1U); } if (i == TCP_METRIC_RTTVAR) { if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1, val) < 0) goto nla_put_failure; n++; val = max(val / 1000, 1U); } if (nla_put_u32(msg, i + 1, val) < 0) goto nla_put_failure; n++; } if (n) nla_nest_end(msg, nest); else nla_nest_cancel(msg, nest); } { struct tcp_fastopen_metrics tfom_copy[1], *tfom; unsigned int seq; do { seq = read_seqbegin(&fastopen_seqlock); tfom_copy[0] = tm->tcpm_fastopen; } while (read_seqretry(&fastopen_seqlock, seq)); tfom = tfom_copy; if (tfom->mss && nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS, tfom->mss) < 0) goto nla_put_failure; if (tfom->syn_loss && (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS, tfom->syn_loss) < 0 || nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS, jiffies - tfom->last_syn_loss, TCP_METRICS_ATTR_PAD) < 0)) goto nla_put_failure; if (tfom->cookie.len > 0 && nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE, tfom->cookie.len, tfom->cookie.val) < 0) goto nla_put_failure; } return 0; nla_put_failure: return -EMSGSIZE; } static int tcp_metrics_dump_info(struct sk_buff *skb, struct netlink_callback *cb, struct tcp_metrics_block *tm) { void *hdr; hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &tcp_metrics_nl_family, NLM_F_MULTI, TCP_METRICS_CMD_GET); if (!hdr) return -EMSGSIZE; if (tcp_metrics_fill_info(skb, tm) < 0) goto nla_put_failure; genlmsg_end(skb, hdr); return 0; nla_put_failure: genlmsg_cancel(skb, hdr); return -EMSGSIZE; } static int tcp_metrics_nl_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); unsigned int max_rows = 1U << tcp_metrics_hash_log; unsigned int row, s_row = cb->args[0]; int s_col = cb->args[1], col = s_col; int res = 0; for (row = s_row; row < max_rows; row++, s_col = 0) { struct tcp_metrics_block *tm; struct tcpm_hash_bucket *hb = tcp_metrics_hash + row; rcu_read_lock(); for (col = 0, tm = rcu_dereference(hb->chain); tm; tm = rcu_dereference(tm->tcpm_next), col++) { if (!net_eq(tm_net(tm), net)) continue; if (col < s_col) continue; res = tcp_metrics_dump_info(skb, cb, tm); if (res < 0) { rcu_read_unlock(); goto done; } } rcu_read_unlock(); } done: cb->args[0] = row; cb->args[1] = col; return res; } static int __parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr, unsigned int *hash, int optional, int v4, int v6) { struct nlattr *a; a = info->attrs[v4]; if (a) { inetpeer_set_addr_v4(addr, nla_get_in_addr(a)); if (hash) *hash = ipv4_addr_hash(inetpeer_get_addr_v4(addr)); return 0; } a = info->attrs[v6]; if (a) { struct in6_addr in6; in6 = nla_get_in6_addr(a); inetpeer_set_addr_v6(addr, &in6); if (hash) *hash = ipv6_addr_hash(inetpeer_get_addr_v6(addr)); return 0; } return optional ? 
1 : -EAFNOSUPPORT; } static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr, unsigned int *hash, int optional) { return __parse_nl_addr(info, addr, hash, optional, TCP_METRICS_ATTR_ADDR_IPV4, TCP_METRICS_ATTR_ADDR_IPV6); } static int parse_nl_saddr(struct genl_info *info, struct inetpeer_addr *addr) { return __parse_nl_addr(info, addr, NULL, 0, TCP_METRICS_ATTR_SADDR_IPV4, TCP_METRICS_ATTR_SADDR_IPV6); } static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info) { struct tcp_metrics_block *tm; struct inetpeer_addr saddr, daddr; unsigned int hash; struct sk_buff *msg; struct net *net = genl_info_net(info); void *reply; int ret; bool src = true; ret = parse_nl_addr(info, &daddr, &hash, 0); if (ret < 0) return ret; ret = parse_nl_saddr(info, &saddr); if (ret < 0) src = false; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0, info->genlhdr->cmd); if (!reply) goto nla_put_failure; hash ^= net_hash_mix(net); hash = hash_32(hash, tcp_metrics_hash_log); ret = -ESRCH; rcu_read_lock(); for (tm = rcu_dereference(tcp_metrics_hash[hash].chain); tm; tm = rcu_dereference(tm->tcpm_next)) { if (addr_same(&tm->tcpm_daddr, &daddr) && (!src || addr_same(&tm->tcpm_saddr, &saddr)) && net_eq(tm_net(tm), net)) { ret = tcp_metrics_fill_info(msg, tm); break; } } rcu_read_unlock(); if (ret < 0) goto out_free; genlmsg_end(msg, reply); return genlmsg_reply(msg, info); nla_put_failure: ret = -EMSGSIZE; out_free: nlmsg_free(msg); return ret; } static void tcp_metrics_flush_all(struct net *net) { unsigned int max_rows = 1U << tcp_metrics_hash_log; struct tcpm_hash_bucket *hb = tcp_metrics_hash; struct tcp_metrics_block *tm; unsigned int row; for (row = 0; row < max_rows; row++, hb++) { struct tcp_metrics_block __rcu **pp = &hb->chain; bool match; if (!rcu_access_pointer(*pp)) continue; spin_lock_bh(&tcp_metrics_lock); for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) { match = net ? 
net_eq(tm_net(tm), net) : !refcount_read(&tm_net(tm)->ns.count); if (match) { rcu_assign_pointer(*pp, tm->tcpm_next); kfree_rcu(tm, rcu_head); } else { pp = &tm->tcpm_next; } } spin_unlock_bh(&tcp_metrics_lock); cond_resched(); } } static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info) { struct tcpm_hash_bucket *hb; struct tcp_metrics_block *tm; struct tcp_metrics_block __rcu **pp; struct inetpeer_addr saddr, daddr; unsigned int hash; struct net *net = genl_info_net(info); int ret; bool src = true, found = false; ret = parse_nl_addr(info, &daddr, &hash, 1); if (ret < 0) return ret; if (ret > 0) { tcp_metrics_flush_all(net); return 0; } ret = parse_nl_saddr(info, &saddr); if (ret < 0) src = false; hash ^= net_hash_mix(net); hash = hash_32(hash, tcp_metrics_hash_log); hb = tcp_metrics_hash + hash; pp = &hb->chain; spin_lock_bh(&tcp_metrics_lock); for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) { if (addr_same(&tm->tcpm_daddr, &daddr) && (!src || addr_same(&tm->tcpm_saddr, &saddr)) && net_eq(tm_net(tm), net)) { rcu_assign_pointer(*pp, tm->tcpm_next); kfree_rcu(tm, rcu_head); found = true; } else { pp = &tm->tcpm_next; } } spin_unlock_bh(&tcp_metrics_lock); if (!found) return -ESRCH; return 0; } static const struct genl_small_ops tcp_metrics_nl_ops[] = { { .cmd = TCP_METRICS_CMD_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = tcp_metrics_nl_cmd_get, .dumpit = tcp_metrics_nl_dump, }, { .cmd = TCP_METRICS_CMD_DEL, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = tcp_metrics_nl_cmd_del, .flags = GENL_ADMIN_PERM, }, }; static struct genl_family tcp_metrics_nl_family __ro_after_init = { .hdrsize = 0, .name = TCP_METRICS_GENL_NAME, .version = TCP_METRICS_GENL_VERSION, .maxattr = TCP_METRICS_ATTR_MAX, .policy = tcp_metrics_nl_policy, .netnsok = true, .parallel_ops = true, .module = THIS_MODULE, .small_ops = tcp_metrics_nl_ops, .n_small_ops = ARRAY_SIZE(tcp_metrics_nl_ops), .resv_start_op = TCP_METRICS_CMD_DEL + 1, }; static unsigned int tcpmhash_entries __initdata; static int __init set_tcpmhash_entries(char *str) { ssize_t ret; if (!str) return 0; ret = kstrtouint(str, 0, &tcpmhash_entries); if (ret) return 0; return 1; } __setup("tcpmhash_entries=", set_tcpmhash_entries); static void __init tcp_metrics_hash_alloc(void) { unsigned int slots = tcpmhash_entries; size_t size; if (!slots) { if (totalram_pages() >= 128 * 1024) slots = 16 * 1024; else slots = 8 * 1024; } tcp_metrics_hash_log = order_base_2(slots); size = sizeof(struct tcpm_hash_bucket) << tcp_metrics_hash_log; tcp_metrics_hash = kvzalloc(size, GFP_KERNEL); if (!tcp_metrics_hash) panic("Could not allocate the tcp_metrics hash table\n"); } static void __net_exit tcp_net_metrics_exit_batch(struct list_head *net_exit_list) { tcp_metrics_flush_all(NULL); } static __net_initdata struct pernet_operations tcp_net_metrics_ops = { .exit_batch = tcp_net_metrics_exit_batch, }; void __init tcp_metrics_init(void) { int ret; tcp_metrics_hash_alloc(); ret = register_pernet_subsys(&tcp_net_metrics_ops); if (ret < 0) panic("Could not register tcp_net_metrics_ops\n"); ret = genl_register_family(&tcp_metrics_nl_family); if (ret < 0) panic("Could not register tcp_metrics generic netlink\n"); } |
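/*
 * Editor's illustration (not part of tcp_metrics.c): the RTO seeding in
 * tcp_init_metrics() above is easy to misread because the cached RTT, like
 * tp->srtt_us, is kept in microseconds scaled by 8.  A minimal standalone
 * sketch of that arithmetic, assuming HZ=1000 and the usual 200 ms
 * tcp_rto_min() default:
 */
#include <stdio.h>

#define HZ		1000		/* assumed kernel tick rate */
#define USEC_PER_SEC	1000000L
#define TCP_RTO_MIN	(HZ / 5)	/* assumed 200 ms minimum RTO, in jiffies */

int main(void)
{
	long crtt = 8 * 50000;		/* cached smoothed RTT: 50 ms, scaled by 8 */
	long rto;

	crtt /= 8 * USEC_PER_SEC / HZ;	/* scaled usec -> jiffies: 50 */
	rto = crtt + (2 * crtt > TCP_RTO_MIN ? 2 * crtt : TCP_RTO_MIN);
	printf("seeded RTO = %ld jiffies (%ld ms)\n", rto, rto * 1000 / HZ);
	return 0;			/* prints: seeded RTO = 250 jiffies (250 ms) */
}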
// SPDX-License-Identifier: GPL-2.0-or-later
/* X.509 certificate parser
 *
 * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) "X.509: "fmt
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/oid_registry.h>
#include <crypto/public_key.h>
#include "x509_parser.h"
#include "x509.asn1.h"
#include "x509_akid.asn1.h"

struct x509_parse_context {
	struct x509_certificate	*cert;		/* Certificate being constructed */
	unsigned long	data;			/* Start of data */
	const void	*key;			/* Key data */
	size_t		key_size;		/* Size of key data */
	const void	*params;		/* Key parameters */
	size_t		params_size;		/* Size of key parameters */
	enum OID	key_algo;		/* Algorithm used by the cert's key */
	enum OID	last_oid;		/* Last OID encountered */
	enum OID	sig_algo;		/* Algorithm used to sign the cert */
	u8		o_size;			/* Size of organizationName (O) */
	u8		cn_size;		/* Size of commonName (CN) */
	u8		email_size;		/* Size of emailAddress */
	u16		o_offset;		/* Offset of organizationName (O) */
	u16		cn_offset;		/* Offset of commonName (CN) */
	u16		email_offset;		/* Offset of emailAddress */
	unsigned	raw_akid_size;
	const void	*raw_akid;		/* Raw authorityKeyId in ASN.1 */
	const void	*akid_raw_issuer;	/* Raw directoryName in authorityKeyId */
	unsigned	akid_raw_issuer_size;
};

/*
 * Free an X.509 certificate
 */
void x509_free_certificate(struct x509_certificate *cert)
{
	if (cert) {
		public_key_free(cert->pub);
		public_key_signature_free(cert->sig);
		kfree(cert->issuer);
		kfree(cert->subject);
		kfree(cert->id);
		kfree(cert->skid);
		kfree(cert);
	}
}
EXPORT_SYMBOL_GPL(x509_free_certificate);

/*
 * Parse an X.509 certificate
 */
struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
{
	struct x509_certificate *cert __free(x509_free_certificate);
	struct x509_parse_context *ctx __free(kfree) = NULL;
	struct asymmetric_key_id *kid;
	long ret;

	cert = kzalloc(sizeof(struct x509_certificate), GFP_KERNEL);
	if (!cert)
		return ERR_PTR(-ENOMEM);
	cert->pub = kzalloc(sizeof(struct public_key), GFP_KERNEL);
	if (!cert->pub)
		return ERR_PTR(-ENOMEM);
	cert->sig = kzalloc(sizeof(struct public_key_signature), GFP_KERNEL);
	if (!cert->sig)
		return ERR_PTR(-ENOMEM);

	ctx = kzalloc(sizeof(struct x509_parse_context), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->cert = cert;
	ctx->data = (unsigned long)data;

	/* Attempt to decode the certificate */
	ret = asn1_ber_decoder(&x509_decoder, ctx, data, datalen);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Decode the AuthorityKeyIdentifier */
	if (ctx->raw_akid) {
		pr_devel("AKID: %u %*phN\n",
			 ctx->raw_akid_size, ctx->raw_akid_size, ctx->raw_akid);
		ret = asn1_ber_decoder(&x509_akid_decoder, ctx,
				       ctx->raw_akid, ctx->raw_akid_size);
		if (ret < 0) {
			pr_warn("Couldn't decode AuthKeyIdentifier\n");
			return ERR_PTR(ret);
		}
	}

	cert->pub->key = kmemdup(ctx->key, ctx->key_size, GFP_KERNEL);
	if (!cert->pub->key)
		return ERR_PTR(-ENOMEM);

	cert->pub->keylen = ctx->key_size;

	cert->pub->params = kmemdup(ctx->params, ctx->params_size, GFP_KERNEL);
	if (!cert->pub->params)
		return ERR_PTR(-ENOMEM);

	cert->pub->paramlen = ctx->params_size;
	cert->pub->algo = ctx->key_algo;

	/* Grab the signature bits */
	ret = x509_get_sig_params(cert);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Generate cert issuer + serial number key ID */
	kid = asymmetric_key_generate_id(cert->raw_serial,
					 cert->raw_serial_size,
					 cert->raw_issuer,
					 cert->raw_issuer_size);
	if (IS_ERR(kid))
		return ERR_CAST(kid);
	cert->id = kid;

	/* Detect self-signed certificates */
	ret = x509_check_for_self_signed(cert);
	if (ret < 0)
		return ERR_PTR(ret);

	return_ptr(cert);
}
EXPORT_SYMBOL_GPL(x509_cert_parse);

/*
 * Note an OID when we find one for later processing when we know how
 * to interpret it.
 */
int x509_note_OID(void *context, size_t hdrlen,
		  unsigned char tag,
		  const void *value, size_t vlen)
{
	struct x509_parse_context *ctx = context;

	ctx->last_oid = look_up_OID(value, vlen);
	if (ctx->last_oid == OID__NR) {
		char buffer[50];

		sprint_oid(value, vlen, buffer, sizeof(buffer));
		pr_debug("Unknown OID: [%lu] %s\n",
			 (unsigned long)value - ctx->data, buffer);
	}
	return 0;
}

/*
 * Save the position of the TBS data so that we can check the signature over it
 * later.
 */
int x509_note_tbs_certificate(void *context, size_t hdrlen,
			      unsigned char tag,
			      const void *value, size_t vlen)
{
	struct x509_parse_context *ctx = context;

	pr_debug("x509_note_tbs_certificate(,%zu,%02x,%ld,%zu)!\n",
		 hdrlen, tag, (unsigned long)value - ctx->data, vlen);

	ctx->cert->tbs = value - hdrlen;
	ctx->cert->tbs_size = vlen + hdrlen;
	return 0;
}

/*
 * Record the algorithm that was used to sign this certificate.
 */
int x509_note_sig_algo(void *context, size_t hdrlen, unsigned char tag,
		       const void *value, size_t vlen)
{
	struct x509_parse_context *ctx = context;

	pr_debug("PubKey Algo: %u\n", ctx->last_oid);

	switch (ctx->last_oid) {
	default:
		return -ENOPKG; /* Unsupported combination */

	case OID_sha1WithRSAEncryption:
		ctx->cert->sig->hash_algo = "sha1";
		goto rsa_pkcs1;

	case OID_sha256WithRSAEncryption:
		ctx->cert->sig->hash_algo = "sha256";
		goto rsa_pkcs1;

	case OID_sha384WithRSAEncryption:
		ctx->cert->sig->hash_algo = "sha384";
		goto rsa_pkcs1;

	case OID_sha512WithRSAEncryption:
		ctx->cert->sig->hash_algo = "sha512";
		goto rsa_pkcs1;

	case OID_sha224WithRSAEncryption:
		ctx->cert->sig->hash_algo = "sha224";
		goto rsa_pkcs1;

	case OID_id_ecdsa_with_sha1:
		ctx->cert->sig->hash_algo = "sha1";
		goto ecdsa;

	case OID_id_rsassa_pkcs1_v1_5_with_sha3_256:
		ctx->cert->sig->hash_algo = "sha3-256";
		goto rsa_pkcs1;

	case OID_id_rsassa_pkcs1_v1_5_with_sha3_384:
		ctx->cert->sig->hash_algo = "sha3-384";
		goto rsa_pkcs1;

	case OID_id_rsassa_pkcs1_v1_5_with_sha3_512:
		ctx->cert->sig->hash_algo = "sha3-512";
		goto rsa_pkcs1;

	case OID_id_ecdsa_with_sha224:
		ctx->cert->sig->hash_algo = "sha224";
		goto ecdsa;

	case OID_id_ecdsa_with_sha256:
		ctx->cert->sig->hash_algo = "sha256";
		goto ecdsa;

	case OID_id_ecdsa_with_sha384:
		ctx->cert->sig->hash_algo = "sha384";
		goto ecdsa;

	case OID_id_ecdsa_with_sha512:
		ctx->cert->sig->hash_algo = "sha512";
		goto ecdsa;

	case OID_id_ecdsa_with_sha3_256:
		ctx->cert->sig->hash_algo = "sha3-256";
		goto ecdsa;

	case OID_id_ecdsa_with_sha3_384:
		ctx->cert->sig->hash_algo = "sha3-384";
		goto ecdsa;

	case OID_id_ecdsa_with_sha3_512:
		ctx->cert->sig->hash_algo = "sha3-512";
		goto ecdsa;

	case OID_gost2012Signature256:
		ctx->cert->sig->hash_algo = "streebog256";
		goto ecrdsa;

	case OID_gost2012Signature512:
		ctx->cert->sig->hash_algo = "streebog512";
		goto ecrdsa;
	}

rsa_pkcs1:
	ctx->cert->sig->pkey_algo = "rsa";
	ctx->cert->sig->encoding = "pkcs1";
	ctx->sig_algo = ctx->last_oid;
	return 0;
ecrdsa:
	ctx->cert->sig->pkey_algo = "ecrdsa";
	ctx->cert->sig->encoding = "raw";
	ctx->sig_algo = ctx->last_oid;
	return 0;
ecdsa:
	ctx->cert->sig->pkey_algo = "ecdsa";
	ctx->cert->sig->encoding = "x962";
	ctx->sig_algo = ctx->last_oid;
	return 0;
}

/*
 * Note the whereabouts and type of the signature.
 */
int x509_note_signature(void *context, size_t hdrlen,
			unsigned char tag,
			const void *value, size_t vlen)
{
	struct x509_parse_context *ctx = context;

	pr_debug("Signature: alg=%u, size=%zu\n", ctx->last_oid, vlen);

	/*
	 * In X.509 certificates, the signature's algorithm is stored in two
	 * places: inside the TBSCertificate (the data that is signed), and
	 * alongside the signature.  These *must* match.
	 */
	if (ctx->last_oid != ctx->sig_algo) {
		pr_warn("signatureAlgorithm (%u) differs from tbsCertificate.signature (%u)\n",
			ctx->last_oid, ctx->sig_algo);
		return -EINVAL;
	}

	if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0 ||
	    strcmp(ctx->cert->sig->pkey_algo, "ecrdsa") == 0 ||
	    strcmp(ctx->cert->sig->pkey_algo, "ecdsa") == 0) {
		/* Discard the BIT STRING metadata */
		if (vlen < 1 || *(const u8 *)value != 0)
			return -EBADMSG;

		value++;
		vlen--;
	}

	ctx->cert->raw_sig = value;
	ctx->cert->raw_sig_size = vlen;
	return 0;
}

/*
 * Note the certificate serial number
 */
int x509_note_serial(void *context, size_t hdrlen,
		     unsigned char tag,
		     const void *value, size_t vlen)
{
	struct x509_parse_context *ctx = context;

	ctx->cert->raw_serial = value;
	ctx->cert->raw_serial_size = vlen;
	return 0;
}

/*
 * Note some of the name segments from which we'll fabricate a name.
 */
int x509_extract_name_segment(void *context, size_t hdrlen,
			      unsigned char tag,
			      const void *value, size_t vlen)
{
	struct x509_parse_context *ctx = context;

	switch (ctx->last_oid) {
	case OID_commonName:
		ctx->cn_size = vlen;
		ctx->cn_offset = (unsigned long)value - ctx->data;
		break;
	case OID_organizationName:
		ctx->o_size = vlen;
		ctx->o_offset = (unsigned long)value - ctx->data;
		break;
	case OID_email_address:
		ctx->email_size = vlen;
		ctx->email_offset = (unsigned long)value - ctx->data;
		break;
	default:
		break;
	}

	return 0;
}

/*
 * Fabricate and save the issuer and subject names
 */
static int x509_fabricate_name(struct x509_parse_context *ctx, size_t hdrlen,
			       unsigned char tag,
			       char **_name, size_t vlen)
{
	const void *name, *data = (const void *)ctx->data;
	size_t namesize;
	char *buffer;

	if (*_name)
		return -EINVAL;

	/* Empty name string if no material */
	if (!ctx->cn_size && !ctx->o_size && !ctx->email_size) {
		buffer = kzalloc(1, GFP_KERNEL);
		if (!buffer)
			return -ENOMEM;
		goto done;
	}

	if (ctx->cn_size && ctx->o_size) {
		/* Consider combining O and CN, but use only the CN if it is
		 * prefixed by the O, or a significant portion thereof.
		 */
		namesize = ctx->cn_size;
		name = data + ctx->cn_offset;
		if (ctx->cn_size >= ctx->o_size &&
		    memcmp(data + ctx->cn_offset, data + ctx->o_offset,
			   ctx->o_size) == 0)
			goto single_component;
		if (ctx->cn_size >= 7 &&
		    ctx->o_size >= 7 &&
		    memcmp(data + ctx->cn_offset, data + ctx->o_offset, 7) == 0)
			goto single_component;

		buffer = kmalloc(ctx->o_size + 2 + ctx->cn_size + 1,
				 GFP_KERNEL);
		if (!buffer)
			return -ENOMEM;

		memcpy(buffer,
		       data + ctx->o_offset, ctx->o_size);
		buffer[ctx->o_size + 0] = ':';
		buffer[ctx->o_size + 1] = ' ';
		memcpy(buffer + ctx->o_size + 2,
		       data + ctx->cn_offset, ctx->cn_size);
		buffer[ctx->o_size + 2 + ctx->cn_size] = 0;
		goto done;

	} else if (ctx->cn_size) {
		namesize = ctx->cn_size;
		name = data + ctx->cn_offset;
	} else if (ctx->o_size) {
		namesize = ctx->o_size;
		name = data + ctx->o_offset;
	} else {
		namesize = ctx->email_size;
		name = data + ctx->email_offset;
	}

single_component:
	buffer = kmalloc(namesize + 1, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;
	memcpy(buffer, name, namesize);
	buffer[namesize] = 0;

done:
	*_name = buffer;
	ctx->cn_size = 0;
	ctx->o_size = 0;
	ctx->email_size = 0;
	return 0;
}

int x509_note_issuer(void *context, size_t hdrlen,
		     unsigned char tag,
		     const void *value, size_t vlen)
{
	struct x509_parse_context *ctx = context;
	struct asymmetric_key_id *kid;

	ctx->cert->raw_issuer = value;
	ctx->cert->raw_issuer_size = vlen;

	if (!ctx->cert->sig->auth_ids[2]) {
		kid = asymmetric_key_generate_id(value, vlen, "", 0);
		if (IS_ERR(kid))
			return PTR_ERR(kid);
		ctx->cert->sig->auth_ids[2] = kid;
	}

	return x509_fabricate_name(ctx, hdrlen, tag, &ctx->cert->issuer, vlen);
}

int x509_note_subject(void *context, size_t hdrlen,
		      unsigned char tag,
		      const void *value, size_t vlen)
{
	struct x509_parse_context *ctx = context;

	ctx->cert->raw_subject = value;
	ctx->cert->raw_subject_size = vlen;
	return x509_fabricate_name(ctx, hdrlen, tag, &ctx->cert->subject, vlen);
}

/*
 * Extract the parameters for the public key
 */
int x509_note_params(void *context, size_t hdrlen,
		     unsigned char tag,
		     const void *value, size_t vlen)
{
	struct x509_parse_context *ctx = context;

	/*
	 * AlgorithmIdentifier is used three times in the x509, we should skip
	 * first and ignore third, using second one which is after subject and
	 * before subjectPublicKey.
	 */
	if (!ctx->cert->raw_subject || ctx->key)
		return 0;
	ctx->params = value - hdrlen;
	ctx->params_size = vlen + hdrlen;
	return 0;
}

/*
 * Extract the data for the public key algorithm
 */
int x509_extract_key_data(void *context, size_t hdrlen,
			  unsigned char tag,
			  const void *value, size_t vlen)
{
	struct x509_parse_context *ctx = context;
	enum OID oid;

	ctx->key_algo = ctx->last_oid;
	switch (ctx->last_oid) {
	case OID_rsaEncryption:
		ctx->cert->pub->pkey_algo = "rsa";
		break;
	case OID_gost2012PKey256:
	case OID_gost2012PKey512:
		ctx->cert->pub->pkey_algo = "ecrdsa";
		break;
	case OID_id_ecPublicKey:
		if (parse_OID(ctx->params, ctx->params_size, &oid) != 0)
			return -EBADMSG;

		switch (oid) {
		case OID_id_prime192v1:
			ctx->cert->pub->pkey_algo = "ecdsa-nist-p192";
			break;
		case OID_id_prime256v1:
			ctx->cert->pub->pkey_algo = "ecdsa-nist-p256";
			break;
		case OID_id_ansip384r1:
			ctx->cert->pub->pkey_algo = "ecdsa-nist-p384";
			break;
		case OID_id_ansip521r1:
			ctx->cert->pub->pkey_algo = "ecdsa-nist-p521";
			break;
		default:
			return -ENOPKG;
		}
		break;
	default:
		return -ENOPKG;
	}

	/* Discard the BIT STRING metadata */
	if (vlen < 1 || *(const u8 *)value != 0)
		return -EBADMSG;
	ctx->key = value + 1;
	ctx->key_size = vlen - 1;
	return 0;
}

/* The keyIdentifier in AuthorityKeyIdentifier SEQUENCE is tag(CONT,PRIM,0) */
#define SEQ_TAG_KEYID (ASN1_CONT << 6)

/*
 * Process certificate extensions that are used to qualify the certificate.
 */
int x509_process_extension(void *context, size_t hdrlen,
			   unsigned char tag,
			   const void *value, size_t vlen)
{
	struct x509_parse_context *ctx = context;
	struct asymmetric_key_id *kid;
	const unsigned char *v = value;

	pr_debug("Extension: %u\n", ctx->last_oid);

	if (ctx->last_oid == OID_subjectKeyIdentifier) {
		/* Get hold of the key fingerprint */
		if (ctx->cert->skid || vlen < 3)
			return -EBADMSG;
		if (v[0] != ASN1_OTS || v[1] != vlen - 2)
			return -EBADMSG;
		v += 2;
		vlen -= 2;

		ctx->cert->raw_skid_size = vlen;
		ctx->cert->raw_skid = v;
		kid = asymmetric_key_generate_id(v, vlen, "", 0);
		if (IS_ERR(kid))
			return PTR_ERR(kid);
		ctx->cert->skid = kid;
		pr_debug("subjkeyid %*phN\n", kid->len, kid->data);
		return 0;
	}

	if (ctx->last_oid == OID_keyUsage) {
		/*
		 * Get hold of the keyUsage bit string
		 * v[1] is the encoding size
		 *	(Expect either 0x02 or 0x03, making it 1 or 2 bytes)
		 * v[2] is the number of unused bits in the bit string
		 *	(If >= 3 keyCertSign is missing when v[1] = 0x02)
		 * v[3] and possibly v[4] contain the bit string
		 *
		 * From RFC 5280 4.2.1.3:
		 *   0x04 is where keyCertSign lands in this bit string
		 *   0x80 is where digitalSignature lands in this bit string
		 */
		if (v[0] != ASN1_BTS)
			return -EBADMSG;
		if (vlen < 4)
			return -EBADMSG;
		if (v[2] >= 8)
			return -EBADMSG;
		if (v[3] & 0x80)
			ctx->cert->pub->key_eflags |= 1 << KEY_EFLAG_DIGITALSIG;
		if (v[1] == 0x02 && v[2] <= 2 && (v[3] & 0x04))
			ctx->cert->pub->key_eflags |= 1 << KEY_EFLAG_KEYCERTSIGN;
		else if (vlen > 4 && v[1] == 0x03 && (v[3] & 0x04))
			ctx->cert->pub->key_eflags |= 1 << KEY_EFLAG_KEYCERTSIGN;
		return 0;
	}

	if (ctx->last_oid == OID_authorityKeyIdentifier) {
		/* Get hold of the CA key fingerprint */
		ctx->raw_akid = v;
		ctx->raw_akid_size = vlen;
		return 0;
	}

	if (ctx->last_oid == OID_basicConstraints) {
		/*
		 * Get hold of the basicConstraints
		 * v[1] is the encoding size
		 *	(Expect 0x2 or greater, making it 1 or more bytes)
		 * v[2] is the encoding type
		 *	(Expect an ASN1_BOOL for the CA)
		 * v[3] is the contents of the ASN1_BOOL
		 *	(Expect 1 if the CA is TRUE)
		 * vlen should match the entire extension size
		 */
		if (v[0] != (ASN1_CONS_BIT | ASN1_SEQ))
			return -EBADMSG;
		if (vlen < 2)
			return -EBADMSG;
		if (v[1] != vlen - 2)
			return -EBADMSG;
		if (vlen >= 4 && v[1] != 0 && v[2] == ASN1_BOOL && v[3] == 1)
			ctx->cert->pub->key_eflags |= 1 << KEY_EFLAG_CA;
		return 0;
	}

	return 0;
}

/**
 * x509_decode_time - Decode an X.509 time ASN.1 object
 * @_t: The time to fill in
 * @hdrlen: The length of the object header
 * @tag: The object tag
 * @value: The object value
 * @vlen: The size of the object value
 *
 * Decode an ASN.1 universal time or generalised time field into a struct the
 * kernel can handle and check it for validity.  The time is decoded thus:
 *
 *	[RFC5280 §4.1.2.5]
 *	CAs conforming to this profile MUST always encode certificate validity
 *	dates through the year 2049 as UTCTime; certificate validity dates in
 *	2050 or later MUST be encoded as GeneralizedTime.  Conforming
 *	applications MUST be able to process validity dates that are encoded in
 *	either UTCTime or GeneralizedTime.
 */
int x509_decode_time(time64_t *_t, size_t hdrlen,
		     unsigned char tag,
		     const unsigned char *value, size_t vlen)
{
	static const unsigned char month_lengths[] = { 31, 28, 31, 30, 31, 30,
						       31, 31, 30, 31, 30, 31 };
	const unsigned char *p = value;
	unsigned year, mon, day, hour, min, sec, mon_len;

#define dec2bin(X) ({ unsigned char x = (X) - '0'; if (x > 9) goto invalid_time; x; })
#define DD2bin(P) ({ unsigned x = dec2bin(P[0]) * 10 + dec2bin(P[1]); P += 2; x; })

	if (tag == ASN1_UNITIM) {
		/* UTCTime: YYMMDDHHMMSSZ */
		if (vlen != 13)
			goto unsupported_time;
		year = DD2bin(p);
		if (year >= 50)
			year += 1900;
		else
			year += 2000;
	} else if (tag == ASN1_GENTIM) {
		/* GenTime: YYYYMMDDHHMMSSZ */
		if (vlen != 15)
			goto unsupported_time;
		year = DD2bin(p) * 100 + DD2bin(p);
		if (year >= 1950 && year <= 2049)
			goto invalid_time;
	} else {
		goto unsupported_time;
	}

	mon  = DD2bin(p);
	day  = DD2bin(p);
	hour = DD2bin(p);
	min  = DD2bin(p);
	sec  = DD2bin(p);

	if (*p != 'Z')
		goto unsupported_time;

	if (year < 1970 ||
	    mon < 1 || mon > 12)
		goto invalid_time;

	mon_len = month_lengths[mon - 1];
	if (mon == 2) {
		if (year % 4 == 0) {
			mon_len = 29;
			if (year % 100 == 0) {
				mon_len = 28;
				if (year % 400 == 0)
					mon_len = 29;
			}
		}
	}

	if (day < 1 || day > mon_len ||
	    hour > 24 || /* ISO 8601 permits 24:00:00 as midnight tomorrow */
	    min > 59 ||
	    sec > 60)	 /* ISO 8601 permits leap seconds [X.680 46.3] */
		goto invalid_time;

	*_t = mktime64(year, mon, day, hour, min, sec);
	return 0;

unsupported_time:
	pr_debug("Got unsupported time [tag %02x]: '%*phN'\n",
		 tag, (int)vlen, value);
	return -EBADMSG;
invalid_time:
	pr_debug("Got invalid time [tag %02x]: '%*phN'\n",
		 tag, (int)vlen, value);
	return -EBADMSG;
}
EXPORT_SYMBOL_GPL(x509_decode_time);

int x509_note_not_before(void *context, size_t hdrlen,
			 unsigned char tag,
			 const void *value, size_t vlen)
{
	struct x509_parse_context *ctx = context;

	return x509_decode_time(&ctx->cert->valid_from, hdrlen, tag, value, vlen);
}

int x509_note_not_after(void *context, size_t hdrlen,
			unsigned char tag,
			const void *value, size_t vlen)
{
	struct x509_parse_context *ctx = context;

	return x509_decode_time(&ctx->cert->valid_to, hdrlen, tag, value, vlen);
}

/*
 * Note a key identifier-based AuthorityKeyIdentifier
 */
int x509_akid_note_kid(void *context, size_t hdrlen,
		       unsigned char tag,
		       const void *value, size_t vlen)
{
	struct x509_parse_context *ctx = context;
	struct asymmetric_key_id *kid;

	pr_debug("AKID: keyid: %*phN\n", (int)vlen, value);

	if (ctx->cert->sig->auth_ids[1])
		return 0;

	kid = asymmetric_key_generate_id(value, vlen, "", 0);
	if (IS_ERR(kid))
		return PTR_ERR(kid);
	pr_debug("authkeyid %*phN\n", kid->len, kid->data);
	ctx->cert->sig->auth_ids[1] = kid;
	return 0;
}

/*
 * Note a directoryName in an AuthorityKeyIdentifier
 */
int x509_akid_note_name(void *context, size_t hdrlen,
			unsigned char tag,
			const void *value, size_t vlen)
{
	struct x509_parse_context *ctx = context;

	pr_debug("AKID: name: %*phN\n", (int)vlen, value);

	ctx->akid_raw_issuer = value;
	ctx->akid_raw_issuer_size = vlen;
	return 0;
}

/*
 * Note a serial number in an AuthorityKeyIdentifier
 */
int x509_akid_note_serial(void *context, size_t hdrlen,
			  unsigned char tag,
			  const void *value, size_t vlen)
{
	struct x509_parse_context *ctx = context;
	struct asymmetric_key_id *kid;

	pr_debug("AKID: serial: %*phN\n", (int)vlen, value);

	if (!ctx->akid_raw_issuer || ctx->cert->sig->auth_ids[0])
		return 0;

	kid = asymmetric_key_generate_id(value,
					 vlen,
					 ctx->akid_raw_issuer,
					 ctx->akid_raw_issuer_size);
	if (IS_ERR(kid))
		return PTR_ERR(kid);

	pr_debug("authkeyid %*phN\n", kid->len, kid->data);
	ctx->cert->sig->auth_ids[0] = kid;
	return 0;
}
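/*
 * Editor's illustration (not part of x509_cert_parser.c): x509_decode_time()
 * above enforces the RFC 5280 §4.1.2.5 split between UTCTime and
 * GeneralizedTime, including the two-digit year pivot.  A standalone sketch
 * of just the UTCTime year rule, with a hypothetical helper name:
 */
#include <stdio.h>

/* UTCTime is "YYMMDDHHMMSSZ"; years 50-99 mean 1950-1999, 00-49 mean 2000-2049 */
static int utctime_year(const char *t, unsigned int len)
{
	unsigned int year;

	if (len != 13 || t[12] != 'Z')
		return -1;	/* x509_decode_time() rejects other lengths too */
	year = (t[0] - '0') * 10 + (t[1] - '0');
	return year >= 50 ? year + 1900 : year + 2000;
}

int main(void)
{
	printf("%d\n", utctime_year("491231235959Z", 13));	/* 2049 */
	printf("%d\n", utctime_year("500101000000Z", 13));	/* 1950 */
	return 0;
}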
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Do sleep inside a spin-lock
 * Copyright (c) 1999 by Takashi Iwai <tiwai@suse.de>
 */

#include <linux/export.h>
#include <sound/core.h>
#include "seq_lock.h"

/* wait until all locks are released */
void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
{
	int warn_count = 5 * HZ;

	if (atomic_read(lockp) < 0) {
		pr_warn("ALSA: seq_lock: lock trouble [counter = %d] in %s:%d\n",
			atomic_read(lockp), file, line);
		return;
	}
	while (atomic_read(lockp) > 0) {
		if (warn_count-- == 0)
			pr_warn("ALSA: seq_lock: waiting [%d left] in %s:%d\n",
				atomic_read(lockp), file, line);
		schedule_timeout_uninterruptible(1);
	}
}
EXPORT_SYMBOL(snd_use_lock_sync_helper);
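/*
 * Editor's usage sketch (not part of seq_lock.c): the helper above is the
 * sync side of a small use-count protocol.  It assumes the companion macros
 * from seq_lock.h (snd_use_lock_init/use/free/sync), which wrap an atomic_t
 * counter.
 */
#include <sound/core.h>
#include "seq_lock.h"

static snd_use_lock_t use_lock;		/* set up once with snd_use_lock_init() */

static void reader(void)
{
	snd_use_lock_use(&use_lock);	/* atomic_inc(): object is in use */
	/* ... access the shared object, possibly in atomic context ... */
	snd_use_lock_free(&use_lock);	/* atomic_dec(): done with it */
}

static void destroyer(void)
{
	snd_use_lock_sync(&use_lock);	/* expands to the helper above; sleeps in
					 * 1-jiffy steps until the count hits zero */
	/* ... no reader holds a reference any more; safe to free the object ... */
}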
| 6 6 6 6 6 6 6 6 6 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 | // SPDX-License-Identifier: GPL-2.0-or-later /* * Roccat driver for Linux * * Copyright (c) 2010 Stefan Achatz <erazor_de@users.sourceforge.net> */ /* */ /* * Module roccat is a char device used to report special events of roccat * hardware to userland. These events include requests for on-screen-display of * profile or dpi settings or requests for execution of macro sequences that are * not stored in device. The information in these events depends on hid device * implementation and contains data that is not available in a single hid event * or else hidraw could have been used. * It is inspired by hidraw, but uses only one circular buffer for all readers. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/cdev.h> #include <linux/poll.h> #include <linux/sched/signal.h> #include <linux/hid-roccat.h> #include <linux/module.h> #define ROCCAT_FIRST_MINOR 0 #define ROCCAT_MAX_DEVICES 8 /* should be a power of 2 for performance reason */ #define ROCCAT_CBUF_SIZE 16 struct roccat_report { uint8_t *value; }; struct roccat_device { unsigned int minor; int report_size; int open; int exist; wait_queue_head_t wait; struct device *dev; struct hid_device *hid; struct list_head readers; /* protects modifications of readers list */ struct mutex readers_lock; /* * circular_buffer has one writer and multiple readers with their own * read pointers */ struct roccat_report cbuf[ROCCAT_CBUF_SIZE]; int cbuf_end; struct mutex cbuf_lock; }; struct roccat_reader { struct list_head node; struct roccat_device *device; int cbuf_start; }; static int roccat_major; static struct cdev roccat_cdev; static struct roccat_device *devices[ROCCAT_MAX_DEVICES]; /* protects modifications of devices array */ static DEFINE_MUTEX(devices_lock); static ssize_t roccat_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct roccat_reader *reader = file->private_data; struct roccat_device *device = reader->device; struct roccat_report *report; ssize_t retval = 0, len; DECLARE_WAITQUEUE(wait, current); mutex_lock(&device->cbuf_lock); /* no data? */ if (reader->cbuf_start == device->cbuf_end) { add_wait_queue(&device->wait, &wait); set_current_state(TASK_INTERRUPTIBLE); /* wait for data */ while (reader->cbuf_start == device->cbuf_end) { if (file->f_flags & O_NONBLOCK) { retval = -EAGAIN; break; } if (signal_pending(current)) { retval = -ERESTARTSYS; break; } if (!device->exist) { retval = -EIO; break; } mutex_unlock(&device->cbuf_lock); schedule(); mutex_lock(&device->cbuf_lock); set_current_state(TASK_INTERRUPTIBLE); } set_current_state(TASK_RUNNING); remove_wait_queue(&device->wait, &wait); } /* here we either have data or a reason to return if retval is set */ if (retval) goto exit_unlock; report = &device->cbuf[reader->cbuf_start]; /* * If report is larger than requested amount of data, rest of report * is lost! */ len = device->report_size > count ? 
count : device->report_size; if (copy_to_user(buffer, report->value, len)) { retval = -EFAULT; goto exit_unlock; } retval += len; reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE; exit_unlock: mutex_unlock(&device->cbuf_lock); return retval; } static __poll_t roccat_poll(struct file *file, poll_table *wait) { struct roccat_reader *reader = file->private_data; poll_wait(file, &reader->device->wait, wait); if (reader->cbuf_start != reader->device->cbuf_end) return EPOLLIN | EPOLLRDNORM; if (!reader->device->exist) return EPOLLERR | EPOLLHUP; return 0; } static int roccat_open(struct inode *inode, struct file *file) { unsigned int minor = iminor(inode); struct roccat_reader *reader; struct roccat_device *device; int error = 0; reader = kzalloc(sizeof(struct roccat_reader), GFP_KERNEL); if (!reader) return -ENOMEM; mutex_lock(&devices_lock); device = devices[minor]; if (!device) { pr_emerg("roccat device with minor %d doesn't exist\n", minor); error = -ENODEV; goto exit_err_devices; } mutex_lock(&device->readers_lock); if (!device->open++) { /* power on device on adding first reader */ error = hid_hw_power(device->hid, PM_HINT_FULLON); if (error < 0) { --device->open; goto exit_err_readers; } error = hid_hw_open(device->hid); if (error < 0) { hid_hw_power(device->hid, PM_HINT_NORMAL); --device->open; goto exit_err_readers; } } reader->device = device; /* new reader doesn't get old events */ reader->cbuf_start = device->cbuf_end; list_add_tail(&reader->node, &device->readers); file->private_data = reader; exit_err_readers: mutex_unlock(&device->readers_lock); exit_err_devices: mutex_unlock(&devices_lock); if (error) kfree(reader); return error; } static int roccat_release(struct inode *inode, struct file *file) { unsigned int minor = iminor(inode); struct roccat_reader *reader = file->private_data; struct roccat_device *device; mutex_lock(&devices_lock); device = devices[minor]; if (!device) { mutex_unlock(&devices_lock); pr_emerg("roccat device with minor %d doesn't exist\n", minor); return -ENODEV; } mutex_lock(&device->readers_lock); list_del(&reader->node); mutex_unlock(&device->readers_lock); kfree(reader); if (!--device->open) { /* removing last reader */ if (device->exist) { hid_hw_power(device->hid, PM_HINT_NORMAL); hid_hw_close(device->hid); } else { kfree(device); } } mutex_unlock(&devices_lock); return 0; } /* * roccat_report_event() - output data to readers * @minor: minor device number returned by roccat_connect() * @data: pointer to data * * Return value is zero on success, a negative error code on failure. * * This is called from interrupt handler. */ int roccat_report_event(int minor, u8 const *data) { struct roccat_device *device; struct roccat_reader *reader; struct roccat_report *report; uint8_t *new_value; device = devices[minor]; new_value = kmemdup(data, device->report_size, GFP_ATOMIC); if (!new_value) return -ENOMEM; mutex_lock(&device->cbuf_lock); report = &device->cbuf[device->cbuf_end]; /* passing NULL is safe */ kfree(report->value); report->value = new_value; device->cbuf_end = (device->cbuf_end + 1) % ROCCAT_CBUF_SIZE; list_for_each_entry(reader, &device->readers, node) { /* * As we already inserted one element, the buffer can't be * empty. If start and end are equal, buffer is full and we * increase start, so that slow reader misses one event, but * gets the newer ones in the right order. 
*/ if (reader->cbuf_start == device->cbuf_end) reader->cbuf_start = (reader->cbuf_start + 1) % ROCCAT_CBUF_SIZE; } mutex_unlock(&device->cbuf_lock); wake_up_interruptible(&device->wait); return 0; } EXPORT_SYMBOL_GPL(roccat_report_event); /* * roccat_connect() - create a char device for special event output * @klass: the class that's used to create the device. Meant to hold device * specific sysfs attributes. * @hid: the hid device the char device should be connected to. * @report_size: size of reports * * Return value is minor device number in range [0, ROCCAT_MAX_DEVICES) on * success, a negative error code on failure. */ int roccat_connect(const struct class *klass, struct hid_device *hid, int report_size) { unsigned int minor; struct roccat_device *device; int temp; device = kzalloc(sizeof(struct roccat_device), GFP_KERNEL); if (!device) return -ENOMEM; mutex_lock(&devices_lock); for (minor = 0; minor < ROCCAT_MAX_DEVICES; ++minor) { if (devices[minor]) continue; break; } if (minor < ROCCAT_MAX_DEVICES) { devices[minor] = device; } else { mutex_unlock(&devices_lock); kfree(device); return -EINVAL; } device->dev = device_create(klass, &hid->dev, MKDEV(roccat_major, minor), NULL, "%s%s%d", "roccat", hid->driver->name, minor); if (IS_ERR(device->dev)) { devices[minor] = NULL; mutex_unlock(&devices_lock); temp = PTR_ERR(device->dev); kfree(device); return temp; } mutex_unlock(&devices_lock); init_waitqueue_head(&device->wait); INIT_LIST_HEAD(&device->readers); mutex_init(&device->readers_lock); mutex_init(&device->cbuf_lock); device->minor = minor; device->hid = hid; device->exist = 1; device->cbuf_end = 0; device->report_size = report_size; return minor; } EXPORT_SYMBOL_GPL(roccat_connect); /* roccat_disconnect() - remove char device from hid device * @minor: the minor device number returned by roccat_connect() */ void roccat_disconnect(int minor) { struct roccat_device *device; mutex_lock(&devices_lock); device = devices[minor]; mutex_unlock(&devices_lock); device->exist = 0; /* TODO exist maybe not needed */ device_destroy(device->dev->class, MKDEV(roccat_major, minor)); mutex_lock(&devices_lock); devices[minor] = NULL; mutex_unlock(&devices_lock); if (device->open) { hid_hw_close(device->hid); wake_up_interruptible(&device->wait); } else { kfree(device); } } EXPORT_SYMBOL_GPL(roccat_disconnect); static long roccat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(file); struct roccat_device *device; unsigned int minor = iminor(inode); long retval = 0; mutex_lock(&devices_lock); device = devices[minor]; if (!device) { retval = -ENODEV; goto out; } switch (cmd) { case ROCCATIOCGREPSIZE: if (put_user(device->report_size, (int __user *)arg)) retval = -EFAULT; break; default: retval = -ENOTTY; } out: mutex_unlock(&devices_lock); return retval; } static const struct file_operations roccat_ops = { .owner = THIS_MODULE, .read = roccat_read, .poll = roccat_poll, .open = roccat_open, .release = roccat_release, .llseek = noop_llseek, .unlocked_ioctl = roccat_ioctl, }; static int __init roccat_init(void) { int retval; dev_t dev_id; retval = alloc_chrdev_region(&dev_id, ROCCAT_FIRST_MINOR, ROCCAT_MAX_DEVICES, "roccat"); if (retval < 0) { pr_warn("can't get major number\n"); goto error; } roccat_major = MAJOR(dev_id); cdev_init(&roccat_cdev, &roccat_ops); retval = cdev_add(&roccat_cdev, dev_id, ROCCAT_MAX_DEVICES); if (retval < 0) { pr_warn("cannot add cdev\n"); goto cleanup_alloc_chrdev_region; } return 0; cleanup_alloc_chrdev_region:
unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES); error: return retval; } static void __exit roccat_exit(void) { dev_t dev_id = MKDEV(roccat_major, 0); cdev_del(&roccat_cdev); unregister_chrdev_region(dev_id, ROCCAT_MAX_DEVICES); } module_init(roccat_init); module_exit(roccat_exit); MODULE_AUTHOR("Stefan Achatz"); MODULE_DESCRIPTION("USB Roccat char device"); MODULE_LICENSE("GPL v2");
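For context, a minimal userspace consumer of the char device created above might look like the sketch below. Two things here are assumptions, not taken from this file: the node name (the driver builds it as "roccat" + the bound HID driver's name + the minor, so something like /dev/roccatkonepure0), and the ROCCATIOCGREPSIZE value, which mirrors the definition in drivers/hid/hid-roccat.h; verify both against your kernel.

/* Hypothetical userspace reader for a roccat char device.
 * Node name and ioctl value are assumptions -- check your kernel tree. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>

#define ROCCATIOCGREPSIZE _IOR('H', 0xf1, int) /* assumed; see drivers/hid/hid-roccat.h */

int main(void)
{
	int fd = open("/dev/roccatkonepure0", O_RDONLY); /* hypothetical node */
	int repsize;
	unsigned char *buf;

	if (fd < 0 || ioctl(fd, ROCCATIOCGREPSIZE, &repsize) < 0)
		return 1;
	buf = malloc(repsize);
	if (!buf)
		return 1;
	/* each successful read() delivers exactly one report; without
	 * O_NONBLOCK it blocks until roccat_report_event() queues one */
	while (read(fd, buf, repsize) == repsize)
		printf("report received, first byte 0x%02x\n", buf[0]);
	free(buf);
	close(fd);
	return 0;
}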
// SPDX-License-Identifier: GPL-2.0-only #include <linux/export.h> #include <linux/netfilter/ipset/pfxlen.h> /* Prefixlen maps for fast conversions, by Jan Engelhardt. */ #ifdef E #undef E #endif #define PREFIXES_MAP \ E(0x00000000, 0x00000000, 0x00000000, 0x00000000), \ E(0x80000000, 0x00000000, 0x00000000, 0x00000000), \ E(0xC0000000, 0x00000000, 0x00000000, 0x00000000), \ E(0xE0000000, 0x00000000, 0x00000000, 0x00000000), \ E(0xF0000000, 0x00000000, 0x00000000, 0x00000000), \ E(0xF8000000, 0x00000000, 0x00000000, 0x00000000), \ E(0xFC000000, 0x00000000, 0x00000000, 0x00000000), \ E(0xFE000000, 0x00000000, 0x00000000, 0x00000000), \ E(0xFF000000, 0x00000000, 0x00000000, 0x00000000), \ E(0xFF800000, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFC00000, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFE00000, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFF00000, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFF80000, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFFC0000, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFFE0000, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFFF8000, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFFFC000, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFFFE000, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFFFF000, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFFFF800, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFFFFC00, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFFFFE00, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFFFFF00, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFFFFF80, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFFFFFC0, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFFFFFE0, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFFFFFF0, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFFFFFF8, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFFFFFFC, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFFFFFFE, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0x80000000, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xC0000000, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xE0000000, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xF0000000, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xF8000000, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFC000000, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFE000000, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFF000000, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFF800000, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFC00000, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFE00000, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFF00000, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFF80000, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFC0000, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFE0000, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFF0000, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFF8000, 0x00000000, 0x00000000), \ E(0xFFFFFFFF,
0xFFFFC000, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFE000, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFF000, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFF800, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFC00, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFE00, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFF00, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFF80, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFC0, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFE0, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFF0, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFF8, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFC, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFE, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0x80000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x80000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xC0000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xE0000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF0000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xF8000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFC000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFE000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF000000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF800000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFC00000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFE00000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF00000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFF80000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFC0000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFE0000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF0000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFF8000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFC000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFE000), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF000), \ E(0xFFFFFFFF, 
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF800), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFC00), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFE00), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF00), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFF80), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFC0), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFE0), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF0), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFF8), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFC), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE), \ E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF), #define E(a, b, c, d) \ {.ip6 = { \ htonl(a), htonl(b), \ htonl(c), htonl(d), \ } } /* This table works for both IPv4 and IPv6; * just use prefixlen_netmask_map[prefixlength].ip. */ const union nf_inet_addr ip_set_netmask_map[] = { PREFIXES_MAP }; EXPORT_SYMBOL_GPL(ip_set_netmask_map); #undef E #define E(a, b, c, d) \ {.ip6 = { (__force __be32)a, (__force __be32)b, \ (__force __be32)c, (__force __be32)d, \ } } /* This table works for both IPv4 and IPv6; * just use prefixlen_hostmask_map[prefixlength].ip. */ const union nf_inet_addr ip_set_hostmask_map[] = { PREFIXES_MAP }; EXPORT_SYMBOL_GPL(ip_set_hostmask_map); /* Find the largest network which matches the range from left, in host order. */ u32 ip_set_range_to_cidr(u32 from, u32 to, u8 *cidr) { u32 last; u8 i; for (i = 1; i < 32; i++) { if ((from & ip_set_hostmask(i)) != from) continue; last = from | ~ip_set_hostmask(i); if (!after(last, to)) { *cidr = i; return last; } } *cidr = 32; return from; } EXPORT_SYMBOL_GPL(ip_set_range_to_cidr);
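To see what ip_set_range_to_cidr() computes, the sketch below re-implements the same loop in plain userspace C (illustrative only; mask(i) here plays the role of ip_set_hostmask(i), i.e. the i-bit prefix mask in host byte order). Run over 192.168.0.3-192.168.0.10 it prints the decomposition 192.168.0.3/32, 192.168.0.4/30, 192.168.0.8/31, 192.168.0.10/32.

#include <stdio.h>
#include <stdint.h>

/* Userspace re-implementation of the kernel loop above (illustrative). */
static uint32_t range_to_cidr(uint32_t from, uint32_t to, uint8_t *cidr)
{
	for (uint8_t i = 1; i < 32; i++) {
		uint32_t mask = ~0U << (32 - i);
		uint32_t last;

		if ((from & mask) != from)
			continue;	/* 'from' not aligned to this block size */
		last = from | ~mask;
		if (last <= to) {	/* whole block fits inside [from, to] */
			*cidr = i;
			return last;
		}
	}
	*cidr = 32;			/* fall back to a single address */
	return from;
}

int main(void)
{
	uint32_t from = 0xC0A80003, to = 0xC0A8000A; /* 192.168.0.3 - 192.168.0.10 */

	while (from <= to) {
		uint8_t cidr;
		uint32_t last = range_to_cidr(from, to, &cidr);

		printf("%u.%u.%u.%u/%u\n", from >> 24, (from >> 16) & 0xff,
		       (from >> 8) & 0xff, from & 0xff, (unsigned)cidr);
		if (last == 0xFFFFFFFF)
			break;		/* avoid wrap-around */
		from = last + 1;
	}
	return 0;
}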
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_JUMP_LABEL_H #define _LINUX_JUMP_LABEL_H /* * Jump label support * * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com> * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra * * DEPRECATED API: * * The use of 'struct static_key' directly is now DEPRECATED. In addition, * static_key_{true,false}() is also DEPRECATED. I.e., DO NOT use the following: * * struct static_key false = STATIC_KEY_INIT_FALSE; * struct static_key true = STATIC_KEY_INIT_TRUE; * static_key_true() * static_key_false() * * The updated API replacements are: * * DEFINE_STATIC_KEY_TRUE(key); * DEFINE_STATIC_KEY_FALSE(key); * DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count); * DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count); * static_branch_likely() * static_branch_unlikely() * * Jump labels provide an interface to generate dynamic branches using * self-modifying code. Assuming toolchain and architecture support, if we * define a "key" that is initially false via "DEFINE_STATIC_KEY_FALSE(key)", * an "if (static_branch_unlikely(&key))" statement is an unconditional branch * (which defaults to false - and the true block is placed out of line). * Similarly, we can define an initially true key via * "DEFINE_STATIC_KEY_TRUE(key)", and use it in the same * "if (static_branch_unlikely(&key))", in which case we will generate an * unconditional branch to the out-of-line true branch.
Keys that are * initially true or false can be used in both static_branch_unlikely() * and static_branch_likely() statements. * * At runtime we can change the branch target by setting the key * to true via a call to static_branch_enable(), or false using * static_branch_disable(). If the direction of the branch is switched by * these calls then we run-time modify the branch target via a * no-op -> jump or jump -> no-op conversion. For example, for an * initially false key that is used in an "if (static_branch_unlikely(&key))" * statement, setting the key to true requires us to patch in a jump * to the out-of-line true branch. * * In addition to static_branch_{enable,disable}, we can also reference count * the key or branch direction via static_branch_{inc,dec}. Thus, * static_branch_inc() can be thought of as a 'make more true' and * static_branch_dec() as a 'make more false'. * * Since this relies on modifying code, the branch modifying functions * must be considered absolute slow paths (machine wide synchronization etc.). * OTOH, since the affected branches are unconditional, their runtime overhead * will be absolutely minimal, esp. in the default (off) case where the total * effect is a single NOP of appropriate size. The on case will patch in a jump * to the out-of-line block. * * When the control is directly exposed to userspace, it is prudent to delay the * decrement to avoid high frequency code modifications which can (and do) * cause significant performance degradation. Struct static_key_deferred and * static_key_slow_dec_deferred() provide for this. * * Lacking toolchain and/or architecture support, static keys fall back to a * simple conditional branch. * * Additional babbling in: Documentation/staging/static-keys.rst */ #ifndef __ASSEMBLY__ #include <linux/types.h> #include <linux/compiler.h> #include <linux/cleanup.h> extern bool static_key_initialized; #define STATIC_KEY_CHECK_USE(key) WARN(!static_key_initialized, \ "%s(): static key '%pS' used before call to jump_label_init()", \ __func__, (key)) struct static_key { atomic_t enabled; #ifdef CONFIG_JUMP_LABEL /* * Note: * To make anonymous unions work with old compilers, the static * initialization of them requires brackets. This creates a dependency * on the order of the struct with the initializers. If any fields * are added, STATIC_KEY_INIT_TRUE and STATIC_KEY_INIT_FALSE may need * to be modified.
* * bit 0 => 1 if key is initially true * 0 if initially false * bit 1 => 1 if points to struct static_key_mod * 0 if points to struct jump_entry */ union { unsigned long type; struct jump_entry *entries; struct static_key_mod *next; }; #endif /* CONFIG_JUMP_LABEL */ }; #endif /* __ASSEMBLY__ */ #ifdef CONFIG_JUMP_LABEL #include <asm/jump_label.h> #ifndef __ASSEMBLY__ #ifdef CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE struct jump_entry { s32 code; s32 target; long key; // key may be far away from the core kernel under KASLR }; static inline unsigned long jump_entry_code(const struct jump_entry *entry) { return (unsigned long)&entry->code + entry->code; } static inline unsigned long jump_entry_target(const struct jump_entry *entry) { return (unsigned long)&entry->target + entry->target; } static inline struct static_key *jump_entry_key(const struct jump_entry *entry) { long offset = entry->key & ~3L; return (struct static_key *)((unsigned long)&entry->key + offset); } #else static inline unsigned long jump_entry_code(const struct jump_entry *entry) { return entry->code; } static inline unsigned long jump_entry_target(const struct jump_entry *entry) { return entry->target; } static inline struct static_key *jump_entry_key(const struct jump_entry *entry) { return (struct static_key *)((unsigned long)entry->key & ~3UL); } #endif static inline bool jump_entry_is_branch(const struct jump_entry *entry) { return (unsigned long)entry->key & 1UL; } static inline bool jump_entry_is_init(const struct jump_entry *entry) { return (unsigned long)entry->key & 2UL; } static inline void jump_entry_set_init(struct jump_entry *entry, bool set) { if (set) entry->key |= 2; else entry->key &= ~2; } static inline int jump_entry_size(struct jump_entry *entry) { #ifdef JUMP_LABEL_NOP_SIZE return JUMP_LABEL_NOP_SIZE; #else return arch_jump_entry_size(entry); #endif } #endif #endif #ifndef __ASSEMBLY__ enum jump_label_type { JUMP_LABEL_NOP = 0, JUMP_LABEL_JMP, }; struct module; #ifdef CONFIG_JUMP_LABEL #define JUMP_TYPE_FALSE 0UL #define JUMP_TYPE_TRUE 1UL #define JUMP_TYPE_LINKED 2UL #define JUMP_TYPE_MASK 3UL static __always_inline bool static_key_false(struct static_key *key) { return arch_static_branch(key, false); } static __always_inline bool static_key_true(struct static_key *key) { return !arch_static_branch(key, true); } extern struct jump_entry __start___jump_table[]; extern struct jump_entry __stop___jump_table[]; extern void jump_label_init(void); extern void jump_label_init_ro(void); extern void jump_label_lock(void); extern void jump_label_unlock(void); extern void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type); extern bool arch_jump_label_transform_queue(struct jump_entry *entry, enum jump_label_type type); extern void arch_jump_label_transform_apply(void); extern int jump_label_text_reserved(void *start, void *end); extern bool static_key_slow_inc(struct static_key *key); extern bool static_key_fast_inc_not_disabled(struct static_key *key); extern void static_key_slow_dec(struct static_key *key); extern bool static_key_slow_inc_cpuslocked(struct static_key *key); extern void static_key_slow_dec_cpuslocked(struct static_key *key); extern int static_key_count(struct static_key *key); extern void static_key_enable(struct static_key *key); extern void static_key_disable(struct static_key *key); extern void static_key_enable_cpuslocked(struct static_key *key); extern void static_key_disable_cpuslocked(struct static_key *key); extern enum jump_label_type jump_label_init_type(struct 
jump_entry *entry); /* * We should be using ATOMIC_INIT() for initializing .enabled, but * the inclusion of atomic.h is problematic for inclusion of jump_label.h * in 'low-level' headers. Thus, we are initializing .enabled with a * raw value, but have added a BUILD_BUG_ON() to catch any issues in * jump_label_init() see: kernel/jump_label.c. */ #define STATIC_KEY_INIT_TRUE \ { .enabled = { 1 }, \ { .type = JUMP_TYPE_TRUE } } #define STATIC_KEY_INIT_FALSE \ { .enabled = { 0 }, \ { .type = JUMP_TYPE_FALSE } } #else /* !CONFIG_JUMP_LABEL */ #include <linux/atomic.h> #include <linux/bug.h> static __always_inline int static_key_count(struct static_key *key) { return raw_atomic_read(&key->enabled); } static __always_inline void jump_label_init(void) { static_key_initialized = true; } static __always_inline void jump_label_init_ro(void) { } static __always_inline bool static_key_false(struct static_key *key) { if (unlikely_notrace(static_key_count(key) > 0)) return true; return false; } static __always_inline bool static_key_true(struct static_key *key) { if (likely_notrace(static_key_count(key) > 0)) return true; return false; } static inline bool static_key_fast_inc_not_disabled(struct static_key *key) { int v; STATIC_KEY_CHECK_USE(key); /* * Prevent key->enabled getting negative to follow the same semantics * as for CONFIG_JUMP_LABEL=y, see kernel/jump_label.c comment. */ v = atomic_read(&key->enabled); do { if (v < 0 || (v + 1) < 0) return false; } while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1))); return true; } #define static_key_slow_inc(key) static_key_fast_inc_not_disabled(key) static inline void static_key_slow_dec(struct static_key *key) { STATIC_KEY_CHECK_USE(key); atomic_dec(&key->enabled); } #define static_key_slow_inc_cpuslocked(key) static_key_slow_inc(key) #define static_key_slow_dec_cpuslocked(key) static_key_slow_dec(key) static inline int jump_label_text_reserved(void *start, void *end) { return 0; } static inline void jump_label_lock(void) {} static inline void jump_label_unlock(void) {} static inline void static_key_enable(struct static_key *key) { STATIC_KEY_CHECK_USE(key); if (atomic_read(&key->enabled) != 0) { WARN_ON_ONCE(atomic_read(&key->enabled) != 1); return; } atomic_set(&key->enabled, 1); } static inline void static_key_disable(struct static_key *key) { STATIC_KEY_CHECK_USE(key); if (atomic_read(&key->enabled) != 1) { WARN_ON_ONCE(atomic_read(&key->enabled) != 0); return; } atomic_set(&key->enabled, 0); } #define static_key_enable_cpuslocked(k) static_key_enable((k)) #define static_key_disable_cpuslocked(k) static_key_disable((k)) #define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) } #define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) } #endif /* CONFIG_JUMP_LABEL */ DEFINE_LOCK_GUARD_0(jump_label_lock, jump_label_lock(), jump_label_unlock()) #define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE #define jump_label_enabled static_key_enabled /* -------------------------------------------------------------------------- */ /* * Two type wrappers around static_key, such that we can use compile time * type differentiation to emit the right code. * * All the below code is macros in order to play type games. 
*/ struct static_key_true { struct static_key key; }; struct static_key_false { struct static_key key; }; #define STATIC_KEY_TRUE_INIT (struct static_key_true) { .key = STATIC_KEY_INIT_TRUE, } #define STATIC_KEY_FALSE_INIT (struct static_key_false){ .key = STATIC_KEY_INIT_FALSE, } #define DEFINE_STATIC_KEY_TRUE(name) \ struct static_key_true name = STATIC_KEY_TRUE_INIT #define DEFINE_STATIC_KEY_TRUE_RO(name) \ struct static_key_true name __ro_after_init = STATIC_KEY_TRUE_INIT #define DECLARE_STATIC_KEY_TRUE(name) \ extern struct static_key_true name #define DEFINE_STATIC_KEY_FALSE(name) \ struct static_key_false name = STATIC_KEY_FALSE_INIT #define DEFINE_STATIC_KEY_FALSE_RO(name) \ struct static_key_false name __ro_after_init = STATIC_KEY_FALSE_INIT #define DECLARE_STATIC_KEY_FALSE(name) \ extern struct static_key_false name #define DEFINE_STATIC_KEY_ARRAY_TRUE(name, count) \ struct static_key_true name[count] = { \ [0 ... (count) - 1] = STATIC_KEY_TRUE_INIT, \ } #define DEFINE_STATIC_KEY_ARRAY_FALSE(name, count) \ struct static_key_false name[count] = { \ [0 ... (count) - 1] = STATIC_KEY_FALSE_INIT, \ } #define _DEFINE_STATIC_KEY_1(name) DEFINE_STATIC_KEY_TRUE(name) #define _DEFINE_STATIC_KEY_0(name) DEFINE_STATIC_KEY_FALSE(name) #define DEFINE_STATIC_KEY_MAYBE(cfg, name) \ __PASTE(_DEFINE_STATIC_KEY_, IS_ENABLED(cfg))(name) #define _DEFINE_STATIC_KEY_RO_1(name) DEFINE_STATIC_KEY_TRUE_RO(name) #define _DEFINE_STATIC_KEY_RO_0(name) DEFINE_STATIC_KEY_FALSE_RO(name) #define DEFINE_STATIC_KEY_MAYBE_RO(cfg, name) \ __PASTE(_DEFINE_STATIC_KEY_RO_, IS_ENABLED(cfg))(name) #define _DECLARE_STATIC_KEY_1(name) DECLARE_STATIC_KEY_TRUE(name) #define _DECLARE_STATIC_KEY_0(name) DECLARE_STATIC_KEY_FALSE(name) #define DECLARE_STATIC_KEY_MAYBE(cfg, name) \ __PASTE(_DECLARE_STATIC_KEY_, IS_ENABLED(cfg))(name) extern bool ____wrong_branch_error(void); #define static_key_enabled(x) \ ({ \ if (!__builtin_types_compatible_p(typeof(*x), struct static_key) && \ !__builtin_types_compatible_p(typeof(*x), struct static_key_true) &&\ !__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \ ____wrong_branch_error(); \ static_key_count((struct static_key *)x) > 0; \ }) #ifdef CONFIG_JUMP_LABEL
/*
 * Combine the right initial value (type) with the right branch order
 * to generate the desired result.
 *
 *
 * type\branch| likely (1)            | unlikely (0)
 * -----------+-----------------------+------------------
 *            |                       |
 *  true (1)  |    ...                |    ...
 *            |    NOP                |    JMP L
 *            |    <br-stmts>         | 1: ...
 *            | L: ...                |
 *            |                       |
 *            |                       | L: <br-stmts>
 *            |                       |    jmp 1b
 *            |                       |
 * -----------+-----------------------+------------------
 *            |                       |
 *  false (0) |    ...                |    ...
 *            |    JMP L              |    NOP
 *            |    <br-stmts>         | 1: ...
 *            | L: ...                |
 *            |                       |
 *            |                       | L: <br-stmts>
 *            |                       |    jmp 1b
 *            |                       |
 * -----------+-----------------------+------------------
 *
 * The initial value is encoded in the LSB of static_key::entries,
 * type: 0 = false, 1 = true.
 *
 * The branch type is encoded in the LSB of jump_entry::key,
 * branch: 0 = unlikely, 1 = likely.
 *
 * This gives the following logic table:
 *
 *	enabled	type	branch	  instruction
 * -----------------------------+-----------
 *	0	0	0	| NOP
 *	0	0	1	| JMP
 *	0	1	0	| NOP
 *	0	1	1	| JMP
 *
 *	1	0	0	| JMP
 *	1	0	1	| NOP
 *	1	1	0	| JMP
 *	1	1	1	| NOP
 *
 * Which gives the following functions:
 *
 *   dynamic: instruction = enabled ^ branch
 *   static:  instruction = type ^ branch
 *
 * See jump_label_type() / jump_label_init_type().
*/ #define static_branch_likely(x) \ ({ \ bool branch; \ if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \ branch = !arch_static_branch(&(x)->key, true); \ else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \ branch = !arch_static_branch_jump(&(x)->key, true); \ else \ branch = ____wrong_branch_error(); \ likely_notrace(branch); \ }) #define static_branch_unlikely(x) \ ({ \ bool branch; \ if (__builtin_types_compatible_p(typeof(*x), struct static_key_true)) \ branch = arch_static_branch_jump(&(x)->key, false); \ else if (__builtin_types_compatible_p(typeof(*x), struct static_key_false)) \ branch = arch_static_branch(&(x)->key, false); \ else \ branch = ____wrong_branch_error(); \ unlikely_notrace(branch); \ }) #else /* !CONFIG_JUMP_LABEL */ #define static_branch_likely(x) likely_notrace(static_key_enabled(&(x)->key)) #define static_branch_unlikely(x) unlikely_notrace(static_key_enabled(&(x)->key)) #endif /* CONFIG_JUMP_LABEL */ #define static_branch_maybe(config, x) \ (IS_ENABLED(config) ? static_branch_likely(x) \ : static_branch_unlikely(x)) /* * Advanced usage; refcount, branch is enabled when: count != 0 */ #define static_branch_inc(x) static_key_slow_inc(&(x)->key) #define static_branch_dec(x) static_key_slow_dec(&(x)->key) #define static_branch_inc_cpuslocked(x) static_key_slow_inc_cpuslocked(&(x)->key) #define static_branch_dec_cpuslocked(x) static_key_slow_dec_cpuslocked(&(x)->key) /* * Normal usage; boolean enable/disable. */ #define static_branch_enable(x) static_key_enable(&(x)->key) #define static_branch_disable(x) static_key_disable(&(x)->key) #define static_branch_enable_cpuslocked(x) static_key_enable_cpuslocked(&(x)->key) #define static_branch_disable_cpuslocked(x) static_key_disable_cpuslocked(&(x)->key) #endif /* __ASSEMBLY__ */ #endif /* _LINUX_JUMP_LABEL_H */
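Putting the header together, typical usage looks like the sketch below. The key and function names (my_tracing_key, do_expensive_tracing, set_tracing) are made up for illustration: define an initially-false key, test it in the hot path with static_branch_unlikely() (a NOP by default), and flip it from slow-path control code.

/* Illustrative sketch; my_tracing_key and do_expensive_tracing()
 * are hypothetical names, not part of this header. */
#include <linux/types.h>
#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(my_tracing_key);

extern void do_expensive_tracing(void);

void hot_path(void)
{
	/* compiles to a NOP by default; patched to a JMP when enabled */
	if (static_branch_unlikely(&my_tracing_key))
		do_expensive_tracing();
}

/* slow path, e.g. a sysfs/debugfs store handler */
void set_tracing(bool on)
{
	if (on)
		static_branch_enable(&my_tracing_key);
	else
		static_branch_disable(&my_tracing_key);
}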
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * NET		Generic infrastructure for Network protocols.
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */
#ifndef _TIMEWAIT_SOCK_H
#define _TIMEWAIT_SOCK_H

#include <linux/slab.h>
#include <linux/bug.h>
#include <net/sock.h>

struct timewait_sock_ops {
	struct kmem_cache	*twsk_slab;
	char			*twsk_slab_name;
	unsigned int		twsk_obj_size;
	void			(*twsk_destructor)(struct sock *sk);
};

static inline void twsk_destructor(struct sock *sk)
{
	if (sk->sk_prot->twsk_prot->twsk_destructor != NULL)
		sk->sk_prot->twsk_prot->twsk_destructor(sk);
}

#endif /* _TIMEWAIT_SOCK_H */
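As a usage sketch: a protocol supplies one of these per timewait type and points its struct proto at it, which is roughly how TCP wires it up. The snippet below is modelled from memory on net/ipv4/tcp_ipv4.c and is trimmed; treat the exact fields and values as assumptions and check the real file.

/* Rough sketch of a protocol wiring up timewait_sock_ops (modelled on
 * TCP; trimmed and approximate, see net/ipv4/tcp_ipv4.c). */
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	 = sizeof(struct tcp_timewait_sock),
	.twsk_destructor = tcp_twsk_destructor,
};

struct proto tcp_prot = {
	/* ... */
	.twsk_prot = &tcp_timewait_sock_ops,
	/* ... */
};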
// SPDX-License-Identifier: GPL-2.0-only /* * Software WEP encryption implementation * Copyright 2002, Jouni Malinen <jkmaline@cc.hut.fi> * Copyright 2003, Instant802 Networks, Inc. * Copyright (C) 2023 Intel Corporation */ #include <linux/netdevice.h> #include <linux/types.h> #include <linux/random.h> #include <linux/compiler.h> #include <linux/crc32.h> #include <linux/crypto.h> #include <linux/err.h> #include <linux/mm.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <linux/unaligned.h> #include <net/mac80211.h> #include "ieee80211_i.h" #include "wep.h" void ieee80211_wep_init(struct ieee80211_local *local) { /* start WEP IV from a random value */ get_random_bytes(&local->wep_iv, IEEE80211_WEP_IV_LEN); } static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen) { /* * Fluhrer, Mantin, and Shamir have reported weaknesses in the * key scheduling algorithm of RC4. At least IVs (KeyByte + 3, * 0xff, N) can be used to speed up attacks, so avoid using them.
*/ if ((iv & 0xff00) == 0xff00) { u8 B = (iv >> 16) & 0xff; if (B >= 3 && B < 3 + keylen) return true; } return false; } static void ieee80211_wep_get_iv(struct ieee80211_local *local, int keylen, int keyidx, u8 *iv) { local->wep_iv++; if (ieee80211_wep_weak_iv(local->wep_iv, keylen)) local->wep_iv += 0x0100; if (!iv) return; *iv++ = (local->wep_iv >> 16) & 0xff; *iv++ = (local->wep_iv >> 8) & 0xff; *iv++ = local->wep_iv & 0xff; *iv++ = keyidx << 6; } static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local, struct sk_buff *skb, int keylen, int keyidx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); unsigned int hdrlen; u8 *newhdr; hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); if (WARN_ON(skb_headroom(skb) < IEEE80211_WEP_IV_LEN)) return NULL; hdrlen = ieee80211_hdrlen(hdr->frame_control); newhdr = skb_push(skb, IEEE80211_WEP_IV_LEN); memmove(newhdr, newhdr + IEEE80211_WEP_IV_LEN, hdrlen); /* the HW only needs room for the IV, but not the actual IV */ if (info->control.hw_key && (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) return newhdr + hdrlen; ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen); return newhdr + hdrlen; } static void ieee80211_wep_remove_iv(struct ieee80211_local *local, struct sk_buff *skb, struct ieee80211_key *key) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; unsigned int hdrlen; hdrlen = ieee80211_hdrlen(hdr->frame_control); memmove(skb->data + IEEE80211_WEP_IV_LEN, skb->data, hdrlen); skb_pull(skb, IEEE80211_WEP_IV_LEN); } /* Perform WEP encryption using given key. data buffer must have tailroom * for 4-byte ICV. data_len must not include this ICV. Note: this function * does _not_ add IV. data = RC4(data | CRC32(data)) */ int ieee80211_wep_encrypt_data(struct arc4_ctx *ctx, u8 *rc4key, size_t klen, u8 *data, size_t data_len) { __le32 icv; icv = cpu_to_le32(~crc32_le(~0, data, data_len)); put_unaligned(icv, (__le32 *)(data + data_len)); arc4_setkey(ctx, rc4key, klen); arc4_crypt(ctx, data, data, data_len + IEEE80211_WEP_ICV_LEN); memzero_explicit(ctx, sizeof(*ctx)); return 0; } /* Perform WEP encryption on given skb. 4 bytes of extra space (IV) in the * beginning of the buffer and 4 bytes of extra space (ICV) in the end of the * buffer will be added. Both IV and ICV will be transmitted, so the * payload length increases by 8 bytes. * * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data)) */ int ieee80211_wep_encrypt(struct ieee80211_local *local, struct sk_buff *skb, const u8 *key, int keylen, int keyidx) { u8 *iv; size_t len; u8 rc4key[3 + WLAN_KEY_LEN_WEP104]; if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN)) return -1; iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx); if (!iv) return -1; len = skb->len - (iv + IEEE80211_WEP_IV_LEN - skb->data); /* Prepend 24-bit IV to RC4 key */ memcpy(rc4key, iv, 3); /* Copy rest of the WEP key (the secret part) */ memcpy(rc4key + 3, key, keylen); /* Add room for ICV */ skb_put(skb, IEEE80211_WEP_ICV_LEN); return ieee80211_wep_encrypt_data(&local->wep_tx_ctx, rc4key, keylen + 3, iv + IEEE80211_WEP_IV_LEN, len); } /* Perform WEP decryption using given key. data buffer includes encrypted * payload, including 4-byte ICV, but _not_ IV. data_len must not include ICV. * Return 0 on success and -1 on ICV mismatch.
*/ int ieee80211_wep_decrypt_data(struct arc4_ctx *ctx, u8 *rc4key, size_t klen, u8 *data, size_t data_len) { __le32 crc; arc4_setkey(ctx, rc4key, klen); arc4_crypt(ctx, data, data, data_len + IEEE80211_WEP_ICV_LEN); memzero_explicit(ctx, sizeof(*ctx)); crc = cpu_to_le32(~crc32_le(~0, data, data_len)); if (memcmp(&crc, data + data_len, IEEE80211_WEP_ICV_LEN) != 0) /* ICV mismatch */ return -1; return 0; } /* Perform WEP decryption on given skb. Buffer includes whole WEP part of * the frame: IV (4 bytes), encrypted payload (including SNAP header), * ICV (4 bytes). skb->len includes both IV and ICV. * * Returns 0 if frame was decrypted successfully and ICV was correct and -1 on * failure. If frame is OK, IV and ICV will be removed, i.e., decrypted payload * is moved to the beginning of the skb and skb length will be reduced. */ static int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb, struct ieee80211_key *key) { u32 klen; u8 rc4key[3 + WLAN_KEY_LEN_WEP104]; u8 keyidx; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; unsigned int hdrlen; size_t len; int ret = 0; if (!ieee80211_has_protected(hdr->frame_control)) return -1; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (skb->len < hdrlen + IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN) return -1; len = skb->len - hdrlen - IEEE80211_WEP_IV_LEN - IEEE80211_WEP_ICV_LEN; keyidx = skb->data[hdrlen + 3] >> 6; if (!key || keyidx != key->conf.keyidx) return -1; klen = 3 + key->conf.keylen; /* Prepend 24-bit IV to RC4 key */ memcpy(rc4key, skb->data + hdrlen, 3); /* Copy rest of the WEP key (the secret part) */ memcpy(rc4key + 3, key->conf.key, key->conf.keylen); if (ieee80211_wep_decrypt_data(&local->wep_rx_ctx, rc4key, klen, skb->data + hdrlen + IEEE80211_WEP_IV_LEN, len)) ret = -1; /* Trim ICV */ skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN); /* Remove IV */ memmove(skb->data + IEEE80211_WEP_IV_LEN, skb->data, hdrlen); skb_pull(skb, IEEE80211_WEP_IV_LEN); return ret; } ieee80211_rx_result ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx) { struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; __le16 fc = hdr->frame_control; if (!ieee80211_is_data(fc) && !ieee80211_is_auth(fc)) return RX_CONTINUE; if (!(status->flag & RX_FLAG_DECRYPTED)) { if (skb_linearize(rx->skb)) return RX_DROP_U_OOM; if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) return RX_DROP_U_WEP_DEC_FAIL; } else if (!(status->flag & RX_FLAG_IV_STRIPPED)) { if (!pskb_may_pull(rx->skb, ieee80211_hdrlen(fc) + IEEE80211_WEP_IV_LEN)) return RX_DROP_U_NO_IV; ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key); /* remove ICV */ if (!(status->flag & RX_FLAG_ICV_STRIPPED) && pskb_trim(rx->skb, rx->skb->len - IEEE80211_WEP_ICV_LEN)) return RX_DROP_U_NO_ICV; } return RX_CONTINUE; } static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_key_conf *hw_key = info->control.hw_key; if (!hw_key) { if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key, tx->key->conf.keylen, tx->key->conf.keyidx)) return -1; } else if ((hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) || (hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) { if (!ieee80211_wep_add_iv(tx->local, skb, tx->key->conf.keylen, tx->key->conf.keyidx)) return -1; } return 0; } ieee80211_tx_result ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb; 
ieee80211_tx_set_protected(tx); skb_queue_walk(&tx->skbs, skb) { if (wep_encrypt_skb(tx, skb) < 0) { I802_DEBUG_INC(tx->local->tx_handlers_drop_wep); return TX_DROP; } } return TX_CONTINUE; }
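As a standalone illustration of the ICV math used by ieee80211_wep_encrypt_data() above -- ICV = ~crc32_le(~0, data, len), stored little-endian right after the payload, with data||ICV then RC4-encrypted -- here is a self-contained userspace sketch. It uses a bitwise CRC for clarity where the kernel uses its table-driven crc32_le(); the function names are mine, not the kernel's.

#include <stdint.h>
#include <stddef.h>

/* Bitwise CRC-32 (reflected, polynomial 0xEDB88320), matching the
 * semantics of the kernel's crc32_le() for this use. */
static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320 : 0);
	}
	return crc;
}

/* Append the 4-byte WEP ICV after data[0..data_len), exactly as the
 * kernel does before RC4-encrypting data||ICV. The buffer must have
 * 4 bytes of tailroom. */
static void wep_append_icv(uint8_t *data, size_t data_len)
{
	uint32_t icv = ~crc32_le(~0U, data, data_len);

	for (int i = 0; i < 4; i++)	/* little-endian, like cpu_to_le32() */
		data[data_len + i] = (uint8_t)(icv >> (8 * i));
}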
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c  Packet classifier API.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <linux/rhashtable.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>
#include <net/tc_wrapper.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static struct xarray tcf_exts_miss_cookies_xa;
struct tcf_exts_miss_cookie_node {
        const struct tcf_chain *chain;
        const struct tcf_proto *tp;
        const struct tcf_exts *exts;
        u32 chain_index;
        u32 tp_prio;
        u32 handle;
        u32 miss_cookie_base;
        struct rcu_head rcu;
};

/* Each tc action entry cookie is composed of a 32bit miss_cookie_base plus the
 * action index in the exts tc actions array.
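 *
 * Worked example with illustrative values (not taken from the code): a
 * base of 0x2a and an action index of 3 pack through the union below
 * into the single u64 0x000000030000002a on a little-endian host. The
 * value is only ever split apart again via the same union, in
 * tcf_exts_miss_cookie_get() and tcf_exts_miss_cookie_lookup(), so the
 * in-memory layout never has to be portable between hosts.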
*/ union tcf_exts_miss_cookie { struct { u32 miss_cookie_base; u32 act_index; }; u64 miss_cookie; }; #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) static int tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp, u32 handle) { struct tcf_exts_miss_cookie_node *n; static u32 next; int err; if (WARN_ON(!handle || !tp->ops->get_exts)) return -EINVAL; n = kzalloc(sizeof(*n), GFP_KERNEL); if (!n) return -ENOMEM; n->chain_index = tp->chain->index; n->chain = tp->chain; n->tp_prio = tp->prio; n->tp = tp; n->exts = exts; n->handle = handle; err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base, n, xa_limit_32b, &next, GFP_KERNEL); if (err < 0) goto err_xa_alloc; exts->miss_cookie_node = n; return 0; err_xa_alloc: kfree(n); return err; } static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts) { struct tcf_exts_miss_cookie_node *n; if (!exts->miss_cookie_node) return; n = exts->miss_cookie_node; xa_erase(&tcf_exts_miss_cookies_xa, n->miss_cookie_base); kfree_rcu(n, rcu); } static struct tcf_exts_miss_cookie_node * tcf_exts_miss_cookie_lookup(u64 miss_cookie, int *act_index) { union tcf_exts_miss_cookie mc = { .miss_cookie = miss_cookie, }; *act_index = mc.act_index; return xa_load(&tcf_exts_miss_cookies_xa, mc.miss_cookie_base); } #else /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */ static int tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp, u32 handle) { return 0; } static void tcf_exts_miss_cookie_base_destroy(struct tcf_exts *exts) { } #endif /* IS_ENABLED(CONFIG_NET_TC_SKB_EXT) */ static u64 tcf_exts_miss_cookie_get(u32 miss_cookie_base, int act_index) { union tcf_exts_miss_cookie mc = { .act_index = act_index, }; if (!miss_cookie_base) return 0; mc.miss_cookie_base = miss_cookie_base; return mc.miss_cookie; } #ifdef CONFIG_NET_CLS_ACT DEFINE_STATIC_KEY_FALSE(tc_skb_ext_tc); EXPORT_SYMBOL(tc_skb_ext_tc); void tc_skb_ext_tc_enable(void) { static_branch_inc(&tc_skb_ext_tc); } EXPORT_SYMBOL(tc_skb_ext_tc_enable); void tc_skb_ext_tc_disable(void) { static_branch_dec(&tc_skb_ext_tc); } EXPORT_SYMBOL(tc_skb_ext_tc_disable); #endif static u32 destroy_obj_hashfn(const struct tcf_proto *tp) { return jhash_3words(tp->chain->index, tp->prio, (__force __u32)tp->protocol, 0); } static void tcf_proto_signal_destroying(struct tcf_chain *chain, struct tcf_proto *tp) { struct tcf_block *block = chain->block; mutex_lock(&block->proto_destroy_lock); hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node, destroy_obj_hashfn(tp)); mutex_unlock(&block->proto_destroy_lock); } static bool tcf_proto_cmp(const struct tcf_proto *tp1, const struct tcf_proto *tp2) { return tp1->chain->index == tp2->chain->index && tp1->prio == tp2->prio && tp1->protocol == tp2->protocol; } static bool tcf_proto_exists_destroying(struct tcf_chain *chain, struct tcf_proto *tp) { u32 hash = destroy_obj_hashfn(tp); struct tcf_proto *iter; bool found = false; rcu_read_lock(); hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter, destroy_ht_node, hash) { if (tcf_proto_cmp(tp, iter)) { found = true; break; } } rcu_read_unlock(); return found; } static void tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp) { struct tcf_block *block = chain->block; mutex_lock(&block->proto_destroy_lock); if (hash_hashed(&tp->destroy_ht_node)) hash_del_rcu(&tp->destroy_ht_node); mutex_unlock(&block->proto_destroy_lock); } /* Find classifier type by string name */ static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind) { const struct tcf_proto_ops 
*t, *res = NULL; if (kind) { read_lock(&cls_mod_lock); list_for_each_entry(t, &tcf_proto_base, head) { if (strcmp(kind, t->kind) == 0) { if (try_module_get(t->owner)) res = t; break; } } read_unlock(&cls_mod_lock); } return res; } static const struct tcf_proto_ops * tcf_proto_lookup_ops(const char *kind, bool rtnl_held, struct netlink_ext_ack *extack) { const struct tcf_proto_ops *ops; ops = __tcf_proto_lookup_ops(kind); if (ops) return ops; #ifdef CONFIG_MODULES if (rtnl_held) rtnl_unlock(); request_module(NET_CLS_ALIAS_PREFIX "%s", kind); if (rtnl_held) rtnl_lock(); ops = __tcf_proto_lookup_ops(kind); /* We dropped the RTNL semaphore in order to perform * the module load. So, even if we succeeded in loading * the module we have to replay the request. We indicate * this using -EAGAIN. */ if (ops) { module_put(ops->owner); return ERR_PTR(-EAGAIN); } #endif NL_SET_ERR_MSG(extack, "TC classifier not found"); return ERR_PTR(-ENOENT); } /* Register(unregister) new classifier type */ int register_tcf_proto_ops(struct tcf_proto_ops *ops) { struct tcf_proto_ops *t; int rc = -EEXIST; write_lock(&cls_mod_lock); list_for_each_entry(t, &tcf_proto_base, head) if (!strcmp(ops->kind, t->kind)) goto out; list_add_tail(&ops->head, &tcf_proto_base); rc = 0; out: write_unlock(&cls_mod_lock); return rc; } EXPORT_SYMBOL(register_tcf_proto_ops); static struct workqueue_struct *tc_filter_wq; void unregister_tcf_proto_ops(struct tcf_proto_ops *ops) { struct tcf_proto_ops *t; int rc = -ENOENT; /* Wait for outstanding call_rcu()s, if any, from a * tcf_proto_ops's destroy() handler. */ rcu_barrier(); flush_workqueue(tc_filter_wq); write_lock(&cls_mod_lock); list_for_each_entry(t, &tcf_proto_base, head) { if (t == ops) { list_del(&t->head); rc = 0; break; } } write_unlock(&cls_mod_lock); WARN(rc, "unregister tc filter kind(%s) failed %d\n", ops->kind, rc); } EXPORT_SYMBOL(unregister_tcf_proto_ops); bool tcf_queue_work(struct rcu_work *rwork, work_func_t func) { INIT_RCU_WORK(rwork, func); return queue_rcu_work(tc_filter_wq, rwork); } EXPORT_SYMBOL(tcf_queue_work); /* Select new prio value from the range, managed by kernel. */ static inline u32 tcf_auto_prio(struct tcf_proto *tp) { u32 first = TC_H_MAKE(0xC0000000U, 0U); if (tp) first = tp->prio - 1; return TC_H_MAJ(first); } static bool tcf_proto_check_kind(struct nlattr *kind, char *name) { if (kind) return nla_strscpy(name, kind, IFNAMSIZ) < 0; memset(name, 0, IFNAMSIZ); return false; } static bool tcf_proto_is_unlocked(const char *kind) { const struct tcf_proto_ops *ops; bool ret; if (strlen(kind) == 0) return false; ops = tcf_proto_lookup_ops(kind, false, NULL); /* On error return false to take rtnl lock. Proto lookup/create * functions will perform lookup again and properly handle errors. 
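 * A kind whose module cannot be found thus reports "locked" here; that
 * is the conservative choice, since taking rtnl is always correct, just
 * slower than the unlocked fast path.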
*/ if (IS_ERR(ops)) return false; ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED); module_put(ops->owner); return ret; } static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol, u32 prio, struct tcf_chain *chain, bool rtnl_held, struct netlink_ext_ack *extack) { struct tcf_proto *tp; int err; tp = kzalloc(sizeof(*tp), GFP_KERNEL); if (!tp) return ERR_PTR(-ENOBUFS); tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack); if (IS_ERR(tp->ops)) { err = PTR_ERR(tp->ops); goto errout; } tp->classify = tp->ops->classify; tp->protocol = protocol; tp->prio = prio; tp->chain = chain; tp->usesw = !tp->ops->reoffload; spin_lock_init(&tp->lock); refcount_set(&tp->refcnt, 1); err = tp->ops->init(tp); if (err) { module_put(tp->ops->owner); goto errout; } return tp; errout: kfree(tp); return ERR_PTR(err); } static void tcf_proto_get(struct tcf_proto *tp) { refcount_inc(&tp->refcnt); } static void tcf_proto_count_usesw(struct tcf_proto *tp, bool add) { #ifdef CONFIG_NET_CLS_ACT struct tcf_block *block = tp->chain->block; bool counted = false; if (!add) { if (tp->usesw && tp->counted) { if (!atomic_dec_return(&block->useswcnt)) static_branch_dec(&tcf_sw_enabled_key); tp->counted = false; } return; } spin_lock(&tp->lock); if (tp->usesw && !tp->counted) { counted = true; tp->counted = true; } spin_unlock(&tp->lock); if (counted && atomic_inc_return(&block->useswcnt) == 1) static_branch_inc(&tcf_sw_enabled_key); #endif } static void tcf_chain_put(struct tcf_chain *chain); static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held, bool sig_destroy, struct netlink_ext_ack *extack) { tp->ops->destroy(tp, rtnl_held, extack); tcf_proto_count_usesw(tp, false); if (sig_destroy) tcf_proto_signal_destroyed(tp->chain, tp); tcf_chain_put(tp->chain); module_put(tp->ops->owner); kfree_rcu(tp, rcu); } static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held, struct netlink_ext_ack *extack) { if (refcount_dec_and_test(&tp->refcnt)) tcf_proto_destroy(tp, rtnl_held, true, extack); } static bool tcf_proto_check_delete(struct tcf_proto *tp) { if (tp->ops->delete_empty) return tp->ops->delete_empty(tp); tp->deleting = true; return tp->deleting; } static void tcf_proto_mark_delete(struct tcf_proto *tp) { spin_lock(&tp->lock); tp->deleting = true; spin_unlock(&tp->lock); } static bool tcf_proto_is_deleting(struct tcf_proto *tp) { bool deleting; spin_lock(&tp->lock); deleting = tp->deleting; spin_unlock(&tp->lock); return deleting; } #define ASSERT_BLOCK_LOCKED(block) \ lockdep_assert_held(&(block)->lock) struct tcf_filter_chain_list_item { struct list_head list; tcf_chain_head_change_t *chain_head_change; void *chain_head_change_priv; }; static struct tcf_chain *tcf_chain_create(struct tcf_block *block, u32 chain_index) { struct tcf_chain *chain; ASSERT_BLOCK_LOCKED(block); chain = kzalloc(sizeof(*chain), GFP_KERNEL); if (!chain) return NULL; list_add_tail_rcu(&chain->list, &block->chain_list); mutex_init(&chain->filter_chain_lock); chain->block = block; chain->index = chain_index; chain->refcnt = 1; if (!chain->index) block->chain0.chain = chain; return chain; } static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item, struct tcf_proto *tp_head) { if (item->chain_head_change) item->chain_head_change(tp_head, item->chain_head_change_priv); } static void tcf_chain0_head_change(struct tcf_chain *chain, struct tcf_proto *tp_head) { struct tcf_filter_chain_list_item *item; struct tcf_block *block = chain->block; if (chain->index) return; mutex_lock(&block->lock); 
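	/* Chain 0 is special: qdiscs bound to this block cache the head
	 * of chain 0 for the software fast path, so every registered
	 * callback item is notified of the new head here, with
	 * block->lock serializing the update against concurrent binds
	 * and unbinds.
	 */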
list_for_each_entry(item, &block->chain0.filter_chain_list, list) tcf_chain_head_change_item(item, tp_head); mutex_unlock(&block->lock); } /* Returns true if block can be safely freed. */ static bool tcf_chain_detach(struct tcf_chain *chain) { struct tcf_block *block = chain->block; ASSERT_BLOCK_LOCKED(block); list_del_rcu(&chain->list); if (!chain->index) block->chain0.chain = NULL; if (list_empty(&block->chain_list) && refcount_read(&block->refcnt) == 0) return true; return false; } static void tcf_block_destroy(struct tcf_block *block) { mutex_destroy(&block->lock); mutex_destroy(&block->proto_destroy_lock); xa_destroy(&block->ports); kfree_rcu(block, rcu); } static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block) { struct tcf_block *block = chain->block; mutex_destroy(&chain->filter_chain_lock); kfree_rcu(chain, rcu); if (free_block) tcf_block_destroy(block); } static void tcf_chain_hold(struct tcf_chain *chain) { ASSERT_BLOCK_LOCKED(chain->block); ++chain->refcnt; } static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain) { ASSERT_BLOCK_LOCKED(chain->block); /* In case all the references are action references, this * chain should not be shown to the user. */ return chain->refcnt == chain->action_refcnt; } static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block, u32 chain_index) { struct tcf_chain *chain; ASSERT_BLOCK_LOCKED(block); list_for_each_entry(chain, &block->chain_list, list) { if (chain->index == chain_index) return chain; } return NULL; } #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block, u32 chain_index) { struct tcf_chain *chain; list_for_each_entry_rcu(chain, &block->chain_list, list) { if (chain->index == chain_index) return chain; } return NULL; } #endif static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, u32 seq, u16 flags, int event, bool unicast, struct netlink_ext_ack *extack); static struct tcf_chain *__tcf_chain_get(struct tcf_block *block, u32 chain_index, bool create, bool by_act) { struct tcf_chain *chain = NULL; bool is_first_reference; mutex_lock(&block->lock); chain = tcf_chain_lookup(block, chain_index); if (chain) { tcf_chain_hold(chain); } else { if (!create) goto errout; chain = tcf_chain_create(block, chain_index); if (!chain) goto errout; } if (by_act) ++chain->action_refcnt; is_first_reference = chain->refcnt - chain->action_refcnt == 1; mutex_unlock(&block->lock); /* Send notification only in case we got the first * non-action reference. Until then, the chain acts only as * a placeholder for actions pointing to it and user ought * not know about them. 
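 * For example, a chain created only by a "goto chain X" action stays
 * invisible to chain dumps until a filter is attached to it, at which
 * point the RTM_NEWCHAIN notification below is sent.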
*/ if (is_first_reference && !by_act) tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL, RTM_NEWCHAIN, false, NULL); return chain; errout: mutex_unlock(&block->lock); return chain; } static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, bool create) { return __tcf_chain_get(block, chain_index, create, false); } struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index) { return __tcf_chain_get(block, chain_index, true, true); } EXPORT_SYMBOL(tcf_chain_get_by_act); static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops, void *tmplt_priv); static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops, void *tmplt_priv, u32 chain_index, struct tcf_block *block, struct sk_buff *oskb, u32 seq, u16 flags); static void __tcf_chain_put(struct tcf_chain *chain, bool by_act, bool explicitly_created) { struct tcf_block *block = chain->block; const struct tcf_proto_ops *tmplt_ops; unsigned int refcnt, non_act_refcnt; bool free_block = false; void *tmplt_priv; mutex_lock(&block->lock); if (explicitly_created) { if (!chain->explicitly_created) { mutex_unlock(&block->lock); return; } chain->explicitly_created = false; } if (by_act) chain->action_refcnt--; /* tc_chain_notify_delete can't be called while holding block lock. * However, when block is unlocked chain can be changed concurrently, so * save these to temporary variables. */ refcnt = --chain->refcnt; non_act_refcnt = refcnt - chain->action_refcnt; tmplt_ops = chain->tmplt_ops; tmplt_priv = chain->tmplt_priv; if (non_act_refcnt == chain->explicitly_created && !by_act) { if (non_act_refcnt == 0) tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index, block, NULL, 0, 0); /* Last reference to chain, no need to lock. */ chain->flushing = false; } if (refcnt == 0) free_block = tcf_chain_detach(chain); mutex_unlock(&block->lock); if (refcnt == 0) { tc_chain_tmplt_del(tmplt_ops, tmplt_priv); tcf_chain_destroy(chain, free_block); } } static void tcf_chain_put(struct tcf_chain *chain) { __tcf_chain_put(chain, false, false); } void tcf_chain_put_by_act(struct tcf_chain *chain) { __tcf_chain_put(chain, true, false); } EXPORT_SYMBOL(tcf_chain_put_by_act); static void tcf_chain_put_explicitly_created(struct tcf_chain *chain) { __tcf_chain_put(chain, false, true); } static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held) { struct tcf_proto *tp, *tp_next; mutex_lock(&chain->filter_chain_lock); tp = tcf_chain_dereference(chain->filter_chain, chain); while (tp) { tp_next = rcu_dereference_protected(tp->next, 1); tcf_proto_signal_destroying(chain, tp); tp = tp_next; } tp = tcf_chain_dereference(chain->filter_chain, chain); RCU_INIT_POINTER(chain->filter_chain, NULL); tcf_chain0_head_change(chain, NULL); chain->flushing = true; mutex_unlock(&chain->filter_chain_lock); while (tp) { tp_next = rcu_dereference_protected(tp->next, 1); tcf_proto_put(tp, rtnl_held, NULL); tp = tp_next; } } static int tcf_block_setup(struct tcf_block *block, struct flow_block_offload *bo); static void tcf_block_offload_init(struct flow_block_offload *bo, struct net_device *dev, struct Qdisc *sch, enum flow_block_command command, enum flow_block_binder_type binder_type, struct flow_block *flow_block, bool shared, struct netlink_ext_ack *extack) { bo->net = dev_net(dev); bo->command = command; bo->binder_type = binder_type; bo->block = flow_block; bo->block_shared = shared; bo->extack = extack; bo->sch = sch; bo->cb_list_head = &flow_block->cb_list; INIT_LIST_HEAD(&bo->cb_list); } static void 
tcf_block_unbind(struct tcf_block *block, struct flow_block_offload *bo); static void tc_block_indr_cleanup(struct flow_block_cb *block_cb) { struct tcf_block *block = block_cb->indr.data; struct net_device *dev = block_cb->indr.dev; struct Qdisc *sch = block_cb->indr.sch; struct netlink_ext_ack extack = {}; struct flow_block_offload bo = {}; tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND, block_cb->indr.binder_type, &block->flow_block, tcf_block_shared(block), &extack); rtnl_lock(); down_write(&block->cb_lock); list_del(&block_cb->driver_list); list_move(&block_cb->list, &bo.cb_list); tcf_block_unbind(block, &bo); up_write(&block->cb_lock); rtnl_unlock(); } static bool tcf_block_offload_in_use(struct tcf_block *block) { return atomic_read(&block->offloadcnt); } static int tcf_block_offload_cmd(struct tcf_block *block, struct net_device *dev, struct Qdisc *sch, struct tcf_block_ext_info *ei, enum flow_block_command command, struct netlink_ext_ack *extack) { struct flow_block_offload bo = {}; tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type, &block->flow_block, tcf_block_shared(block), extack); if (dev->netdev_ops->ndo_setup_tc) { int err; err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo); if (err < 0) { if (err != -EOPNOTSUPP) NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed"); return err; } return tcf_block_setup(block, &bo); } flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo, tc_block_indr_cleanup); tcf_block_setup(block, &bo); return -EOPNOTSUPP; } static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q, struct tcf_block_ext_info *ei, struct netlink_ext_ack *extack) { struct net_device *dev = q->dev_queue->dev; int err; down_write(&block->cb_lock); /* If tc offload feature is disabled and the block we try to bind * to already has some offloaded filters, forbid to bind. 
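 * ("Disabled" here means the NETIF_F_HW_TC feature bit is clear, e.g.
 * after "ethtool -K <dev> hw-tc-offload off"; tc_can_offload() tests
 * exactly that bit.)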
*/ if (dev->netdev_ops->ndo_setup_tc && !tc_can_offload(dev) && tcf_block_offload_in_use(block)) { NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled"); err = -EOPNOTSUPP; goto err_unlock; } err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack); if (err == -EOPNOTSUPP) goto no_offload_dev_inc; if (err) goto err_unlock; up_write(&block->cb_lock); return 0; no_offload_dev_inc: if (tcf_block_offload_in_use(block)) goto err_unlock; err = 0; block->nooffloaddevcnt++; err_unlock: up_write(&block->cb_lock); return err; } static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q, struct tcf_block_ext_info *ei) { struct net_device *dev = q->dev_queue->dev; int err; down_write(&block->cb_lock); err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL); if (err == -EOPNOTSUPP) goto no_offload_dev_dec; up_write(&block->cb_lock); return; no_offload_dev_dec: WARN_ON(block->nooffloaddevcnt-- == 0); up_write(&block->cb_lock); } static int tcf_chain0_head_change_cb_add(struct tcf_block *block, struct tcf_block_ext_info *ei, struct netlink_ext_ack *extack) { struct tcf_filter_chain_list_item *item; struct tcf_chain *chain0; item = kmalloc(sizeof(*item), GFP_KERNEL); if (!item) { NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed"); return -ENOMEM; } item->chain_head_change = ei->chain_head_change; item->chain_head_change_priv = ei->chain_head_change_priv; mutex_lock(&block->lock); chain0 = block->chain0.chain; if (chain0) tcf_chain_hold(chain0); else list_add(&item->list, &block->chain0.filter_chain_list); mutex_unlock(&block->lock); if (chain0) { struct tcf_proto *tp_head; mutex_lock(&chain0->filter_chain_lock); tp_head = tcf_chain_dereference(chain0->filter_chain, chain0); if (tp_head) tcf_chain_head_change_item(item, tp_head); mutex_lock(&block->lock); list_add(&item->list, &block->chain0.filter_chain_list); mutex_unlock(&block->lock); mutex_unlock(&chain0->filter_chain_lock); tcf_chain_put(chain0); } return 0; } static void tcf_chain0_head_change_cb_del(struct tcf_block *block, struct tcf_block_ext_info *ei) { struct tcf_filter_chain_list_item *item; mutex_lock(&block->lock); list_for_each_entry(item, &block->chain0.filter_chain_list, list) { if ((!ei->chain_head_change && !ei->chain_head_change_priv) || (item->chain_head_change == ei->chain_head_change && item->chain_head_change_priv == ei->chain_head_change_priv)) { if (block->chain0.chain) tcf_chain_head_change_item(item, NULL); list_del(&item->list); mutex_unlock(&block->lock); kfree(item); return; } } mutex_unlock(&block->lock); WARN_ON(1); } struct tcf_net { spinlock_t idr_lock; /* Protects idr */ struct idr idr; }; static unsigned int tcf_net_id; static int tcf_block_insert(struct tcf_block *block, struct net *net, struct netlink_ext_ack *extack) { struct tcf_net *tn = net_generic(net, tcf_net_id); int err; idr_preload(GFP_KERNEL); spin_lock(&tn->idr_lock); err = idr_alloc_u32(&tn->idr, block, &block->index, block->index, GFP_NOWAIT); spin_unlock(&tn->idr_lock); idr_preload_end(); return err; } static void tcf_block_remove(struct tcf_block *block, struct net *net) { struct tcf_net *tn = net_generic(net, tcf_net_id); spin_lock(&tn->idr_lock); idr_remove(&tn->idr, block->index); spin_unlock(&tn->idr_lock); } static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q, u32 block_index, struct netlink_ext_ack *extack) { struct tcf_block *block; block = kzalloc(sizeof(*block), GFP_KERNEL); if (!block) { NL_SET_ERR_MSG(extack, 
"Memory allocation for block failed"); return ERR_PTR(-ENOMEM); } mutex_init(&block->lock); mutex_init(&block->proto_destroy_lock); init_rwsem(&block->cb_lock); flow_block_init(&block->flow_block); INIT_LIST_HEAD(&block->chain_list); INIT_LIST_HEAD(&block->owner_list); INIT_LIST_HEAD(&block->chain0.filter_chain_list); refcount_set(&block->refcnt, 1); block->net = net; block->index = block_index; xa_init(&block->ports); /* Don't store q pointer for blocks which are shared */ if (!tcf_block_shared(block)) block->q = q; return block; } struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index) { struct tcf_net *tn = net_generic(net, tcf_net_id); return idr_find(&tn->idr, block_index); } EXPORT_SYMBOL(tcf_block_lookup); static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index) { struct tcf_block *block; rcu_read_lock(); block = tcf_block_lookup(net, block_index); if (block && !refcount_inc_not_zero(&block->refcnt)) block = NULL; rcu_read_unlock(); return block; } static struct tcf_chain * __tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain) { mutex_lock(&block->lock); if (chain) chain = list_is_last(&chain->list, &block->chain_list) ? NULL : list_next_entry(chain, list); else chain = list_first_entry_or_null(&block->chain_list, struct tcf_chain, list); /* skip all action-only chains */ while (chain && tcf_chain_held_by_acts_only(chain)) chain = list_is_last(&chain->list, &block->chain_list) ? NULL : list_next_entry(chain, list); if (chain) tcf_chain_hold(chain); mutex_unlock(&block->lock); return chain; } /* Function to be used by all clients that want to iterate over all chains on * block. It properly obtains block->lock and takes reference to chain before * returning it. Users of this function must be tolerant to concurrent chain * insertion/deletion or ensure that no concurrent chain modification is * possible. Note that all netlink dump callbacks cannot guarantee to provide * consistent dump because rtnl lock is released each time skb is filled with * data and sent to user-space. */ struct tcf_chain * tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain) { struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain); if (chain) tcf_chain_put(chain); return chain_next; } EXPORT_SYMBOL(tcf_get_next_chain); static struct tcf_proto * __tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp) { u32 prio = 0; ASSERT_RTNL(); mutex_lock(&chain->filter_chain_lock); if (!tp) { tp = tcf_chain_dereference(chain->filter_chain, chain); } else if (tcf_proto_is_deleting(tp)) { /* 'deleting' flag is set and chain->filter_chain_lock was * unlocked, which means next pointer could be invalid. Restart * search. */ prio = tp->prio + 1; tp = tcf_chain_dereference(chain->filter_chain, chain); for (; tp; tp = tcf_chain_dereference(tp->next, chain)) if (!tp->deleting && tp->prio >= prio) break; } else { tp = tcf_chain_dereference(tp->next, chain); } if (tp) tcf_proto_get(tp); mutex_unlock(&chain->filter_chain_lock); return tp; } /* Function to be used by all clients that want to iterate over all tp's on * chain. Users of this function must be tolerant to concurrent tp * insertion/deletion or ensure that no concurrent chain modification is * possible. Note that all netlink dump callbacks cannot guarantee to provide * consistent dump because rtnl lock is released each time skb is filled with * data and sent to user-space. 
*/ struct tcf_proto * tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp) { struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp); if (tp) tcf_proto_put(tp, true, NULL); return tp_next; } EXPORT_SYMBOL(tcf_get_next_proto); static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held) { struct tcf_chain *chain; /* Last reference to block. At this point chains cannot be added or * removed concurrently. */ for (chain = tcf_get_next_chain(block, NULL); chain; chain = tcf_get_next_chain(block, chain)) { tcf_chain_put_explicitly_created(chain); tcf_chain_flush(chain, rtnl_held); } } /* Lookup Qdisc and increments its reference counter. * Set parent, if necessary. */ static int __tcf_qdisc_find(struct net *net, struct Qdisc **q, u32 *parent, int ifindex, bool rtnl_held, struct netlink_ext_ack *extack) { const struct Qdisc_class_ops *cops; struct net_device *dev; int err = 0; if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) return 0; rcu_read_lock(); /* Find link */ dev = dev_get_by_index_rcu(net, ifindex); if (!dev) { rcu_read_unlock(); return -ENODEV; } /* Find qdisc */ if (!*parent) { *q = rcu_dereference(dev->qdisc); *parent = (*q)->handle; } else { *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent)); if (!*q) { NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists"); err = -EINVAL; goto errout_rcu; } } *q = qdisc_refcount_inc_nz(*q); if (!*q) { NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists"); err = -EINVAL; goto errout_rcu; } /* Is it classful? */ cops = (*q)->ops->cl_ops; if (!cops) { NL_SET_ERR_MSG(extack, "Qdisc not classful"); err = -EINVAL; goto errout_qdisc; } if (!cops->tcf_block) { NL_SET_ERR_MSG(extack, "Class doesn't support blocks"); err = -EOPNOTSUPP; goto errout_qdisc; } errout_rcu: /* At this point we know that qdisc is not noop_qdisc, * which means that qdisc holds a reference to net_device * and we hold a reference to qdisc, so it is safe to release * rcu read lock. */ rcu_read_unlock(); return err; errout_qdisc: rcu_read_unlock(); if (rtnl_held) qdisc_put(*q); else qdisc_put_unlocked(*q); *q = NULL; return err; } static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl, int ifindex, struct netlink_ext_ack *extack) { if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) return 0; /* Do we search for filter, attached to class? */ if (TC_H_MIN(parent)) { const struct Qdisc_class_ops *cops = q->ops->cl_ops; *cl = cops->find(q, parent); if (*cl == 0) { NL_SET_ERR_MSG(extack, "Specified class doesn't exist"); return -ENOENT; } } return 0; } static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q, unsigned long cl, int ifindex, u32 block_index, struct netlink_ext_ack *extack) { struct tcf_block *block; if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) { block = tcf_block_refcnt_get(net, block_index); if (!block) { NL_SET_ERR_MSG(extack, "Block of given index was not found"); return ERR_PTR(-EINVAL); } } else { const struct Qdisc_class_ops *cops = q->ops->cl_ops; block = cops->tcf_block(q, cl, extack); if (!block) return ERR_PTR(-EINVAL); if (tcf_block_shared(block)) { NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters"); return ERR_PTR(-EOPNOTSUPP); } /* Always take reference to block in order to support execution * of rules update path of cls API without rtnl lock. Caller * must release block when it is finished using it. 'if' block * of this conditional obtain reference to block by calling * tcf_block_refcnt_get(). 
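 * Either way the caller leaves this function owning one block
 * reference, which tcf_block_release() later drops.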
*/ refcount_inc(&block->refcnt); } return block; } static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q, struct tcf_block_ext_info *ei, bool rtnl_held) { if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) { /* Flushing/putting all chains will cause the block to be * deallocated when last chain is freed. However, if chain_list * is empty, block has to be manually deallocated. After block * reference counter reached 0, it is no longer possible to * increment it or add new chains to block. */ bool free_block = list_empty(&block->chain_list); mutex_unlock(&block->lock); if (tcf_block_shared(block)) tcf_block_remove(block, block->net); if (q) tcf_block_offload_unbind(block, q, ei); if (free_block) tcf_block_destroy(block); else tcf_block_flush_all_chains(block, rtnl_held); } else if (q) { tcf_block_offload_unbind(block, q, ei); } } static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held) { __tcf_block_put(block, NULL, NULL, rtnl_held); } /* Find tcf block. * Set q, parent, cl when appropriate. */ static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q, u32 *parent, unsigned long *cl, int ifindex, u32 block_index, struct netlink_ext_ack *extack) { struct tcf_block *block; int err = 0; ASSERT_RTNL(); err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack); if (err) goto errout; err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack); if (err) goto errout_qdisc; block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack); if (IS_ERR(block)) { err = PTR_ERR(block); goto errout_qdisc; } return block; errout_qdisc: if (*q) qdisc_put(*q); errout: *q = NULL; return ERR_PTR(err); } static void tcf_block_release(struct Qdisc *q, struct tcf_block *block, bool rtnl_held) { if (!IS_ERR_OR_NULL(block)) tcf_block_refcnt_put(block, rtnl_held); if (q) { if (rtnl_held) qdisc_put(q); else qdisc_put_unlocked(q); } } struct tcf_block_owner_item { struct list_head list; struct Qdisc *q; enum flow_block_binder_type binder_type; }; static void tcf_block_owner_netif_keep_dst(struct tcf_block *block, struct Qdisc *q, enum flow_block_binder_type binder_type) { if (block->keep_dst && binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) netif_keep_dst(qdisc_dev(q)); } void tcf_block_netif_keep_dst(struct tcf_block *block) { struct tcf_block_owner_item *item; block->keep_dst = true; list_for_each_entry(item, &block->owner_list, list) tcf_block_owner_netif_keep_dst(block, item->q, item->binder_type); } EXPORT_SYMBOL(tcf_block_netif_keep_dst); static int tcf_block_owner_add(struct tcf_block *block, struct Qdisc *q, enum flow_block_binder_type binder_type) { struct tcf_block_owner_item *item; item = kmalloc(sizeof(*item), GFP_KERNEL); if (!item) return -ENOMEM; item->q = q; item->binder_type = binder_type; list_add(&item->list, &block->owner_list); return 0; } static void tcf_block_owner_del(struct tcf_block *block, struct Qdisc *q, enum flow_block_binder_type binder_type) { struct tcf_block_owner_item *item; list_for_each_entry(item, &block->owner_list, list) { if (item->q == q && item->binder_type == binder_type) { list_del(&item->list); kfree(item); return; } } WARN_ON(1); } static bool tcf_block_tracks_dev(struct tcf_block *block, struct tcf_block_ext_info *ei) { return tcf_block_shared(block) && (ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS || ei->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS); } int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, struct 
tcf_block_ext_info *ei, struct netlink_ext_ack *extack) { struct net_device *dev = qdisc_dev(q); struct net *net = qdisc_net(q); struct tcf_block *block = NULL; int err; if (ei->block_index) /* block_index not 0 means the shared block is requested */ block = tcf_block_refcnt_get(net, ei->block_index); if (!block) { block = tcf_block_create(net, q, ei->block_index, extack); if (IS_ERR(block)) return PTR_ERR(block); if (tcf_block_shared(block)) { err = tcf_block_insert(block, net, extack); if (err) goto err_block_insert; } } err = tcf_block_owner_add(block, q, ei->binder_type); if (err) goto err_block_owner_add; tcf_block_owner_netif_keep_dst(block, q, ei->binder_type); err = tcf_chain0_head_change_cb_add(block, ei, extack); if (err) goto err_chain0_head_change_cb_add; err = tcf_block_offload_bind(block, q, ei, extack); if (err) goto err_block_offload_bind; if (tcf_block_tracks_dev(block, ei)) { err = xa_insert(&block->ports, dev->ifindex, dev, GFP_KERNEL); if (err) { NL_SET_ERR_MSG(extack, "block dev insert failed"); goto err_dev_insert; } } *p_block = block; return 0; err_dev_insert: tcf_block_offload_unbind(block, q, ei); err_block_offload_bind: tcf_chain0_head_change_cb_del(block, ei); err_chain0_head_change_cb_add: tcf_block_owner_del(block, q, ei->binder_type); err_block_owner_add: err_block_insert: tcf_block_refcnt_put(block, true); return err; } EXPORT_SYMBOL(tcf_block_get_ext); static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv) { struct tcf_proto __rcu **p_filter_chain = priv; rcu_assign_pointer(*p_filter_chain, tp_head); } int tcf_block_get(struct tcf_block **p_block, struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q, struct netlink_ext_ack *extack) { struct tcf_block_ext_info ei = { .chain_head_change = tcf_chain_head_change_dflt, .chain_head_change_priv = p_filter_chain, }; WARN_ON(!p_filter_chain); return tcf_block_get_ext(p_block, q, &ei, extack); } EXPORT_SYMBOL(tcf_block_get); /* XXX: Standalone actions are not allowed to jump to any chain, and bound * actions should be all removed after flushing. 
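 * Once tcf_block_flush_all_chains() has run, therefore, no action
 * reference can keep a chain alive and the block itself may be freed.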
*/ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, struct tcf_block_ext_info *ei) { struct net_device *dev = qdisc_dev(q); if (!block) return; if (tcf_block_tracks_dev(block, ei)) xa_erase(&block->ports, dev->ifindex); tcf_chain0_head_change_cb_del(block, ei); tcf_block_owner_del(block, q, ei->binder_type); __tcf_block_put(block, q, ei, true); } EXPORT_SYMBOL(tcf_block_put_ext); void tcf_block_put(struct tcf_block *block) { struct tcf_block_ext_info ei = {0, }; if (!block) return; tcf_block_put_ext(block, block->q, &ei); } EXPORT_SYMBOL(tcf_block_put); static int tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb, void *cb_priv, bool add, bool offload_in_use, struct netlink_ext_ack *extack) { struct tcf_chain *chain, *chain_prev; struct tcf_proto *tp, *tp_prev; int err; lockdep_assert_held(&block->cb_lock); for (chain = __tcf_get_next_chain(block, NULL); chain; chain_prev = chain, chain = __tcf_get_next_chain(block, chain), tcf_chain_put(chain_prev)) { if (chain->tmplt_ops && add) chain->tmplt_ops->tmplt_reoffload(chain, true, cb, cb_priv); for (tp = __tcf_get_next_proto(chain, NULL); tp; tp_prev = tp, tp = __tcf_get_next_proto(chain, tp), tcf_proto_put(tp_prev, true, NULL)) { if (tp->ops->reoffload) { err = tp->ops->reoffload(tp, add, cb, cb_priv, extack); if (err && add) goto err_playback_remove; } else if (add && offload_in_use) { err = -EOPNOTSUPP; NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support"); goto err_playback_remove; } } if (chain->tmplt_ops && !add) chain->tmplt_ops->tmplt_reoffload(chain, false, cb, cb_priv); } return 0; err_playback_remove: tcf_proto_put(tp, true, NULL); tcf_chain_put(chain); tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use, extack); return err; } static int tcf_block_bind(struct tcf_block *block, struct flow_block_offload *bo) { struct flow_block_cb *block_cb, *next; int err, i = 0; lockdep_assert_held(&block->cb_lock); list_for_each_entry(block_cb, &bo->cb_list, list) { err = tcf_block_playback_offloads(block, block_cb->cb, block_cb->cb_priv, true, tcf_block_offload_in_use(block), bo->extack); if (err) goto err_unroll; if (!bo->unlocked_driver_cb) block->lockeddevcnt++; i++; } list_splice(&bo->cb_list, &block->flow_block.cb_list); return 0; err_unroll: list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) { list_del(&block_cb->driver_list); if (i-- > 0) { list_del(&block_cb->list); tcf_block_playback_offloads(block, block_cb->cb, block_cb->cb_priv, false, tcf_block_offload_in_use(block), NULL); if (!bo->unlocked_driver_cb) block->lockeddevcnt--; } flow_block_cb_free(block_cb); } return err; } static void tcf_block_unbind(struct tcf_block *block, struct flow_block_offload *bo) { struct flow_block_cb *block_cb, *next; lockdep_assert_held(&block->cb_lock); list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) { tcf_block_playback_offloads(block, block_cb->cb, block_cb->cb_priv, false, tcf_block_offload_in_use(block), NULL); list_del(&block_cb->list); flow_block_cb_free(block_cb); if (!bo->unlocked_driver_cb) block->lockeddevcnt--; } } static int tcf_block_setup(struct tcf_block *block, struct flow_block_offload *bo) { int err; switch (bo->command) { case FLOW_BLOCK_BIND: err = tcf_block_bind(block, bo); break; case FLOW_BLOCK_UNBIND: err = 0; tcf_block_unbind(block, bo); break; default: WARN_ON_ONCE(1); err = -EOPNOTSUPP; } return err; } /* Main classifier routine: scans classifier chain attached * to this qdisc, (optionally) tests for protocol 
and asks * specific classifiers. */ static inline int __tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, const struct tcf_proto *orig_tp, struct tcf_result *res, bool compat_mode, struct tcf_exts_miss_cookie_node *n, int act_index, u32 *last_executed_chain) { #ifdef CONFIG_NET_CLS_ACT const int max_reclassify_loop = 16; const struct tcf_proto *first_tp; int limit = 0; reclassify: #endif for (; tp; tp = rcu_dereference_bh(tp->next)) { __be16 protocol = skb_protocol(skb, false); int err = 0; if (n) { struct tcf_exts *exts; if (n->tp_prio != tp->prio) continue; /* We re-lookup the tp and chain based on index instead * of having hard refs and locks to them, so do a sanity * check if any of tp,chain,exts was replaced by the * time we got here with a cookie from hardware. */ if (unlikely(n->tp != tp || n->tp->chain != n->chain || !tp->ops->get_exts)) { tcf_set_drop_reason(skb, SKB_DROP_REASON_TC_COOKIE_ERROR); return TC_ACT_SHOT; } exts = tp->ops->get_exts(tp, n->handle); if (unlikely(!exts || n->exts != exts)) { tcf_set_drop_reason(skb, SKB_DROP_REASON_TC_COOKIE_ERROR); return TC_ACT_SHOT; } n = NULL; err = tcf_exts_exec_ex(skb, exts, act_index, res); } else { if (tp->protocol != protocol && tp->protocol != htons(ETH_P_ALL)) continue; err = tc_classify(skb, tp, res); } #ifdef CONFIG_NET_CLS_ACT if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) { first_tp = orig_tp; *last_executed_chain = first_tp->chain->index; goto reset; } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) { first_tp = res->goto_tp; *last_executed_chain = err & TC_ACT_EXT_VAL_MASK; goto reset; } #endif if (err >= 0) return err; } if (unlikely(n)) { tcf_set_drop_reason(skb, SKB_DROP_REASON_TC_COOKIE_ERROR); return TC_ACT_SHOT; } return TC_ACT_UNSPEC; /* signal: continue lookup */ #ifdef CONFIG_NET_CLS_ACT reset: if (unlikely(limit++ >= max_reclassify_loop)) { net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n", tp->chain->block->index, tp->prio & 0xffff, ntohs(tp->protocol)); tcf_set_drop_reason(skb, SKB_DROP_REASON_TC_RECLASSIFY_LOOP); return TC_ACT_SHOT; } tp = first_tp; goto reclassify; #endif } int tcf_classify(struct sk_buff *skb, const struct tcf_block *block, const struct tcf_proto *tp, struct tcf_result *res, bool compat_mode) { #if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT) u32 last_executed_chain = 0; return __tcf_classify(skb, tp, tp, res, compat_mode, NULL, 0, &last_executed_chain); #else u32 last_executed_chain = tp ? 
tp->chain->index : 0; struct tcf_exts_miss_cookie_node *n = NULL; const struct tcf_proto *orig_tp = tp; struct tc_skb_ext *ext; int act_index = 0; int ret; if (block) { ext = skb_ext_find(skb, TC_SKB_EXT); if (ext && (ext->chain || ext->act_miss)) { struct tcf_chain *fchain; u32 chain; if (ext->act_miss) { n = tcf_exts_miss_cookie_lookup(ext->act_miss_cookie, &act_index); if (!n) { tcf_set_drop_reason(skb, SKB_DROP_REASON_TC_COOKIE_ERROR); return TC_ACT_SHOT; } chain = n->chain_index; } else { chain = ext->chain; } fchain = tcf_chain_lookup_rcu(block, chain); if (!fchain) { tcf_set_drop_reason(skb, SKB_DROP_REASON_TC_CHAIN_NOTFOUND); return TC_ACT_SHOT; } /* Consume, so cloned/redirect skbs won't inherit ext */ skb_ext_del(skb, TC_SKB_EXT); tp = rcu_dereference_bh(fchain->filter_chain); last_executed_chain = fchain->index; } } ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode, n, act_index, &last_executed_chain); if (tc_skb_ext_tc_enabled()) { /* If we missed on some chain */ if (ret == TC_ACT_UNSPEC && last_executed_chain) { struct tc_skb_cb *cb = tc_skb_cb(skb); ext = tc_skb_ext_alloc(skb); if (WARN_ON_ONCE(!ext)) { tcf_set_drop_reason(skb, SKB_DROP_REASON_NOMEM); return TC_ACT_SHOT; } ext->chain = last_executed_chain; ext->mru = cb->mru; ext->post_ct = cb->post_ct; ext->post_ct_snat = cb->post_ct_snat; ext->post_ct_dnat = cb->post_ct_dnat; ext->zone = cb->zone; } } return ret; #endif } EXPORT_SYMBOL(tcf_classify); struct tcf_chain_info { struct tcf_proto __rcu **pprev; struct tcf_proto __rcu *next; }; static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain, struct tcf_chain_info *chain_info) { return tcf_chain_dereference(*chain_info->pprev, chain); } static int tcf_chain_tp_insert(struct tcf_chain *chain, struct tcf_chain_info *chain_info, struct tcf_proto *tp) { if (chain->flushing) return -EAGAIN; RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info)); if (*chain_info->pprev == chain->filter_chain) tcf_chain0_head_change(chain, tp); tcf_proto_get(tp); rcu_assign_pointer(*chain_info->pprev, tp); return 0; } static void tcf_chain_tp_remove(struct tcf_chain *chain, struct tcf_chain_info *chain_info, struct tcf_proto *tp) { struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain); tcf_proto_mark_delete(tp); if (tp == chain->filter_chain) tcf_chain0_head_change(chain, next); RCU_INIT_POINTER(*chain_info->pprev, next); } static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain, struct tcf_chain_info *chain_info, u32 protocol, u32 prio, bool prio_allocate, struct netlink_ext_ack *extack); /* Try to insert new proto. * If proto with specified priority already exists, free new proto * and return existing one. 
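 * This is how two concurrent RTM_NEWTFILTER requests for the same
 * (chain, prio, protocol) are reconciled: whichever request loses the
 * race under filter_chain_lock destroys its freshly created tp and
 * continues with the winner's.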
*/ static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain, struct tcf_proto *tp_new, u32 protocol, u32 prio, bool rtnl_held) { struct tcf_chain_info chain_info; struct tcf_proto *tp; int err = 0; mutex_lock(&chain->filter_chain_lock); if (tcf_proto_exists_destroying(chain, tp_new)) { mutex_unlock(&chain->filter_chain_lock); tcf_proto_destroy(tp_new, rtnl_held, false, NULL); return ERR_PTR(-EAGAIN); } tp = tcf_chain_tp_find(chain, &chain_info, protocol, prio, false, NULL); if (!tp) err = tcf_chain_tp_insert(chain, &chain_info, tp_new); mutex_unlock(&chain->filter_chain_lock); if (tp) { tcf_proto_destroy(tp_new, rtnl_held, false, NULL); tp_new = tp; } else if (err) { tcf_proto_destroy(tp_new, rtnl_held, false, NULL); tp_new = ERR_PTR(err); } return tp_new; } static void tcf_chain_tp_delete_empty(struct tcf_chain *chain, struct tcf_proto *tp, bool rtnl_held, struct netlink_ext_ack *extack) { struct tcf_chain_info chain_info; struct tcf_proto *tp_iter; struct tcf_proto **pprev; struct tcf_proto *next; mutex_lock(&chain->filter_chain_lock); /* Atomically find and remove tp from chain. */ for (pprev = &chain->filter_chain; (tp_iter = tcf_chain_dereference(*pprev, chain)); pprev = &tp_iter->next) { if (tp_iter == tp) { chain_info.pprev = pprev; chain_info.next = tp_iter->next; WARN_ON(tp_iter->deleting); break; } } /* Verify that tp still exists and no new filters were inserted * concurrently. * Mark tp for deletion if it is empty. */ if (!tp_iter || !tcf_proto_check_delete(tp)) { mutex_unlock(&chain->filter_chain_lock); return; } tcf_proto_signal_destroying(chain, tp); next = tcf_chain_dereference(chain_info.next, chain); if (tp == chain->filter_chain) tcf_chain0_head_change(chain, next); RCU_INIT_POINTER(*chain_info.pprev, next); mutex_unlock(&chain->filter_chain_lock); tcf_proto_put(tp, rtnl_held, extack); } static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain, struct tcf_chain_info *chain_info, u32 protocol, u32 prio, bool prio_allocate, struct netlink_ext_ack *extack) { struct tcf_proto **pprev; struct tcf_proto *tp; /* Check the chain for existence of proto-tcf with this priority */ for (pprev = &chain->filter_chain; (tp = tcf_chain_dereference(*pprev, chain)); pprev = &tp->next) { if (tp->prio >= prio) { if (tp->prio == prio) { if (prio_allocate) { NL_SET_ERR_MSG(extack, "Lowest ID from auto-alloc range already in use"); return ERR_PTR(-ENOSPC); } if (tp->protocol != protocol && protocol) { NL_SET_ERR_MSG(extack, "Protocol mismatch for filter with specified priority"); return ERR_PTR(-EINVAL); } } else { tp = NULL; } break; } } chain_info->pprev = pprev; if (tp) { chain_info->next = tp->next; tcf_proto_get(tp); } else { chain_info->next = NULL; } return tp; } static int tcf_fill_node(struct net *net, struct sk_buff *skb, struct tcf_proto *tp, struct tcf_block *block, struct Qdisc *q, u32 parent, void *fh, u32 portid, u32 seq, u16 flags, int event, bool terse_dump, bool rtnl_held, struct netlink_ext_ack *extack) { struct tcmsg *tcm; struct nlmsghdr *nlh; unsigned char *b = skb_tail_pointer(skb); int ret = -EMSGSIZE; nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); if (!nlh) goto out_nlmsg_trim; tcm = nlmsg_data(nlh); tcm->tcm_family = AF_UNSPEC; tcm->tcm__pad1 = 0; tcm->tcm__pad2 = 0; if (q) { tcm->tcm_ifindex = qdisc_dev(q)->ifindex; tcm->tcm_parent = parent; } else { tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK; tcm->tcm_block_index = block->index; } tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol); if (nla_put_string(skb, TCA_KIND, 
tp->ops->kind)) goto nla_put_failure; if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index)) goto nla_put_failure; if (!fh) { tcm->tcm_handle = 0; } else if (terse_dump) { if (tp->ops->terse_dump) { if (tp->ops->terse_dump(net, tp, fh, skb, tcm, rtnl_held) < 0) goto nla_put_failure; } else { goto cls_op_not_supp; } } else { if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0) goto nla_put_failure; } if (extack && extack->_msg && nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg)) goto nla_put_failure; nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; cls_op_not_supp: ret = -EOPNOTSUPP; out_nlmsg_trim: nla_put_failure: nlmsg_trim(skb, b); return ret; } static struct sk_buff *tfilter_notify_prep(struct net *net, struct sk_buff *oskb, struct nlmsghdr *n, struct tcf_proto *tp, struct tcf_block *block, struct Qdisc *q, u32 parent, void *fh, int event, u32 portid, bool rtnl_held, struct netlink_ext_ack *extack) { unsigned int size = oskb ? max(NLMSG_GOODSIZE, oskb->len) : NLMSG_GOODSIZE; struct sk_buff *skb; int ret; retry: skb = alloc_skb(size, GFP_KERNEL); if (!skb) return ERR_PTR(-ENOBUFS); ret = tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, n->nlmsg_seq, n->nlmsg_flags, event, false, rtnl_held, extack); if (ret <= 0) { kfree_skb(skb); if (ret == -EMSGSIZE) { size += NLMSG_GOODSIZE; goto retry; } return ERR_PTR(-EINVAL); } return skb; } static int tfilter_notify(struct net *net, struct sk_buff *oskb, struct nlmsghdr *n, struct tcf_proto *tp, struct tcf_block *block, struct Qdisc *q, u32 parent, void *fh, int event, bool unicast, bool rtnl_held, struct netlink_ext_ack *extack) { struct sk_buff *skb; u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; int err = 0; if (!unicast && !rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC)) return 0; skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh, event, portid, rtnl_held, extack); if (IS_ERR(skb)) return PTR_ERR(skb); if (unicast) err = rtnl_unicast(skb, net, portid); else err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO); return err; } static int tfilter_del_notify(struct net *net, struct sk_buff *oskb, struct nlmsghdr *n, struct tcf_proto *tp, struct tcf_block *block, struct Qdisc *q, u32 parent, void *fh, bool *last, bool rtnl_held, struct netlink_ext_ack *extack) { struct sk_buff *skb; u32 portid = oskb ? 
NETLINK_CB(oskb).portid : 0; int err; if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC)) return tp->ops->delete(tp, fh, last, rtnl_held, extack); skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh, RTM_DELTFILTER, portid, rtnl_held, extack); if (IS_ERR(skb)) { NL_SET_ERR_MSG(extack, "Failed to build del event notification"); return PTR_ERR(skb); } err = tp->ops->delete(tp, fh, last, rtnl_held, extack); if (err) { kfree_skb(skb); return err; } err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, n->nlmsg_flags & NLM_F_ECHO); if (err < 0) NL_SET_ERR_MSG(extack, "Failed to send filter delete notification"); return err; } static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb, struct tcf_block *block, struct Qdisc *q, u32 parent, struct nlmsghdr *n, struct tcf_chain *chain, int event, struct netlink_ext_ack *extack) { struct tcf_proto *tp; for (tp = tcf_get_next_proto(chain, NULL); tp; tp = tcf_get_next_proto(chain, tp)) tfilter_notify(net, oskb, n, tp, block, q, parent, NULL, event, false, true, extack); } static void tfilter_put(struct tcf_proto *tp, void *fh) { if (tp->ops->put && fh) tp->ops->put(tp, fh); } static bool is_qdisc_ingress(__u32 classid) { return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS)); } static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct nlattr *tca[TCA_MAX + 1]; char name[IFNAMSIZ]; struct tcmsg *t; u32 protocol; u32 prio; bool prio_allocate; u32 parent; u32 chain_index; struct Qdisc *q; struct tcf_chain_info chain_info; struct tcf_chain *chain; struct tcf_block *block; struct tcf_proto *tp; unsigned long cl; void *fh; int err; int tp_created; bool rtnl_held = false; u32 flags; replay: tp_created = 0; err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack); if (err < 0) return err; t = nlmsg_data(n); protocol = TC_H_MIN(t->tcm_info); prio = TC_H_MAJ(t->tcm_info); prio_allocate = false; parent = t->tcm_parent; tp = NULL; cl = 0; block = NULL; q = NULL; chain = NULL; flags = 0; if (prio == 0) { /* If no priority is provided by the user, * we allocate one. */ if (n->nlmsg_flags & NLM_F_CREATE) { prio = TC_H_MAKE(0x80000000U, 0U); prio_allocate = true; } else { NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero"); return -ENOENT; } } /* Find head of filter chain. */ err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); if (err) return err; if (tcf_proto_check_kind(tca[TCA_KIND], name)) { NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); err = -EINVAL; goto errout; } /* Take rtnl mutex if rtnl_held was set to true on previous iteration, * block is shared (no qdisc found), qdisc is not unlocked, classifier * type is not specified, classifier is not unlocked. 
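 * The "previous iteration" case covers requests replayed after -EAGAIN:
 * those always rerun under rtnl, so they cannot race the chain flush or
 * module load that triggered the retry.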
*/ if (rtnl_held || (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || !tcf_proto_is_unlocked(name)) { rtnl_held = true; rtnl_lock(); } err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); if (err) goto errout; block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, extack); if (IS_ERR(block)) { err = PTR_ERR(block); goto errout; } block->classid = parent; chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0); if (chain_index > TC_ACT_EXT_VAL_MASK) { NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); err = -EINVAL; goto errout; } chain = tcf_chain_get(block, chain_index, true); if (!chain) { NL_SET_ERR_MSG(extack, "Cannot create specified filter chain"); err = -ENOMEM; goto errout; } mutex_lock(&chain->filter_chain_lock); tp = tcf_chain_tp_find(chain, &chain_info, protocol, prio, prio_allocate, extack); if (IS_ERR(tp)) { err = PTR_ERR(tp); goto errout_locked; } if (tp == NULL) { struct tcf_proto *tp_new = NULL; if (chain->flushing) { err = -EAGAIN; goto errout_locked; } /* Proto-tcf does not exist, create new one */ if (tca[TCA_KIND] == NULL || !protocol) { NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified"); err = -EINVAL; goto errout_locked; } if (!(n->nlmsg_flags & NLM_F_CREATE)) { NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter"); err = -ENOENT; goto errout_locked; } if (prio_allocate) prio = tcf_auto_prio(tcf_chain_tp_prev(chain, &chain_info)); mutex_unlock(&chain->filter_chain_lock); tp_new = tcf_proto_create(name, protocol, prio, chain, rtnl_held, extack); if (IS_ERR(tp_new)) { err = PTR_ERR(tp_new); goto errout_tp; } tp_created = 1; tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio, rtnl_held); if (IS_ERR(tp)) { err = PTR_ERR(tp); goto errout_tp; } } else { mutex_unlock(&chain->filter_chain_lock); } if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); err = -EINVAL; goto errout; } fh = tp->ops->get(tp, t->tcm_handle); if (!fh) { if (!(n->nlmsg_flags & NLM_F_CREATE)) { NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter"); err = -ENOENT; goto errout; } } else if (n->nlmsg_flags & NLM_F_EXCL) { tfilter_put(tp, fh); NL_SET_ERR_MSG(extack, "Filter already exists"); err = -EEXIST; goto errout; } if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) { tfilter_put(tp, fh); NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind"); err = -EINVAL; goto errout; } if (!(n->nlmsg_flags & NLM_F_CREATE)) flags |= TCA_ACT_FLAGS_REPLACE; if (!rtnl_held) flags |= TCA_ACT_FLAGS_NO_RTNL; if (is_qdisc_ingress(parent)) flags |= TCA_ACT_FLAGS_AT_INGRESS; err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh, flags, extack); if (err == 0) { tfilter_notify(net, skb, n, tp, block, q, parent, fh, RTM_NEWTFILTER, false, rtnl_held, extack); tfilter_put(tp, fh); tcf_proto_count_usesw(tp, true); /* q pointer is NULL for shared blocks */ if (q) q->flags &= ~TCQ_F_CAN_BYPASS; } errout: if (err && tp_created) tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL); errout_tp: if (chain) { if (tp && !IS_ERR(tp)) tcf_proto_put(tp, rtnl_held, NULL); if (!tp_created) tcf_chain_put(chain); } tcf_block_release(q, block, rtnl_held); if (rtnl_held) rtnl_unlock(); if (err == -EAGAIN) { /* Take rtnl lock in case EAGAIN is caused by concurrent flush * of target chain. */ rtnl_held = true; /* Replay the request. 
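 * Both sources of -EAGAIN (a classifier module that had to be
 * autoloaded, and a target chain flushed concurrently) leave no partial
 * state behind, so restarting from the netlink message parse is safe.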
*/ goto replay; } return err; errout_locked: mutex_unlock(&chain->filter_chain_lock); goto errout; } static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct nlattr *tca[TCA_MAX + 1]; char name[IFNAMSIZ]; struct tcmsg *t; u32 protocol; u32 prio; u32 parent; u32 chain_index; struct Qdisc *q = NULL; struct tcf_chain_info chain_info; struct tcf_chain *chain = NULL; struct tcf_block *block = NULL; struct tcf_proto *tp = NULL; unsigned long cl = 0; void *fh = NULL; int err; bool rtnl_held = false; err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack); if (err < 0) return err; t = nlmsg_data(n); protocol = TC_H_MIN(t->tcm_info); prio = TC_H_MAJ(t->tcm_info); parent = t->tcm_parent; if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) { NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set"); return -ENOENT; } /* Find head of filter chain. */ err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); if (err) return err; if (tcf_proto_check_kind(tca[TCA_KIND], name)) { NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); err = -EINVAL; goto errout; } /* Take rtnl mutex if flushing whole chain, block is shared (no qdisc * found), qdisc is not unlocked, classifier type is not specified, * classifier is not unlocked. */ if (!prio || (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || !tcf_proto_is_unlocked(name)) { rtnl_held = true; rtnl_lock(); } err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); if (err) goto errout; block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, extack); if (IS_ERR(block)) { err = PTR_ERR(block); goto errout; } chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0); if (chain_index > TC_ACT_EXT_VAL_MASK) { NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); err = -EINVAL; goto errout; } chain = tcf_chain_get(block, chain_index, false); if (!chain) { /* User requested flush on non-existent chain. Nothing to do, * so just return success. 
*/ if (prio == 0) { err = 0; goto errout; } NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); err = -ENOENT; goto errout; } if (prio == 0) { tfilter_notify_chain(net, skb, block, q, parent, n, chain, RTM_DELTFILTER, extack); tcf_chain_flush(chain, rtnl_held); err = 0; goto errout; } mutex_lock(&chain->filter_chain_lock); tp = tcf_chain_tp_find(chain, &chain_info, protocol, prio, false, extack); if (!tp) { err = -ENOENT; NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); goto errout_locked; } else if (IS_ERR(tp)) { err = PTR_ERR(tp); goto errout_locked; } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); err = -EINVAL; goto errout_locked; } else if (t->tcm_handle == 0) { tcf_proto_signal_destroying(chain, tp); tcf_chain_tp_remove(chain, &chain_info, tp); mutex_unlock(&chain->filter_chain_lock); tcf_proto_put(tp, rtnl_held, NULL); tfilter_notify(net, skb, n, tp, block, q, parent, fh, RTM_DELTFILTER, false, rtnl_held, extack); err = 0; goto errout; } mutex_unlock(&chain->filter_chain_lock); fh = tp->ops->get(tp, t->tcm_handle); if (!fh) { NL_SET_ERR_MSG(extack, "Specified filter handle not found"); err = -ENOENT; } else { bool last; err = tfilter_del_notify(net, skb, n, tp, block, q, parent, fh, &last, rtnl_held, extack); if (err) goto errout; if (last) tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack); } errout: if (chain) { if (tp && !IS_ERR(tp)) tcf_proto_put(tp, rtnl_held, NULL); tcf_chain_put(chain); } tcf_block_release(q, block, rtnl_held); if (rtnl_held) rtnl_unlock(); return err; errout_locked: mutex_unlock(&chain->filter_chain_lock); goto errout; } static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct nlattr *tca[TCA_MAX + 1]; char name[IFNAMSIZ]; struct tcmsg *t; u32 protocol; u32 prio; u32 parent; u32 chain_index; struct Qdisc *q = NULL; struct tcf_chain_info chain_info; struct tcf_chain *chain = NULL; struct tcf_block *block = NULL; struct tcf_proto *tp = NULL; unsigned long cl = 0; void *fh = NULL; int err; bool rtnl_held = false; err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack); if (err < 0) return err; t = nlmsg_data(n); protocol = TC_H_MIN(t->tcm_info); prio = TC_H_MAJ(t->tcm_info); parent = t->tcm_parent; if (prio == 0) { NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero"); return -ENOENT; } /* Find head of filter chain. */ err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack); if (err) return err; if (tcf_proto_check_kind(tca[TCA_KIND], name)) { NL_SET_ERR_MSG(extack, "Specified TC filter name too long"); err = -EINVAL; goto errout; } /* Take rtnl mutex if block is shared (no qdisc found), qdisc is not * unlocked, classifier type is not specified, classifier is not * unlocked. 
*/ if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) || !tcf_proto_is_unlocked(name)) { rtnl_held = true; rtnl_lock(); } err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack); if (err) goto errout; block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, extack); if (IS_ERR(block)) { err = PTR_ERR(block); goto errout; } chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0); if (chain_index > TC_ACT_EXT_VAL_MASK) { NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); err = -EINVAL; goto errout; } chain = tcf_chain_get(block, chain_index, false); if (!chain) { NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); err = -EINVAL; goto errout; } mutex_lock(&chain->filter_chain_lock); tp = tcf_chain_tp_find(chain, &chain_info, protocol, prio, false, extack); mutex_unlock(&chain->filter_chain_lock); if (!tp) { err = -ENOENT; NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found"); goto errout; } else if (IS_ERR(tp)) { err = PTR_ERR(tp); goto errout; } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) { NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one"); err = -EINVAL; goto errout; } fh = tp->ops->get(tp, t->tcm_handle); if (!fh) { NL_SET_ERR_MSG(extack, "Specified filter handle not found"); err = -ENOENT; } else { err = tfilter_notify(net, skb, n, tp, block, q, parent, fh, RTM_NEWTFILTER, true, rtnl_held, NULL); if (err < 0) NL_SET_ERR_MSG(extack, "Failed to send filter notify message"); } tfilter_put(tp, fh); errout: if (chain) { if (tp && !IS_ERR(tp)) tcf_proto_put(tp, rtnl_held, NULL); tcf_chain_put(chain); } tcf_block_release(q, block, rtnl_held); if (rtnl_held) rtnl_unlock(); return err; } struct tcf_dump_args { struct tcf_walker w; struct sk_buff *skb; struct netlink_callback *cb; struct tcf_block *block; struct Qdisc *q; u32 parent; bool terse_dump; }; static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg) { struct tcf_dump_args *a = (void *)arg; struct net *net = sock_net(a->skb->sk); return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent, n, NETLINK_CB(a->cb->skb).portid, a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER, a->terse_dump, true, NULL); } static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent, struct sk_buff *skb, struct netlink_callback *cb, long index_start, long *p_index, bool terse) { struct net *net = sock_net(skb->sk); struct tcf_block *block = chain->block; struct tcmsg *tcm = nlmsg_data(cb->nlh); struct tcf_proto *tp, *tp_prev; struct tcf_dump_args arg; for (tp = __tcf_get_next_proto(chain, NULL); tp; tp_prev = tp, tp = __tcf_get_next_proto(chain, tp), tcf_proto_put(tp_prev, true, NULL), (*p_index)++) { if (*p_index < index_start) continue; if (TC_H_MAJ(tcm->tcm_info) && TC_H_MAJ(tcm->tcm_info) != tp->prio) continue; if (TC_H_MIN(tcm->tcm_info) && TC_H_MIN(tcm->tcm_info) != tp->protocol) continue; if (*p_index > index_start) memset(&cb->args[1], 0, sizeof(cb->args) - sizeof(cb->args[0])); if (cb->args[1] == 0) { if (tcf_fill_node(net, skb, tp, block, q, parent, NULL, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER, false, true, NULL) <= 0) goto errout; cb->args[1] = 1; } if (!tp->ops->walk) continue; arg.w.fn = tcf_node_dump; arg.skb = skb; arg.cb = cb; arg.block = block; arg.q = q; arg.parent = parent; arg.w.stop = 0; arg.w.skip = cb->args[1] - 1; arg.w.count = 0; arg.w.cookie = cb->args[2]; arg.terse_dump = terse; tp->ops->walk(tp, &arg.w, true); 
cb->args[2] = arg.w.cookie; cb->args[1] = arg.w.count + 1; if (arg.w.stop) goto errout; } return true; errout: tcf_proto_put(tp, true, NULL); return false; } static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = { [TCA_CHAIN] = { .type = NLA_U32 }, [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE), }; /* called with RTNL */ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) { struct tcf_chain *chain, *chain_prev; struct net *net = sock_net(skb->sk); struct nlattr *tca[TCA_MAX + 1]; struct Qdisc *q = NULL; struct tcf_block *block; struct tcmsg *tcm = nlmsg_data(cb->nlh); bool terse_dump = false; long index_start; long index; u32 parent; int err; if (nlmsg_len(cb->nlh) < sizeof(*tcm)) return skb->len; err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX, tcf_tfilter_dump_policy, cb->extack); if (err) return err; if (tca[TCA_DUMP_FLAGS]) { struct nla_bitfield32 flags = nla_get_bitfield32(tca[TCA_DUMP_FLAGS]); terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE; } if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { block = tcf_block_refcnt_get(net, tcm->tcm_block_index); if (!block) goto out; /* If we work with block index, q is NULL and parent value * will never be used in the following code. The check * in tcf_fill_node prevents it. However, compiler does not * see that far, so set parent to zero to silence the warning * about parent being uninitialized. */ parent = 0; } else { const struct Qdisc_class_ops *cops; struct net_device *dev; unsigned long cl = 0; dev = __dev_get_by_index(net, tcm->tcm_ifindex); if (!dev) return skb->len; parent = tcm->tcm_parent; if (!parent) q = rtnl_dereference(dev->qdisc); else q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); if (!q) goto out; cops = q->ops->cl_ops; if (!cops) goto out; if (!cops->tcf_block) goto out; if (TC_H_MIN(tcm->tcm_parent)) { cl = cops->find(q, tcm->tcm_parent); if (cl == 0) goto out; } block = cops->tcf_block(q, cl, NULL); if (!block) goto out; parent = block->classid; if (tcf_block_shared(block)) q = NULL; } index_start = cb->args[0]; index = 0; for (chain = __tcf_get_next_chain(block, NULL); chain; chain_prev = chain, chain = __tcf_get_next_chain(block, chain), tcf_chain_put(chain_prev)) { if (tca[TCA_CHAIN] && nla_get_u32(tca[TCA_CHAIN]) != chain->index) continue; if (!tcf_chain_dump(chain, q, parent, skb, cb, index_start, &index, terse_dump)) { tcf_chain_put(chain); err = -EMSGSIZE; break; } } if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) tcf_block_refcnt_put(block, true); cb->args[0] = index; out: /* If we did no progress, the error (EMSGSIZE) is real */ if (skb->len == 0 && err) return err; return skb->len; } static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops, void *tmplt_priv, u32 chain_index, struct net *net, struct sk_buff *skb, struct tcf_block *block, u32 portid, u32 seq, u16 flags, int event, struct netlink_ext_ack *extack) { unsigned char *b = skb_tail_pointer(skb); const struct tcf_proto_ops *ops; struct nlmsghdr *nlh; struct tcmsg *tcm; void *priv; ops = tmplt_ops; priv = tmplt_priv; nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); if (!nlh) goto out_nlmsg_trim; tcm = nlmsg_data(nlh); tcm->tcm_family = AF_UNSPEC; tcm->tcm__pad1 = 0; tcm->tcm__pad2 = 0; tcm->tcm_handle = 0; if (block->q) { tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex; tcm->tcm_parent = block->q->handle; } else { tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK; tcm->tcm_block_index = block->index; } if (nla_put_u32(skb, TCA_CHAIN, chain_index)) goto 
nla_put_failure; if (ops) { if (nla_put_string(skb, TCA_KIND, ops->kind)) goto nla_put_failure; if (ops->tmplt_dump(skb, net, priv) < 0) goto nla_put_failure; } if (extack && extack->_msg && nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg)) goto out_nlmsg_trim; nlh->nlmsg_len = skb_tail_pointer(skb) - b; return skb->len; out_nlmsg_trim: nla_put_failure: nlmsg_trim(skb, b); return -EMSGSIZE; } static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, u32 seq, u16 flags, int event, bool unicast, struct netlink_ext_ack *extack) { u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; struct tcf_block *block = chain->block; struct net *net = block->net; struct sk_buff *skb; int err = 0; if (!unicast && !rtnl_notify_needed(net, flags, RTNLGRP_TC)) return 0; skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) return -ENOBUFS; if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv, chain->index, net, skb, block, portid, seq, flags, event, extack) <= 0) { kfree_skb(skb); return -EINVAL; } if (unicast) err = rtnl_unicast(skb, net, portid); else err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO); return err; } static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops, void *tmplt_priv, u32 chain_index, struct tcf_block *block, struct sk_buff *oskb, u32 seq, u16 flags) { u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; struct net *net = block->net; struct sk_buff *skb; if (!rtnl_notify_needed(net, flags, RTNLGRP_TC)) return 0; skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) return -ENOBUFS; if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb, block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) { kfree_skb(skb); return -EINVAL; } return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO); } static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net, struct nlattr **tca, struct netlink_ext_ack *extack) { const struct tcf_proto_ops *ops; char name[IFNAMSIZ]; void *tmplt_priv; /* If kind is not set, user did not specify template. */ if (!tca[TCA_KIND]) return 0; if (tcf_proto_check_kind(tca[TCA_KIND], name)) { NL_SET_ERR_MSG(extack, "Specified TC chain template name too long"); return -EINVAL; } ops = tcf_proto_lookup_ops(name, true, extack); if (IS_ERR(ops)) return PTR_ERR(ops); if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump || !ops->tmplt_reoffload) { NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier"); module_put(ops->owner); return -EOPNOTSUPP; } tmplt_priv = ops->tmplt_create(net, chain, tca, extack); if (IS_ERR(tmplt_priv)) { module_put(ops->owner); return PTR_ERR(tmplt_priv); } chain->tmplt_ops = ops; chain->tmplt_priv = tmplt_priv; return 0; } static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops, void *tmplt_priv) { /* If template ops are set, no work to do for us. 
*/ if (!tmplt_ops) return; tmplt_ops->tmplt_destroy(tmplt_priv); module_put(tmplt_ops->owner); } /* Add/delete/get a chain */ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct nlattr *tca[TCA_MAX + 1]; struct tcmsg *t; u32 parent; u32 chain_index; struct Qdisc *q; struct tcf_chain *chain; struct tcf_block *block; unsigned long cl; int err; replay: q = NULL; err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack); if (err < 0) return err; t = nlmsg_data(n); parent = t->tcm_parent; cl = 0; block = tcf_block_find(net, &q, &parent, &cl, t->tcm_ifindex, t->tcm_block_index, extack); if (IS_ERR(block)) return PTR_ERR(block); chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0); if (chain_index > TC_ACT_EXT_VAL_MASK) { NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit"); err = -EINVAL; goto errout_block; } mutex_lock(&block->lock); chain = tcf_chain_lookup(block, chain_index); if (n->nlmsg_type == RTM_NEWCHAIN) { if (chain) { if (tcf_chain_held_by_acts_only(chain)) { /* The chain exists only because there is * some action referencing it. */ tcf_chain_hold(chain); } else { NL_SET_ERR_MSG(extack, "Filter chain already exists"); err = -EEXIST; goto errout_block_locked; } } else { if (!(n->nlmsg_flags & NLM_F_CREATE)) { NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain"); err = -ENOENT; goto errout_block_locked; } chain = tcf_chain_create(block, chain_index); if (!chain) { NL_SET_ERR_MSG(extack, "Failed to create filter chain"); err = -ENOMEM; goto errout_block_locked; } } } else { if (!chain || tcf_chain_held_by_acts_only(chain)) { NL_SET_ERR_MSG(extack, "Cannot find specified filter chain"); err = -EINVAL; goto errout_block_locked; } tcf_chain_hold(chain); } if (n->nlmsg_type == RTM_NEWCHAIN) { /* Modifying chain requires holding parent block lock. In case * the chain was successfully added, take a reference to the * chain. This ensures that an empty chain does not disappear at * the end of this function. */ tcf_chain_hold(chain); chain->explicitly_created = true; } mutex_unlock(&block->lock); switch (n->nlmsg_type) { case RTM_NEWCHAIN: err = tc_chain_tmplt_add(chain, net, tca, extack); if (err) { tcf_chain_put_explicitly_created(chain); goto errout; } tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL, RTM_NEWCHAIN, false, extack); break; case RTM_DELCHAIN: tfilter_notify_chain(net, skb, block, q, parent, n, chain, RTM_DELTFILTER, extack); /* Flush the chain first as the user requested chain removal. */ tcf_chain_flush(chain, true); /* In case the chain was successfully deleted, put a reference * to the chain previously taken during addition. */ tcf_chain_put_explicitly_created(chain); break; case RTM_GETCHAIN: err = tc_chain_notify(chain, skb, n->nlmsg_seq, n->nlmsg_flags, n->nlmsg_type, true, extack); if (err < 0) NL_SET_ERR_MSG(extack, "Failed to send chain notify message"); break; default: err = -EOPNOTSUPP; NL_SET_ERR_MSG(extack, "Unsupported message type"); goto errout; } errout: tcf_chain_put(chain); errout_block: tcf_block_release(q, block, true); if (err == -EAGAIN) /* Replay the request. 
*/ goto replay; return err; errout_block_locked: mutex_unlock(&block->lock); goto errout_block; } /* called with RTNL */ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); struct nlattr *tca[TCA_MAX + 1]; struct Qdisc *q = NULL; struct tcf_block *block; struct tcmsg *tcm = nlmsg_data(cb->nlh); struct tcf_chain *chain; long index_start; long index; int err; if (nlmsg_len(cb->nlh) < sizeof(*tcm)) return skb->len; err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy, cb->extack); if (err) return err; if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) { block = tcf_block_refcnt_get(net, tcm->tcm_block_index); if (!block) goto out; } else { const struct Qdisc_class_ops *cops; struct net_device *dev; unsigned long cl = 0; dev = __dev_get_by_index(net, tcm->tcm_ifindex); if (!dev) return skb->len; if (!tcm->tcm_parent) q = rtnl_dereference(dev->qdisc); else q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); if (!q) goto out; cops = q->ops->cl_ops; if (!cops) goto out; if (!cops->tcf_block) goto out; if (TC_H_MIN(tcm->tcm_parent)) { cl = cops->find(q, tcm->tcm_parent); if (cl == 0) goto out; } block = cops->tcf_block(q, cl, NULL); if (!block) goto out; if (tcf_block_shared(block)) q = NULL; } index_start = cb->args[0]; index = 0; mutex_lock(&block->lock); list_for_each_entry(chain, &block->chain_list, list) { if ((tca[TCA_CHAIN] && nla_get_u32(tca[TCA_CHAIN]) != chain->index)) continue; if (index < index_start) { index++; continue; } if (tcf_chain_held_by_acts_only(chain)) continue; err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv, chain->index, net, skb, block, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWCHAIN, NULL); if (err <= 0) break; index++; } mutex_unlock(&block->lock); if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) tcf_block_refcnt_put(block, true); cb->args[0] = index; out: /* If we did no progress, the error (EMSGSIZE) is real */ if (skb->len == 0 && err) return err; return skb->len; } int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action, int police, struct tcf_proto *tp, u32 handle, bool use_action_miss) { int err = 0; #ifdef CONFIG_NET_CLS_ACT exts->type = 0; exts->nr_actions = 0; exts->miss_cookie_node = NULL; /* Note: we do not own yet a reference on net. * This reference might be taken later from tcf_exts_get_net(). 
*/ exts->net = net; exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *), GFP_KERNEL); if (!exts->actions) return -ENOMEM; #endif exts->action = action; exts->police = police; if (!use_action_miss) return 0; err = tcf_exts_miss_cookie_base_alloc(exts, tp, handle); if (err) goto err_miss_alloc; return 0; err_miss_alloc: tcf_exts_destroy(exts); #ifdef CONFIG_NET_CLS_ACT exts->actions = NULL; #endif return err; } EXPORT_SYMBOL(tcf_exts_init_ex); void tcf_exts_destroy(struct tcf_exts *exts) { tcf_exts_miss_cookie_base_destroy(exts); #ifdef CONFIG_NET_CLS_ACT if (exts->actions) { tcf_action_destroy(exts->actions, TCA_ACT_UNBIND); kfree(exts->actions); } exts->nr_actions = 0; #endif } EXPORT_SYMBOL(tcf_exts_destroy); int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb, struct nlattr *rate_tlv, struct tcf_exts *exts, u32 flags, u32 fl_flags, struct netlink_ext_ack *extack) { #ifdef CONFIG_NET_CLS_ACT { int init_res[TCA_ACT_MAX_PRIO] = {}; struct tc_action *act; size_t attr_size = 0; if (exts->police && tb[exts->police]) { struct tc_action_ops *a_o; flags |= TCA_ACT_FLAGS_POLICE | TCA_ACT_FLAGS_BIND; a_o = tc_action_load_ops(tb[exts->police], flags, extack); if (IS_ERR(a_o)) return PTR_ERR(a_o); act = tcf_action_init_1(net, tp, tb[exts->police], rate_tlv, a_o, init_res, flags, extack); module_put(a_o->owner); if (IS_ERR(act)) return PTR_ERR(act); act->type = exts->type = TCA_OLD_COMPAT; exts->actions[0] = act; exts->nr_actions = 1; tcf_idr_insert_many(exts->actions, init_res); } else if (exts->action && tb[exts->action]) { int err; flags |= TCA_ACT_FLAGS_BIND; err = tcf_action_init(net, tp, tb[exts->action], rate_tlv, exts->actions, init_res, &attr_size, flags, fl_flags, extack); if (err < 0) return err; exts->nr_actions = err; } } #else if ((exts->action && tb[exts->action]) || (exts->police && tb[exts->police])) { NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)"); return -EOPNOTSUPP; } #endif return 0; } EXPORT_SYMBOL(tcf_exts_validate_ex); int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, struct nlattr *rate_tlv, struct tcf_exts *exts, u32 flags, struct netlink_ext_ack *extack) { return tcf_exts_validate_ex(net, tp, tb, rate_tlv, exts, flags, 0, extack); } EXPORT_SYMBOL(tcf_exts_validate); void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src) { #ifdef CONFIG_NET_CLS_ACT struct tcf_exts old = *dst; *dst = *src; tcf_exts_destroy(&old); #endif } EXPORT_SYMBOL(tcf_exts_change); #ifdef CONFIG_NET_CLS_ACT static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts) { if (exts->nr_actions == 0) return NULL; else return exts->actions[0]; } #endif int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) { #ifdef CONFIG_NET_CLS_ACT struct nlattr *nest; if (exts->action && tcf_exts_has_actions(exts)) { /* * again for backward compatible mode - we want * to work with both old and new modes of entering * tc data even if iproute2 was newer - jhs */ if (exts->type != TCA_OLD_COMPAT) { nest = nla_nest_start_noflag(skb, exts->action); if (nest == NULL) goto nla_put_failure; if (tcf_action_dump(skb, exts->actions, 0, 0, false) < 0) goto nla_put_failure; nla_nest_end(skb, nest); } else if (exts->police) { struct tc_action *act = tcf_exts_first_act(exts); nest = nla_nest_start_noflag(skb, exts->police); if (nest == NULL || !act) goto nla_put_failure; if (tcf_action_dump_old(skb, act, 0, 0) < 0) goto nla_put_failure; nla_nest_end(skb, nest); } } return 0; 
nla_put_failure: nla_nest_cancel(skb, nest); return -1; #else return 0; #endif } EXPORT_SYMBOL(tcf_exts_dump); int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts) { #ifdef CONFIG_NET_CLS_ACT struct nlattr *nest; if (!exts->action || !tcf_exts_has_actions(exts)) return 0; nest = nla_nest_start_noflag(skb, exts->action); if (!nest) goto nla_put_failure; if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0) goto nla_put_failure; nla_nest_end(skb, nest); return 0; nla_put_failure: nla_nest_cancel(skb, nest); return -1; #else return 0; #endif } EXPORT_SYMBOL(tcf_exts_terse_dump); int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts) { #ifdef CONFIG_NET_CLS_ACT struct tc_action *a = tcf_exts_first_act(exts); if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0) return -1; #endif return 0; } EXPORT_SYMBOL(tcf_exts_dump_stats); static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) { if (*flags & TCA_CLS_FLAGS_IN_HW) return; *flags |= TCA_CLS_FLAGS_IN_HW; atomic_inc(&block->offloadcnt); } static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags) { if (!(*flags & TCA_CLS_FLAGS_IN_HW)) return; *flags &= ~TCA_CLS_FLAGS_IN_HW; atomic_dec(&block->offloadcnt); } static void tc_cls_offload_cnt_update(struct tcf_block *block, struct tcf_proto *tp, u32 *cnt, u32 *flags, u32 diff, bool add) { lockdep_assert_held(&block->cb_lock); spin_lock(&tp->lock); if (add) { if (!*cnt) tcf_block_offload_inc(block, flags); *cnt += diff; } else { *cnt -= diff; if (!*cnt) tcf_block_offload_dec(block, flags); } spin_unlock(&tp->lock); } static void tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp, u32 *cnt, u32 *flags) { lockdep_assert_held(&block->cb_lock); spin_lock(&tp->lock); tcf_block_offload_dec(block, flags); *cnt = 0; spin_unlock(&tp->lock); } static int __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, void *type_data, bool err_stop) { struct flow_block_cb *block_cb; int ok_count = 0; int err; list_for_each_entry(block_cb, &block->flow_block.cb_list, list) { err = block_cb->cb(type, type_data, block_cb->cb_priv); if (err) { if (err_stop) return err; } else { ok_count++; } } return ok_count; } int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, void *type_data, bool err_stop, bool rtnl_held) { bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; int ok_count; retry: if (take_rtnl) rtnl_lock(); down_read(&block->cb_lock); /* Need to obtain rtnl lock if block is bound to devs that require it. * In block bind code cb_lock is obtained while holding rtnl, so we must * obtain the locks in same order here. */ if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { up_read(&block->cb_lock); take_rtnl = true; goto retry; } ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); up_read(&block->cb_lock); if (take_rtnl) rtnl_unlock(); return ok_count; } EXPORT_SYMBOL(tc_setup_cb_call); /* Non-destructive filter add. If filter that wasn't already in hardware is * successfully offloaded, increment block offloads counter. On failure, * previously offloaded filter is considered to be intact and offloads counter * is not decremented. 
*/ int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp, enum tc_setup_type type, void *type_data, bool err_stop, u32 *flags, unsigned int *in_hw_count, bool rtnl_held) { bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; int ok_count; retry: if (take_rtnl) rtnl_lock(); down_read(&block->cb_lock); /* Need to obtain rtnl lock if block is bound to devs that require it. * In block bind code cb_lock is obtained while holding rtnl, so we must * obtain the locks in same order here. */ if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { up_read(&block->cb_lock); take_rtnl = true; goto retry; } /* Make sure all netdevs sharing this block are offload-capable. */ if (block->nooffloaddevcnt && err_stop) { ok_count = -EOPNOTSUPP; goto err_unlock; } ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); if (ok_count < 0) goto err_unlock; if (tp->ops->hw_add) tp->ops->hw_add(tp, type_data); if (ok_count > 0) tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, ok_count, true); err_unlock: up_read(&block->cb_lock); if (take_rtnl) rtnl_unlock(); return min(ok_count, 0); } EXPORT_SYMBOL(tc_setup_cb_add); /* Destructive filter replace. If filter that wasn't already in hardware is * successfully offloaded, increment block offload counter. On failure, * previously offloaded filter is considered to be destroyed and offload counter * is decremented. */ int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp, enum tc_setup_type type, void *type_data, bool err_stop, u32 *old_flags, unsigned int *old_in_hw_count, u32 *new_flags, unsigned int *new_in_hw_count, bool rtnl_held) { bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; int ok_count; retry: if (take_rtnl) rtnl_lock(); down_read(&block->cb_lock); /* Need to obtain rtnl lock if block is bound to devs that require it. * In block bind code cb_lock is obtained while holding rtnl, so we must * obtain the locks in same order here. */ if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { up_read(&block->cb_lock); take_rtnl = true; goto retry; } /* Make sure all netdevs sharing this block are offload-capable. */ if (block->nooffloaddevcnt && err_stop) { ok_count = -EOPNOTSUPP; goto err_unlock; } tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags); if (tp->ops->hw_del) tp->ops->hw_del(tp, type_data); ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); if (ok_count < 0) goto err_unlock; if (tp->ops->hw_add) tp->ops->hw_add(tp, type_data); if (ok_count > 0) tc_cls_offload_cnt_update(block, tp, new_in_hw_count, new_flags, ok_count, true); err_unlock: up_read(&block->cb_lock); if (take_rtnl) rtnl_unlock(); return min(ok_count, 0); } EXPORT_SYMBOL(tc_setup_cb_replace); /* Destroy filter and decrement block offload counter, if filter was previously * offloaded. */ int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp, enum tc_setup_type type, void *type_data, bool err_stop, u32 *flags, unsigned int *in_hw_count, bool rtnl_held) { bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; int ok_count; retry: if (take_rtnl) rtnl_lock(); down_read(&block->cb_lock); /* Need to obtain rtnl lock if block is bound to devs that require it. * In block bind code cb_lock is obtained while holding rtnl, so we must * obtain the locks in same order here. 
*/ if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { up_read(&block->cb_lock); take_rtnl = true; goto retry; } ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags); if (tp->ops->hw_del) tp->ops->hw_del(tp, type_data); up_read(&block->cb_lock); if (take_rtnl) rtnl_unlock(); return min(ok_count, 0); } EXPORT_SYMBOL(tc_setup_cb_destroy); int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, enum tc_setup_type type, void *type_data, void *cb_priv, u32 *flags, unsigned int *in_hw_count) { int err = cb(type, type_data, cb_priv); if (err) { if (add && tc_skip_sw(*flags)) return err; } else { tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1, add); } return 0; } EXPORT_SYMBOL(tc_setup_cb_reoffload); static int tcf_act_get_user_cookie(struct flow_action_entry *entry, const struct tc_action *act) { struct tc_cookie *user_cookie; int err = 0; rcu_read_lock(); user_cookie = rcu_dereference(act->user_cookie); if (user_cookie) { entry->user_cookie = flow_action_cookie_create(user_cookie->data, user_cookie->len, GFP_ATOMIC); if (!entry->user_cookie) err = -ENOMEM; } rcu_read_unlock(); return err; } static void tcf_act_put_user_cookie(struct flow_action_entry *entry) { flow_action_cookie_destroy(entry->user_cookie); } void tc_cleanup_offload_action(struct flow_action *flow_action) { struct flow_action_entry *entry; int i; flow_action_for_each(i, entry, flow_action) { tcf_act_put_user_cookie(entry); if (entry->destructor) entry->destructor(entry->destructor_priv); } } EXPORT_SYMBOL(tc_cleanup_offload_action); static int tc_setup_offload_act(struct tc_action *act, struct flow_action_entry *entry, u32 *index_inc, struct netlink_ext_ack *extack) { #ifdef CONFIG_NET_CLS_ACT if (act->ops->offload_act_setup) { return act->ops->offload_act_setup(act, entry, index_inc, true, extack); } else { NL_SET_ERR_MSG(extack, "Action does not support offload"); return -EOPNOTSUPP; } #else return 0; #endif } int tc_setup_action(struct flow_action *flow_action, struct tc_action *actions[], u32 miss_cookie_base, struct netlink_ext_ack *extack) { int i, j, k, index, err = 0; struct tc_action *act; BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY); BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE); BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED); if (!actions) return 0; j = 0; tcf_act_for_each_action(i, act, actions) { struct flow_action_entry *entry; entry = &flow_action->entries[j]; spin_lock_bh(&act->tcfa_lock); err = tcf_act_get_user_cookie(entry, act); if (err) goto err_out_locked; index = 0; err = tc_setup_offload_act(act, entry, &index, extack); if (err) goto err_out_locked; for (k = 0; k < index ; k++) { entry[k].hw_stats = tc_act_hw_stats(act->hw_stats); entry[k].hw_index = act->tcfa_index; entry[k].cookie = (unsigned long)act; entry[k].miss_cookie = tcf_exts_miss_cookie_get(miss_cookie_base, i); } j += index; spin_unlock_bh(&act->tcfa_lock); } err_out: if (err) tc_cleanup_offload_action(flow_action); return err; err_out_locked: spin_unlock_bh(&act->tcfa_lock); goto err_out; } int tc_setup_offload_action(struct flow_action *flow_action, const struct tcf_exts *exts, struct netlink_ext_ack *extack) { #ifdef CONFIG_NET_CLS_ACT u32 miss_cookie_base; if (!exts) return 0; miss_cookie_base = exts->miss_cookie_node ? 
exts->miss_cookie_node->miss_cookie_base : 0; return tc_setup_action(flow_action, exts->actions, miss_cookie_base, extack); #else return 0; #endif } EXPORT_SYMBOL(tc_setup_offload_action); unsigned int tcf_exts_num_actions(struct tcf_exts *exts) { unsigned int num_acts = 0; struct tc_action *act; int i; tcf_exts_for_each_action(i, act, exts) { if (is_tcf_pedit(act)) num_acts += tcf_pedit_nkeys(act); else num_acts++; } return num_acts; } EXPORT_SYMBOL(tcf_exts_num_actions); #ifdef CONFIG_NET_CLS_ACT static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr, u32 *p_block_index, struct netlink_ext_ack *extack) { *p_block_index = nla_get_u32(block_index_attr); if (!*p_block_index) { NL_SET_ERR_MSG(extack, "Block number may not be zero"); return -EINVAL; } return 0; } int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch, enum flow_block_binder_type binder_type, struct nlattr *block_index_attr, struct netlink_ext_ack *extack) { u32 block_index; int err; if (!block_index_attr) return 0; err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack); if (err) return err; qe->info.binder_type = binder_type; qe->info.chain_head_change = tcf_chain_head_change_dflt; qe->info.chain_head_change_priv = &qe->filter_chain; qe->info.block_index = block_index; return tcf_block_get_ext(&qe->block, sch, &qe->info, extack); } EXPORT_SYMBOL(tcf_qevent_init); void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch) { if (qe->info.block_index) tcf_block_put_ext(qe->block, sch, &qe->info); } EXPORT_SYMBOL(tcf_qevent_destroy); int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr, struct netlink_ext_ack *extack) { u32 block_index; int err; if (!block_index_attr) return 0; err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack); if (err) return err; /* Bounce newly-configured block or change in block. 
*/ if (block_index != qe->info.block_index) { NL_SET_ERR_MSG(extack, "Change of blocks is not supported"); return -EINVAL; } return 0; } EXPORT_SYMBOL(tcf_qevent_validate_change); struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb, struct sk_buff **to_free, int *ret) { struct tcf_result cl_res; struct tcf_proto *fl; if (!qe->info.block_index) return skb; fl = rcu_dereference_bh(qe->filter_chain); switch (tcf_classify(skb, NULL, fl, &cl_res, false)) { case TC_ACT_SHOT: qdisc_qstats_drop(sch); __qdisc_drop(skb, to_free); *ret = __NET_XMIT_BYPASS; return NULL; case TC_ACT_STOLEN: case TC_ACT_QUEUED: case TC_ACT_TRAP: __qdisc_drop(skb, to_free); *ret = __NET_XMIT_STOLEN; return NULL; case TC_ACT_REDIRECT: skb_do_redirect(skb); *ret = __NET_XMIT_STOLEN; return NULL; } return skb; } EXPORT_SYMBOL(tcf_qevent_handle); int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe) { if (!qe->info.block_index) return 0; return nla_put_u32(skb, attr_name, qe->info.block_index); } EXPORT_SYMBOL(tcf_qevent_dump); #endif static __net_init int tcf_net_init(struct net *net) { struct tcf_net *tn = net_generic(net, tcf_net_id); spin_lock_init(&tn->idr_lock); idr_init(&tn->idr); return 0; } static void __net_exit tcf_net_exit(struct net *net) { struct tcf_net *tn = net_generic(net, tcf_net_id); idr_destroy(&tn->idr); } static struct pernet_operations tcf_net_ops = { .init = tcf_net_init, .exit = tcf_net_exit, .id = &tcf_net_id, .size = sizeof(struct tcf_net), }; static const struct rtnl_msg_handler tc_filter_rtnl_msg_handlers[] __initconst = { {.msgtype = RTM_NEWTFILTER, .doit = tc_new_tfilter, .flags = RTNL_FLAG_DOIT_UNLOCKED}, {.msgtype = RTM_DELTFILTER, .doit = tc_del_tfilter, .flags = RTNL_FLAG_DOIT_UNLOCKED}, {.msgtype = RTM_GETTFILTER, .doit = tc_get_tfilter, .dumpit = tc_dump_tfilter, .flags = RTNL_FLAG_DOIT_UNLOCKED}, {.msgtype = RTM_NEWCHAIN, .doit = tc_ctl_chain}, {.msgtype = RTM_DELCHAIN, .doit = tc_ctl_chain}, {.msgtype = RTM_GETCHAIN, .doit = tc_ctl_chain, .dumpit = tc_dump_chain}, }; static int __init tc_filter_init(void) { int err; tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0); if (!tc_filter_wq) return -ENOMEM; err = register_pernet_subsys(&tcf_net_ops); if (err) goto err_register_pernet_subsys; xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1); rtnl_register_many(tc_filter_rtnl_msg_handlers); return 0; err_register_pernet_subsys: destroy_workqueue(tc_filter_wq); return err; } subsys_initcall(tc_filter_init); |
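/*
 * Editor's illustration (not part of cls_api.c): every tc_setup_cb_*()
 * helper above repeats one locking idiom -- rtnl must be taken before
 * block->cb_lock, so when the need for rtnl (block->lockeddevcnt != 0)
 * is only discovered after cb_lock is already held, the helper drops
 * cb_lock and retries with rtnl taken first.  The standalone userspace
 * sketch below models that idiom with pthreads; all names in it
 * (outer_lock, inner_rwlock, locked_dev_cnt, do_callbacks) are
 * hypothetical stand-ins for illustration, not kernel symbols.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t outer_lock = PTHREAD_MUTEX_INITIALIZER;     /* models rtnl */
static pthread_rwlock_t inner_rwlock = PTHREAD_RWLOCK_INITIALIZER; /* models block->cb_lock */
static atomic_int locked_dev_cnt;                  /* models block->lockeddevcnt */

static int do_callbacks(void)
{
	return 0;	/* stand-in for __tc_setup_cb_call() */
}

static int setup_cb_call(bool outer_held)
{
	/* Take the outer lock up front when we already know we need it. */
	bool take_outer = atomic_load(&locked_dev_cnt) && !outer_held;
	int ret;

retry:
	if (take_outer)
		pthread_mutex_lock(&outer_lock);
	pthread_rwlock_rdlock(&inner_rwlock);
	/*
	 * Writers acquire outer_lock before inner_rwlock, so this reader
	 * must never take them in the opposite order.  If the counter
	 * became nonzero after it was sampled, back out and retry with
	 * the outer lock held first -- the same dance as tc_setup_cb_call().
	 */
	if (!outer_held && !take_outer && atomic_load(&locked_dev_cnt)) {
		pthread_rwlock_unlock(&inner_rwlock);
		take_outer = true;
		goto retry;
	}
	ret = do_callbacks();
	pthread_rwlock_unlock(&inner_rwlock);
	if (take_outer)
		pthread_mutex_unlock(&outer_lock);
	return ret;
}

int main(void)
{
	atomic_store(&locked_dev_cnt, 1);
	printf("callbacks returned %d\n", setup_cb_call(false));
	return 0;
}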
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PIM_H
#define __LINUX_PIM_H

#include <linux/skbuff.h>
#include <asm/byteorder.h>

/* Message types - V1 */
#define PIM_V1_VERSION		cpu_to_be32(0x10000000)
#define PIM_V1_REGISTER		1

/* Message types - V2 */
#define PIM_VERSION		2

/* RFC7761, sec 4.9:
 * Type
 *	Types for specific PIM messages.  PIM Types are:
 *
 * Message Type				Destination
 * ---------------------------------------------------------------------
 * 0 = Hello				Multicast to ALL-PIM-ROUTERS
 * 1 = Register				Unicast to RP
 * 2 = Register-Stop			Unicast to source of Register
 *					packet
 * 3 = Join/Prune			Multicast to ALL-PIM-ROUTERS
 * 4 = Bootstrap			Multicast to ALL-PIM-ROUTERS
 * 5 = Assert				Multicast to ALL-PIM-ROUTERS
 * 6 = Graft (used in PIM-DM only)	Unicast to RPF'(S)
 * 7 = Graft-Ack (used in PIM-DM only)	Unicast to source of Graft
 *					packet
 * 8 = Candidate-RP-Advertisement	Unicast to Domain's BSR
 */
enum {
	PIM_TYPE_HELLO,
	PIM_TYPE_REGISTER,
	PIM_TYPE_REGISTER_STOP,
	PIM_TYPE_JOIN_PRUNE,
	PIM_TYPE_BOOTSTRAP,
	PIM_TYPE_ASSERT,
	PIM_TYPE_GRAFT,
	PIM_TYPE_GRAFT_ACK,
	PIM_TYPE_CANDIDATE_RP_ADV
};

#define PIM_NULL_REGISTER	cpu_to_be32(0x40000000)

/* RFC7761, sec 4.9:
 * The PIM header common to all PIM messages is:
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |PIM Ver| Type  |   Reserved    |           Checksum            |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
struct pimhdr {
	__u8	type;
	__u8	reserved;
	__be16	csum;
};

/* PIMv2 register message header layout (ietf-draft-idmr-pimvsm-v2-00.ps) */
struct pimreghdr {
	__u8	type;
	__u8	reserved;
	__be16	csum;
	__be32	flags;
};

int pim_rcv_v1(struct sk_buff *skb);

static inline bool ipmr_pimsm_enabled(void)
{
	return IS_BUILTIN(CONFIG_IP_PIMSM_V1) || IS_BUILTIN(CONFIG_IP_PIMSM_V2);
}

static inline struct pimhdr *pim_hdr(const struct sk_buff *skb)
{
	return (struct pimhdr *)skb_transport_header(skb);
}

static inline u8 pim_hdr_version(const struct pimhdr *pimhdr)
{
	return pimhdr->type >> 4;
}

static inline u8 pim_hdr_type(const struct pimhdr *pimhdr)
{
	return pimhdr->type & 0xf;
}

/* check if the address is 224.0.0.13, RFC7761 sec 4.3.1 */
static inline bool pim_ipv4_all_pim_routers(__be32 addr)
{
	return addr == htonl(0xE000000D);
}
#endif
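/*
 * Editor's illustration (not part of pim.h): the helpers above split the
 * first octet of the PIM header into a version (high nibble) and a type
 * (low nibble).  The self-contained userspace sketch below applies the
 * same decoding to a raw buffer; struct pim_hdr_example merely mirrors
 * struct pimhdr so the example compiles outside the kernel, and the wire
 * bytes are made up for illustration (version 2, type 0 = Hello).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct pim_hdr_example {	/* mirrors struct pimhdr */
	uint8_t type;		/* high nibble: PIM version, low nibble: message type */
	uint8_t reserved;
	uint16_t csum;		/* big-endian on the wire */
};

int main(void)
{
	/* 0x20 = version 2, type 0 (Hello); checksum bytes are placeholders */
	const uint8_t wire[4] = { 0x20, 0x00, 0xde, 0xad };
	struct pim_hdr_example hdr;

	memcpy(&hdr, wire, sizeof(hdr));
	/* same logic as pim_hdr_version() and pim_hdr_type() */
	printf("PIM version %u, message type %u\n",
	       hdr.type >> 4, hdr.type & 0xf);
	return 0;
}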
/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

#define CREATE_TRACE_POINTS
#include <trace/events/hugetlbfs.h>

static const struct address_space_operations hugetlbfs_aops;
static const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_gid   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32oct("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_uid   ("uid",		Opt_uid),
	{}
};

/*
 * Mask used when checking the page offset value passed in via system
 * calls. This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value. The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);
	vm_flags_t vm_flags;

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.
If you add * any error returns here, do so after setting VM_HUGETLB, so * is_vm_hugetlb_page tests below unmap_region go the right * way when do_mmap unwinds (may be important on powerpc * and ia64). */ vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND); vma->vm_ops = &hugetlb_vm_ops; /* * page based offset in vm_pgoff could be sufficiently large to * overflow a loff_t when converted to byte offset. This can * only happen on architectures where sizeof(loff_t) == * sizeof(unsigned long). So, only check in those instances. */ if (sizeof(unsigned long) == sizeof(loff_t)) { if (vma->vm_pgoff & PGOFF_LOFFT_MAX) return -EINVAL; } /* must be huge page aligned */ if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT)) return -EINVAL; vma_len = (loff_t)(vma->vm_end - vma->vm_start); len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); /* check for overflow */ if (len < vma_len) return -EINVAL; inode_lock(inode); file_accessed(file); ret = -ENOMEM; vm_flags = vma->vm_flags; /* * for SHM_HUGETLB, the pages are reserved in the shmget() call so skip * reserving here. Note: only for SHM hugetlbfs file, the inode * flag S_PRIVATE is set. */ if (inode->i_flags & S_PRIVATE) vm_flags |= VM_NORESERVE; if (!hugetlb_reserve_pages(inode, vma->vm_pgoff >> huge_page_order(h), len >> huge_page_shift(h), vma, vm_flags)) goto out; ret = 0; if (vma->vm_flags & VM_WRITE && inode->i_size < len) i_size_write(inode, len); out: inode_unlock(inode); return ret; } /* * Called under mmap_write_lock(mm). */ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long addr0 = 0; struct hstate *h = hstate_file(file); if (len & ~huge_page_mask(h)) return -EINVAL; if (flags & MAP_FIXED) { if (addr & ~huge_page_mask(h)) return -EINVAL; if (prepare_hugepage_range(file, addr, len)) return -EINVAL; } if (addr) addr0 = ALIGN(addr, huge_page_size(h)); return mm_get_unmapped_area_vmflags(current->mm, file, addr0, len, pgoff, flags, 0); } /* * Someone wants to read @bytes from a HWPOISON hugetlb @folio from @offset. * Returns the maximum number of bytes one can read without touching the 1st raw * HWPOISON page. * * The implementation borrows the iteration logic from copy_page_to_iter*. */ static size_t adjust_range_hwpoison(struct folio *folio, size_t offset, size_t bytes) { struct page *page; size_t n = 0; size_t res = 0; /* First page to start the loop. */ page = folio_page(folio, offset / PAGE_SIZE); offset %= PAGE_SIZE; while (1) { if (is_raw_hwpoison_page_in_hugepage(page)) break; /* Safe to read n bytes without touching HWPOISON subpage. */ n = min(bytes, (size_t)PAGE_SIZE - offset); res += n; bytes -= n; if (!bytes || !n) break; offset += n; if (offset == PAGE_SIZE) { page = nth_page(page, 1); offset = 0; } } return res; } /* * Support for read() - Find the page attached to f_mapping and copy out the * data. This provides functionality similar to filemap_read(). 
*/ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct file *file = iocb->ki_filp; struct hstate *h = hstate_file(file); struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; unsigned long index = iocb->ki_pos >> huge_page_shift(h); unsigned long offset = iocb->ki_pos & ~huge_page_mask(h); unsigned long end_index; loff_t isize; ssize_t retval = 0; while (iov_iter_count(to)) { struct folio *folio; size_t nr, copied, want; /* nr is the maximum number of bytes to copy from this page */ nr = huge_page_size(h); isize = i_size_read(inode); if (!isize) break; end_index = (isize - 1) >> huge_page_shift(h); if (index > end_index) break; if (index == end_index) { nr = ((isize - 1) & ~huge_page_mask(h)) + 1; if (nr <= offset) break; } nr = nr - offset; /* Find the folio */ folio = filemap_lock_hugetlb_folio(h, mapping, index); if (IS_ERR(folio)) { /* * We have a HOLE, zero out the user-buffer for the * length of the hole or request. */ copied = iov_iter_zero(nr, to); } else { folio_unlock(folio); if (!folio_test_hwpoison(folio)) want = nr; else { /* * Adjust how many bytes safe to read without * touching the 1st raw HWPOISON page after * offset. */ want = adjust_range_hwpoison(folio, offset, nr); if (want == 0) { folio_put(folio); retval = -EIO; break; } } /* * We have the folio, copy it to user space buffer. */ copied = copy_folio_to_iter(folio, offset, want, to); folio_put(folio); } offset += copied; retval += copied; if (copied != nr && iov_iter_count(to)) { if (!retval) retval = -EFAULT; break; } index += offset >> huge_page_shift(h); offset &= ~huge_page_mask(h); } iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset; return retval; } static int hugetlbfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, struct folio **foliop, void **fsdata) { return -EINVAL; } static int hugetlbfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct folio *folio, void *fsdata) { BUG(); return -EINVAL; } static void hugetlb_delete_from_page_cache(struct folio *folio) { folio_clear_dirty(folio); folio_clear_uptodate(folio); filemap_remove_folio(folio); } /* * Called with i_mmap_rwsem held for inode based vma maps. This makes * sure vma (and vm_mm) will not go away. We also hold the hugetlb fault * mutex for the page in the mapping. So, we can not race with page being * faulted into the vma. */ static bool hugetlb_vma_maps_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) { pte_t *ptep, pte; ptep = hugetlb_walk(vma, addr, huge_page_size(hstate_vma(vma))); if (!ptep) return false; pte = huge_ptep_get(vma->vm_mm, addr, ptep); if (huge_pte_none(pte) || !pte_present(pte)) return false; if (pte_pfn(pte) == pfn) return true; return false; } /* * Can vma_offset_start/vma_offset_end overflow on 32-bit arches? * No, because the interval tree returns us only those vmas * which overlap the truncated area starting at pgoff, * and no vma on a 32-bit arch can span beyond the 4GB. 
*/ static unsigned long vma_offset_start(struct vm_area_struct *vma, pgoff_t start) { unsigned long offset = 0; if (vma->vm_pgoff < start) offset = (start - vma->vm_pgoff) << PAGE_SHIFT; return vma->vm_start + offset; } static unsigned long vma_offset_end(struct vm_area_struct *vma, pgoff_t end) { unsigned long t_end; if (!end) return vma->vm_end; t_end = ((end - vma->vm_pgoff) << PAGE_SHIFT) + vma->vm_start; if (t_end > vma->vm_end) t_end = vma->vm_end; return t_end; } /* * Called with hugetlb fault mutex held. Therefore, no more mappings to * this folio can be created while executing the routine. */ static void hugetlb_unmap_file_folio(struct hstate *h, struct address_space *mapping, struct folio *folio, pgoff_t index) { struct rb_root_cached *root = &mapping->i_mmap; struct hugetlb_vma_lock *vma_lock; unsigned long pfn = folio_pfn(folio); struct vm_area_struct *vma; unsigned long v_start; unsigned long v_end; pgoff_t start, end; start = index * pages_per_huge_page(h); end = (index + 1) * pages_per_huge_page(h); i_mmap_lock_write(mapping); retry: vma_lock = NULL; vma_interval_tree_foreach(vma, root, start, end - 1) { v_start = vma_offset_start(vma, start); v_end = vma_offset_end(vma, end); if (!hugetlb_vma_maps_pfn(vma, v_start, pfn)) continue; if (!hugetlb_vma_trylock_write(vma)) { vma_lock = vma->vm_private_data; /* * If we cannot get the vma lock, we need to drop * i_mmap_rwsem and take locks in order. First, * take a ref on the vma_lock structure so that * we can be guaranteed it will not go away when * dropping i_mmap_rwsem. */ kref_get(&vma_lock->refs); break; } unmap_hugepage_range(vma, v_start, v_end, NULL, ZAP_FLAG_DROP_MARKER); hugetlb_vma_unlock_write(vma); } i_mmap_unlock_write(mapping); if (vma_lock) { /* * Wait on vma_lock. We know it is still valid as we have * a reference. We must 'open code' vma locking as we do * not know if vma_lock is still attached to vma. */ down_write(&vma_lock->rw_sema); i_mmap_lock_write(mapping); vma = vma_lock->vma; if (!vma) { /* * If lock is no longer attached to vma, then just * unlock, drop our reference and retry looking for * other vmas. */ up_write(&vma_lock->rw_sema); kref_put(&vma_lock->refs, hugetlb_vma_lock_release); goto retry; } /* * vma_lock is still attached to vma. Check to see if vma * still maps page and if so, unmap. */ v_start = vma_offset_start(vma, start); v_end = vma_offset_end(vma, end); if (hugetlb_vma_maps_pfn(vma, v_start, pfn)) unmap_hugepage_range(vma, v_start, v_end, NULL, ZAP_FLAG_DROP_MARKER); kref_put(&vma_lock->refs, hugetlb_vma_lock_release); hugetlb_vma_unlock_write(vma); goto retry; } } static void hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end, zap_flags_t zap_flags) { struct vm_area_struct *vma; /* * end == 0 indicates that the entire range after start should be * unmapped. Note, end is exclusive, whereas the interval tree takes * an inclusive "last". */ vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) { unsigned long v_start; unsigned long v_end; if (!hugetlb_vma_trylock_write(vma)) continue; v_start = vma_offset_start(vma, start); v_end = vma_offset_end(vma, end); unmap_hugepage_range(vma, v_start, v_end, NULL, zap_flags); /* * Note that vma lock only exists for shared/non-private * vmas. Therefore, lock is not held when calling * unmap_hugepage_range for private vmas. */ hugetlb_vma_unlock_write(vma); } } /* * Called with hugetlb fault mutex held. * Returns true if folio was actually removed, false otherwise.
*/ static bool remove_inode_single_folio(struct hstate *h, struct inode *inode, struct address_space *mapping, struct folio *folio, pgoff_t index, bool truncate_op) { bool ret = false; /* * If folio is mapped, it was faulted in after being * unmapped in caller. Unmap (again) while holding * the fault mutex. The mutex will prevent faults * until we finish removing the folio. */ if (unlikely(folio_mapped(folio))) hugetlb_unmap_file_folio(h, mapping, folio, index); folio_lock(folio); /* * We must remove the folio from page cache before removing * the region/reserve map (hugetlb_unreserve_pages). In * rare out of memory conditions, removal of the region/reserve * map could fail. Correspondingly, the subpool and global * reserve usage count may need to be adjusted. */ VM_BUG_ON_FOLIO(folio_test_hugetlb_restore_reserve(folio), folio); hugetlb_delete_from_page_cache(folio); ret = true; if (!truncate_op) { if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1))) hugetlb_fix_reserve_counts(inode); } folio_unlock(folio); return ret; } /* * remove_inode_hugepages handles two distinct cases: truncation and hole * punch. There are subtle differences in operation for each case. * * truncation is indicated by end of range being LLONG_MAX * In this case, we first scan the range and release found pages. * After releasing pages, hugetlb_unreserve_pages cleans up region/reserve * maps and global counts. Page faults can race with truncation. * During faults, hugetlb_no_page() checks i_size before page allocation, * and again after obtaining page table lock. It will 'back out' * allocations in the truncated range. * hole punch is indicated if end is not LLONG_MAX * In the hole punch case we scan the range and release found pages. * Only when releasing a page is the associated region/reserve map * deleted. The region/reserve map for ranges without associated * pages is not modified. Page faults can race with hole punch. * This is indicated if we find a mapped page. * Note: If the passed end of range value is beyond the end of file, but * not LLONG_MAX, this routine still performs a hole punch operation. */ static void remove_inode_hugepages(struct inode *inode, loff_t lstart, loff_t lend) { struct hstate *h = hstate_inode(inode); struct address_space *mapping = &inode->i_data; const pgoff_t end = lend >> PAGE_SHIFT; struct folio_batch fbatch; pgoff_t next, index; int i, freed = 0; bool truncate_op = (lend == LLONG_MAX); folio_batch_init(&fbatch); next = lstart >> PAGE_SHIFT; while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) { for (i = 0; i < folio_batch_count(&fbatch); ++i) { struct folio *folio = fbatch.folios[i]; u32 hash = 0; index = folio->index >> huge_page_order(h); hash = hugetlb_fault_mutex_hash(mapping, index); mutex_lock(&hugetlb_fault_mutex_table[hash]); /* * Remove folio that was part of folio_batch. */ if (remove_inode_single_folio(h, inode, mapping, folio, index, truncate_op)) freed++; mutex_unlock(&hugetlb_fault_mutex_table[hash]); } folio_batch_release(&fbatch); cond_resched(); } if (truncate_op) (void)hugetlb_unreserve_pages(inode, lstart >> huge_page_shift(h), LONG_MAX, freed); } static void hugetlbfs_evict_inode(struct inode *inode) { struct resv_map *resv_map; trace_hugetlbfs_evict_inode(inode); remove_inode_hugepages(inode, 0, LLONG_MAX); /* * Get the resv_map from the address space embedded in the inode. * This is the address space which points to any resv_map allocated * at inode creation time.
If this is a device special inode, * i_mapping may not point to the original address space. */ resv_map = (struct resv_map *)(&inode->i_data)->i_private_data; /* Only regular and link inodes have associated reserve maps */ if (resv_map) resv_map_release(&resv_map->refs); clear_inode(inode); } static void hugetlb_vmtruncate(struct inode *inode, loff_t offset) { pgoff_t pgoff; struct address_space *mapping = inode->i_mapping; struct hstate *h = hstate_inode(inode); BUG_ON(offset & ~huge_page_mask(h)); pgoff = offset >> PAGE_SHIFT; i_size_write(inode, offset); i_mmap_lock_write(mapping); if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)) hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0, ZAP_FLAG_DROP_MARKER); i_mmap_unlock_write(mapping); remove_inode_hugepages(inode, offset, LLONG_MAX); } static void hugetlbfs_zero_partial_page(struct hstate *h, struct address_space *mapping, loff_t start, loff_t end) { pgoff_t idx = start >> huge_page_shift(h); struct folio *folio; folio = filemap_lock_hugetlb_folio(h, mapping, idx); if (IS_ERR(folio)) return; start = start & ~huge_page_mask(h); end = end & ~huge_page_mask(h); if (!end) end = huge_page_size(h); folio_zero_segment(folio, (size_t)start, (size_t)end); folio_unlock(folio); folio_put(folio); } static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) { struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); struct address_space *mapping = inode->i_mapping; struct hstate *h = hstate_inode(inode); loff_t hpage_size = huge_page_size(h); loff_t hole_start, hole_end; /* * hole_start and hole_end indicate the full pages within the hole. */ hole_start = round_up(offset, hpage_size); hole_end = round_down(offset + len, hpage_size); inode_lock(inode); /* protected by i_rwsem */ if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { inode_unlock(inode); return -EPERM; } i_mmap_lock_write(mapping); /* If range starts before first full page, zero partial page. */ if (offset < hole_start) hugetlbfs_zero_partial_page(h, mapping, offset, min(offset + len, hole_start)); /* Unmap users of full pages in the hole. */ if (hole_end > hole_start) { if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)) hugetlb_vmdelete_list(&mapping->i_mmap, hole_start >> PAGE_SHIFT, hole_end >> PAGE_SHIFT, 0); } /* If range extends beyond last full page, zero partial page. */ if ((offset + len) > hole_end && (offset + len) > hole_start) hugetlbfs_zero_partial_page(h, mapping, hole_end, offset + len); i_mmap_unlock_write(mapping); /* Remove full pages from the file. */ if (hole_end > hole_start) remove_inode_hugepages(inode, hole_start, hole_end); inode_unlock(inode); return 0; } static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct inode *inode = file_inode(file); struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); struct address_space *mapping = inode->i_mapping; struct hstate *h = hstate_inode(inode); struct vm_area_struct pseudo_vma; struct mm_struct *mm = current->mm; loff_t hpage_size = huge_page_size(h); unsigned long hpage_shift = huge_page_shift(h); pgoff_t start, index, end; int error; u32 hash; if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) return -EOPNOTSUPP; if (mode & FALLOC_FL_PUNCH_HOLE) { error = hugetlbfs_punch_hole(inode, offset, len); goto out_nolock; } /* * Default preallocate case. * For this range, start is rounded down and end is rounded up * as well as being converted to page offsets. 
*/ start = offset >> hpage_shift; end = (offset + len + hpage_size - 1) >> hpage_shift; inode_lock(inode); /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ error = inode_newsize_ok(inode, offset + len); if (error) goto out; if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { error = -EPERM; goto out; } /* * Initialize a pseudo vma as this is required by the huge page * allocation routines. */ vma_init(&pseudo_vma, mm); vm_flags_init(&pseudo_vma, VM_HUGETLB | VM_MAYSHARE | VM_SHARED); pseudo_vma.vm_file = file; for (index = start; index < end; index++) { /* * This is supposed to be the vaddr where the page is being * faulted in, but we have no vaddr here. */ struct folio *folio; unsigned long addr; cond_resched(); /* * fallocate(2) manpage permits EINTR; we may have been * interrupted because we are using up too much memory. */ if (signal_pending(current)) { error = -EINTR; break; } /* addr is the offset within the file (zero based) */ addr = index * hpage_size; /* mutex taken here, fault path and hole punch */ hash = hugetlb_fault_mutex_hash(mapping, index); mutex_lock(&hugetlb_fault_mutex_table[hash]); /* See if already present in mapping to avoid alloc/free */ folio = filemap_get_folio(mapping, index << huge_page_order(h)); if (!IS_ERR(folio)) { folio_put(folio); mutex_unlock(&hugetlb_fault_mutex_table[hash]); continue; } /* * Allocate folio without setting the avoid_reserve argument. * There certainly are no reserves associated with the * pseudo_vma. However, there could be shared mappings with * reserves for the file at the inode level. If we fallocate * folios in these areas, we need to consume the reserves * to keep reservation accounting consistent. */ folio = alloc_hugetlb_folio(&pseudo_vma, addr, false); if (IS_ERR(folio)) { mutex_unlock(&hugetlb_fault_mutex_table[hash]); error = PTR_ERR(folio); goto out; } folio_zero_user(folio, addr); __folio_mark_uptodate(folio); error = hugetlb_add_to_page_cache(folio, mapping, index); if (unlikely(error)) { restore_reserve_on_error(h, &pseudo_vma, addr, folio); folio_put(folio); mutex_unlock(&hugetlb_fault_mutex_table[hash]); goto out; } mutex_unlock(&hugetlb_fault_mutex_table[hash]); folio_set_hugetlb_migratable(folio); /* * folio_unlock because locked by hugetlb_add_to_page_cache() * folio_put() due to reference from alloc_hugetlb_folio() */ folio_unlock(folio); folio_put(folio); } if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) i_size_write(inode, offset + len); inode_set_ctime_current(inode); out: inode_unlock(inode); out_nolock: trace_hugetlbfs_fallocate(inode, mode, offset, len, error); return error; } static int hugetlbfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { struct inode *inode = d_inode(dentry); struct hstate *h = hstate_inode(inode); int error; unsigned int ia_valid = attr->ia_valid; struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); error = setattr_prepare(idmap, dentry, attr); if (error) return error; trace_hugetlbfs_setattr(inode, dentry, attr); if (ia_valid & ATTR_SIZE) { loff_t oldsize = inode->i_size; loff_t newsize = attr->ia_size; if (newsize & ~huge_page_mask(h)) return -EINVAL; /* protected by i_rwsem */ if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) || (newsize > oldsize && (info->seals & F_SEAL_GROW))) return -EPERM; hugetlb_vmtruncate(inode, newsize); } setattr_copy(idmap, inode, attr); mark_inode_dirty(inode); return 0; } static struct inode *hugetlbfs_get_root(struct super_block *sb, struct hugetlbfs_fs_context *ctx) { 
struct inode *inode; inode = new_inode(sb); if (inode) { inode->i_ino = get_next_ino(); inode->i_mode = S_IFDIR | ctx->mode; inode->i_uid = ctx->uid; inode->i_gid = ctx->gid; simple_inode_init_ts(inode); inode->i_op = &hugetlbfs_dir_inode_operations; inode->i_fop = &simple_dir_operations; /* directory inodes start off with i_nlink == 2 (for "." entry) */ inc_nlink(inode); lockdep_annotate_inode_mutex_key(inode); } return inode; } /* * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never * be taken from reclaim -- unlike regular filesystems. This needs an * annotation because huge_pmd_share() does an allocation under hugetlb's * i_mmap_rwsem. */ static struct lock_class_key hugetlbfs_i_mmap_rwsem_key; static struct inode *hugetlbfs_get_inode(struct super_block *sb, struct mnt_idmap *idmap, struct inode *dir, umode_t mode, dev_t dev) { struct inode *inode; struct resv_map *resv_map = NULL; /* * Reserve maps are only needed for inodes that can have associated * page allocations. */ if (S_ISREG(mode) || S_ISLNK(mode)) { resv_map = resv_map_alloc(); if (!resv_map) return NULL; } inode = new_inode(sb); if (inode) { struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); inode->i_ino = get_next_ino(); inode_init_owner(idmap, inode, dir, mode); lockdep_set_class(&inode->i_mapping->i_mmap_rwsem, &hugetlbfs_i_mmap_rwsem_key); inode->i_mapping->a_ops = &hugetlbfs_aops; simple_inode_init_ts(inode); inode->i_mapping->i_private_data = resv_map; info->seals = F_SEAL_SEAL; switch (mode & S_IFMT) { default: init_special_inode(inode, mode, dev); break; case S_IFREG: inode->i_op = &hugetlbfs_inode_operations; inode->i_fop = &hugetlbfs_file_operations; break; case S_IFDIR: inode->i_op = &hugetlbfs_dir_inode_operations; inode->i_fop = &simple_dir_operations; /* directory inodes start off with i_nlink == 2 (for "." entry) */ inc_nlink(inode); break; case S_IFLNK: inode->i_op = &page_symlink_inode_operations; inode_nohighmem(inode); break; } lockdep_annotate_inode_mutex_key(inode); trace_hugetlbfs_alloc_inode(inode, dir, mode); } else { if (resv_map) kref_put(&resv_map->refs, resv_map_release); } return inode; } /* * File creation. Allocate an inode, and we're done.. 
*/ static int hugetlbfs_mknod(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) { struct inode *inode; inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, dev); if (!inode) return -ENOSPC; inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); d_instantiate(dentry, inode); dget(dentry);/* Extra count - pin the dentry in core */ return 0; } static struct dentry *hugetlbfs_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode) { int retval = hugetlbfs_mknod(idmap, dir, dentry, mode | S_IFDIR, 0); if (!retval) inc_nlink(dir); return ERR_PTR(retval); } static int hugetlbfs_create(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { return hugetlbfs_mknod(idmap, dir, dentry, mode | S_IFREG, 0); } static int hugetlbfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir, struct file *file, umode_t mode) { struct inode *inode; inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode | S_IFREG, 0); if (!inode) return -ENOSPC; inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); d_tmpfile(file, inode); return finish_open_simple(file, 0); } static int hugetlbfs_symlink(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, const char *symname) { const umode_t mode = S_IFLNK|S_IRWXUGO; struct inode *inode; int error = -ENOSPC; inode = hugetlbfs_get_inode(dir->i_sb, idmap, dir, mode, 0); if (inode) { int l = strlen(symname)+1; error = page_symlink(inode, symname, l); if (!error) { d_instantiate(dentry, inode); dget(dentry); } else iput(inode); } inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); return error; } #ifdef CONFIG_MIGRATION static int hugetlbfs_migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode) { int rc; rc = migrate_huge_page_move_mapping(mapping, dst, src); if (rc != MIGRATEPAGE_SUCCESS) return rc; if (hugetlb_folio_subpool(src)) { hugetlb_set_folio_subpool(dst, hugetlb_folio_subpool(src)); hugetlb_set_folio_subpool(src, NULL); } folio_migrate_flags(dst, src); return MIGRATEPAGE_SUCCESS; } #else #define hugetlbfs_migrate_folio NULL #endif static int hugetlbfs_error_remove_folio(struct address_space *mapping, struct folio *folio) { return 0; } /* * Display the mount options in /proc/mounts. 
*/ static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root) { struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb); struct hugepage_subpool *spool = sbinfo->spool; unsigned long hpage_size = huge_page_size(sbinfo->hstate); unsigned hpage_shift = huge_page_shift(sbinfo->hstate); char mod; if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) seq_printf(m, ",uid=%u", from_kuid_munged(&init_user_ns, sbinfo->uid)); if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) seq_printf(m, ",gid=%u", from_kgid_munged(&init_user_ns, sbinfo->gid)); if (sbinfo->mode != 0755) seq_printf(m, ",mode=%o", sbinfo->mode); if (sbinfo->max_inodes != -1) seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes); hpage_size /= 1024; mod = 'K'; if (hpage_size >= 1024) { hpage_size /= 1024; mod = 'M'; } seq_printf(m, ",pagesize=%lu%c", hpage_size, mod); if (spool) { if (spool->max_hpages != -1) seq_printf(m, ",size=%llu", (unsigned long long)spool->max_hpages << hpage_shift); if (spool->min_hpages != -1) seq_printf(m, ",min_size=%llu", (unsigned long long)spool->min_hpages << hpage_shift); } return 0; } static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb); struct hstate *h = hstate_inode(d_inode(dentry)); u64 id = huge_encode_dev(dentry->d_sb->s_dev); buf->f_fsid = u64_to_fsid(id); buf->f_type = HUGETLBFS_MAGIC; buf->f_bsize = huge_page_size(h); if (sbinfo) { spin_lock(&sbinfo->stat_lock); /* If no limits set, just report 0 or -1 for max/free/used * blocks, like simple_statfs() */ if (sbinfo->spool) { long free_pages; spin_lock_irq(&sbinfo->spool->lock); buf->f_blocks = sbinfo->spool->max_hpages; free_pages = sbinfo->spool->max_hpages - sbinfo->spool->used_hpages; buf->f_bavail = buf->f_bfree = free_pages; spin_unlock_irq(&sbinfo->spool->lock); buf->f_files = sbinfo->max_inodes; buf->f_ffree = sbinfo->free_inodes; } spin_unlock(&sbinfo->stat_lock); } buf->f_namelen = NAME_MAX; return 0; } static void hugetlbfs_put_super(struct super_block *sb) { struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb); if (sbi) { sb->s_fs_info = NULL; if (sbi->spool) hugepage_put_subpool(sbi->spool); kfree(sbi); } } static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo) { if (sbinfo->free_inodes >= 0) { spin_lock(&sbinfo->stat_lock); if (unlikely(!sbinfo->free_inodes)) { spin_unlock(&sbinfo->stat_lock); return 0; } sbinfo->free_inodes--; spin_unlock(&sbinfo->stat_lock); } return 1; } static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo) { if (sbinfo->free_inodes >= 0) { spin_lock(&sbinfo->stat_lock); sbinfo->free_inodes++; spin_unlock(&sbinfo->stat_lock); } } static struct kmem_cache *hugetlbfs_inode_cachep; static struct inode *hugetlbfs_alloc_inode(struct super_block *sb) { struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb); struct hugetlbfs_inode_info *p; if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo))) return NULL; p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL); if (unlikely(!p)) { hugetlbfs_inc_free_inodes(sbinfo); return NULL; } return &p->vfs_inode; } static void hugetlbfs_free_inode(struct inode *inode) { trace_hugetlbfs_free_inode(inode); kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode)); } static void hugetlbfs_destroy_inode(struct inode *inode) { hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb)); } static const struct address_space_operations hugetlbfs_aops = { .write_begin = hugetlbfs_write_begin, .write_end = hugetlbfs_write_end, .dirty_folio = noop_dirty_folio, .migrate_folio = 
hugetlbfs_migrate_folio, .error_remove_folio = hugetlbfs_error_remove_folio, }; static void init_once(void *foo) { struct hugetlbfs_inode_info *ei = foo; inode_init_once(&ei->vfs_inode); } static const struct file_operations hugetlbfs_file_operations = { .read_iter = hugetlbfs_read_iter, .mmap = hugetlbfs_file_mmap, .fsync = noop_fsync, .get_unmapped_area = hugetlb_get_unmapped_area, .llseek = default_llseek, .fallocate = hugetlbfs_fallocate, .fop_flags = FOP_HUGE_PAGES, }; static const struct inode_operations hugetlbfs_dir_inode_operations = { .create = hugetlbfs_create, .lookup = simple_lookup, .link = simple_link, .unlink = simple_unlink, .symlink = hugetlbfs_symlink, .mkdir = hugetlbfs_mkdir, .rmdir = simple_rmdir, .mknod = hugetlbfs_mknod, .rename = simple_rename, .setattr = hugetlbfs_setattr, .tmpfile = hugetlbfs_tmpfile, }; static const struct inode_operations hugetlbfs_inode_operations = { .setattr = hugetlbfs_setattr, }; static const struct super_operations hugetlbfs_ops = { .alloc_inode = hugetlbfs_alloc_inode, .free_inode = hugetlbfs_free_inode, .destroy_inode = hugetlbfs_destroy_inode, .evict_inode = hugetlbfs_evict_inode, .statfs = hugetlbfs_statfs, .put_super = hugetlbfs_put_super, .show_options = hugetlbfs_show_options, }; /* * Convert size option passed from command line to number of huge pages * in the pool specified by hstate. Size option could be in bytes * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT). */ static long hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt, enum hugetlbfs_size_type val_type) { if (val_type == NO_SIZE) return -1; if (val_type == SIZE_PERCENT) { size_opt <<= huge_page_shift(h); size_opt *= h->max_huge_pages; do_div(size_opt, 100); } size_opt >>= huge_page_shift(h); return size_opt; } /* * Parse one mount parameter. */ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct hugetlbfs_fs_context *ctx = fc->fs_private; struct fs_parse_result result; struct hstate *h; char *rest; unsigned long ps; int opt; opt = fs_parse(fc, hugetlb_fs_parameters, param, &result); if (opt < 0) return opt; switch (opt) { case Opt_uid: ctx->uid = result.uid; return 0; case Opt_gid: ctx->gid = result.gid; return 0; case Opt_mode: ctx->mode = result.uint_32 & 01777U; return 0; case Opt_size: /* memparse() will accept a K/M/G without a digit */ if (!param->string || !isdigit(param->string[0])) goto bad_val; ctx->max_size_opt = memparse(param->string, &rest); ctx->max_val_type = SIZE_STD; if (*rest == '%') ctx->max_val_type = SIZE_PERCENT; return 0; case Opt_nr_inodes: /* memparse() will accept a K/M/G without a digit */ if (!param->string || !isdigit(param->string[0])) goto bad_val; ctx->nr_inodes = memparse(param->string, &rest); return 0; case Opt_pagesize: ps = memparse(param->string, &rest); h = size_to_hstate(ps); if (!h) { pr_err("Unsupported page size %lu MB\n", ps / SZ_1M); return -EINVAL; } ctx->hstate = h; return 0; case Opt_min_size: /* memparse() will accept a K/M/G without a digit */ if (!param->string || !isdigit(param->string[0])) goto bad_val; ctx->min_size_opt = memparse(param->string, &rest); ctx->min_val_type = SIZE_STD; if (*rest == '%') ctx->min_val_type = SIZE_PERCENT; return 0; default: return -EINVAL; } bad_val: return invalfc(fc, "Bad value '%s' for mount option '%s'\n", param->string, param->key); } /* * Validate the parsed options. 
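* Size options are converted to huge page counts only at this point, because * a later pagesize= parameter may change ctx->hstate after size= or * min_size= has already been parsed.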
*/ static int hugetlbfs_validate(struct fs_context *fc) { struct hugetlbfs_fs_context *ctx = fc->fs_private; /* * Use huge page pool size (in hstate) to convert the size * options to number of huge pages. If NO_SIZE, -1 is returned. */ ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate, ctx->max_size_opt, ctx->max_val_type); ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate, ctx->min_size_opt, ctx->min_val_type); /* * If max_size was specified, then min_size must be smaller */ if (ctx->max_val_type > NO_SIZE && ctx->min_hpages > ctx->max_hpages) { pr_err("Minimum size can not be greater than maximum size\n"); return -EINVAL; } return 0; } static int hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc) { struct hugetlbfs_fs_context *ctx = fc->fs_private; struct hugetlbfs_sb_info *sbinfo; sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL); if (!sbinfo) return -ENOMEM; sb->s_fs_info = sbinfo; spin_lock_init(&sbinfo->stat_lock); sbinfo->hstate = ctx->hstate; sbinfo->max_inodes = ctx->nr_inodes; sbinfo->free_inodes = ctx->nr_inodes; sbinfo->spool = NULL; sbinfo->uid = ctx->uid; sbinfo->gid = ctx->gid; sbinfo->mode = ctx->mode; /* * Allocate and initialize subpool if maximum or minimum size is * specified. Any needed reservations (for minimum size) are taken * when the subpool is created. */ if (ctx->max_hpages != -1 || ctx->min_hpages != -1) { sbinfo->spool = hugepage_new_subpool(ctx->hstate, ctx->max_hpages, ctx->min_hpages); if (!sbinfo->spool) goto out_free; } sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize = huge_page_size(ctx->hstate); sb->s_blocksize_bits = huge_page_shift(ctx->hstate); sb->s_magic = HUGETLBFS_MAGIC; sb->s_op = &hugetlbfs_ops; sb->s_time_gran = 1; /* * Due to the special and limited functionality of hugetlbfs, it does * not work well as a stacking filesystem. 
*/ sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH; sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx)); if (!sb->s_root) goto out_free; return 0; out_free: kfree(sbinfo->spool); kfree(sbinfo); return -ENOMEM; } static int hugetlbfs_get_tree(struct fs_context *fc) { int err = hugetlbfs_validate(fc); if (err) return err; return get_tree_nodev(fc, hugetlbfs_fill_super); } static void hugetlbfs_fs_context_free(struct fs_context *fc) { kfree(fc->fs_private); } static const struct fs_context_operations hugetlbfs_fs_context_ops = { .free = hugetlbfs_fs_context_free, .parse_param = hugetlbfs_parse_param, .get_tree = hugetlbfs_get_tree, }; static int hugetlbfs_init_fs_context(struct fs_context *fc) { struct hugetlbfs_fs_context *ctx; ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->max_hpages = -1; /* No limit on size by default */ ctx->nr_inodes = -1; /* No limit on number of inodes by default */ ctx->uid = current_fsuid(); ctx->gid = current_fsgid(); ctx->mode = 0755; ctx->hstate = &default_hstate; ctx->min_hpages = -1; /* No default minimum size */ ctx->max_val_type = NO_SIZE; ctx->min_val_type = NO_SIZE; fc->fs_private = ctx; fc->ops = &hugetlbfs_fs_context_ops; return 0; } static struct file_system_type hugetlbfs_fs_type = { .name = "hugetlbfs", .init_fs_context = hugetlbfs_init_fs_context, .parameters = hugetlb_fs_parameters, .kill_sb = kill_litter_super, .fs_flags = FS_ALLOW_IDMAP, }; static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE]; static int can_do_hugetlb_shm(void) { kgid_t shm_group; shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group); return capable(CAP_IPC_LOCK) || in_group_p(shm_group); } static int get_hstate_idx(int page_size_log) { struct hstate *h = hstate_sizelog(page_size_log); if (!h) return -1; return hstate_index(h); } /* * Note that the size should be aligned to the proper hugepage size by the * caller; otherwise hugetlb_reserve_pages() reserves one fewer hugepage than * intended. */ struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag, int creat_flags, int page_size_log) { struct inode *inode; struct vfsmount *mnt; int hstate_idx; struct file *file; hstate_idx = get_hstate_idx(page_size_log); if (hstate_idx < 0) return ERR_PTR(-ENODEV); mnt = hugetlbfs_vfsmount[hstate_idx]; if (!mnt) return ERR_PTR(-ENOENT); if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) { struct ucounts *ucounts = current_ucounts(); if (user_shm_lock(size, ucounts)) { pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n", current->comm, current->pid); user_shm_unlock(size, ucounts); } return ERR_PTR(-EPERM); } file = ERR_PTR(-ENOSPC); /* hugetlbfs_vfsmount[] mounts do not use idmapped mounts.
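* This is why &nop_mnt_idmap is passed to hugetlbfs_get_inode() below.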
*/ inode = hugetlbfs_get_inode(mnt->mnt_sb, &nop_mnt_idmap, NULL, S_IFREG | S_IRWXUGO, 0); if (!inode) goto out; if (creat_flags == HUGETLB_SHMFS_INODE) inode->i_flags |= S_PRIVATE; inode->i_size = size; clear_nlink(inode); if (!hugetlb_reserve_pages(inode, 0, size >> huge_page_shift(hstate_inode(inode)), NULL, acctflag)) file = ERR_PTR(-ENOMEM); else file = alloc_file_pseudo(inode, mnt, name, O_RDWR, &hugetlbfs_file_operations); if (!IS_ERR(file)) return file; iput(inode); out: return file; } static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h) { struct fs_context *fc; struct vfsmount *mnt; fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT); if (IS_ERR(fc)) { mnt = ERR_CAST(fc); } else { struct hugetlbfs_fs_context *ctx = fc->fs_private; ctx->hstate = h; mnt = fc_mount(fc); put_fs_context(fc); } if (IS_ERR(mnt)) pr_err("Cannot mount internal hugetlbfs for page size %luK\n", huge_page_size(h) / SZ_1K); return mnt; } static int __init init_hugetlbfs_fs(void) { struct vfsmount *mnt; struct hstate *h; int error; int i; if (!hugepages_supported()) { pr_info("disabling because there are no supported hugepage sizes\n"); return -ENOTSUPP; } error = -ENOMEM; hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache", sizeof(struct hugetlbfs_inode_info), 0, SLAB_ACCOUNT, init_once); if (hugetlbfs_inode_cachep == NULL) goto out; error = register_filesystem(&hugetlbfs_fs_type); if (error) goto out_free; /* default hstate mount is required */ mnt = mount_one_hugetlbfs(&default_hstate); if (IS_ERR(mnt)) { error = PTR_ERR(mnt); goto out_unreg; } hugetlbfs_vfsmount[default_hstate_idx] = mnt; /* other hstates are optional */ i = 0; for_each_hstate(h) { if (i == default_hstate_idx) { i++; continue; } mnt = mount_one_hugetlbfs(h); if (IS_ERR(mnt)) hugetlbfs_vfsmount[i] = NULL; else hugetlbfs_vfsmount[i] = mnt; i++; } return 0; out_unreg: (void)unregister_filesystem(&hugetlbfs_fs_type); out_free: kmem_cache_destroy(hugetlbfs_inode_cachep); out: return error; } fs_initcall(init_hugetlbfs_fs)
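/*
 * Editor's note: a minimal userspace sketch (not part of the kernel source
 * above) of the paths this file implements -- preallocating and then
 * hole-punching a file on a mounted hugetlbfs instance via fallocate(),
 * which lands in hugetlbfs_fallocate() and hugetlbfs_punch_hole(). The
 * /dev/hugepages mount point, the 2 MiB huge page size and the assumption
 * that enough huge pages are reserved (vm.nr_hugepages) are illustrative.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const size_t hpage = 2UL << 20;		/* assumed huge page size */
	const size_t len = 4 * hpage;
	char *p;
	int fd;

	fd = open("/dev/hugepages/demo", O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Preallocate four huge pages: mode 0 is the default preallocate case. */
	if (fallocate(fd, 0, 0, len) < 0)
		perror("fallocate");
	/* Length and offset must be huge page aligned, cf. the mmap checks above. */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p != MAP_FAILED)
		p[0] = 1;	/* fault one huge page in */
	/* Punch out the second huge page; PUNCH_HOLE must be paired with KEEP_SIZE. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      hpage, hpage) < 0)
		perror("fallocate(PUNCH_HOLE)");
	if (p != MAP_FAILED)
		munmap(p, len);
	close(fd);
	unlink("/dev/hugepages/demo");
	return 0;
}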
// SPDX-License-Identifier: GPL-2.0-only /* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org> */ #include <linux/types.h> #include <linux/export.h> #include <linux/init.h> #include <linux/udp.h> #include <linux/tcp.h> #include <linux/icmp.h> #include <linux/icmpv6.h> #include <linux/dccp.h> #include <linux/sctp.h> #include <net/sctp/checksum.h> #include <linux/netfilter.h> #include <net/netfilter/nf_nat.h> #include <linux/ipv6.h> #include <linux/netfilter_ipv6.h> #include <net/checksum.h> #include <net/ip6_checksum.h> #include <net/ip6_route.h> #include <net/xfrm.h> #include <net/ipv6.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack.h> #include <linux/netfilter/nfnetlink_conntrack.h> static void nf_csum_update(struct sk_buff *skb, unsigned int iphdroff, __sum16 *check, const struct nf_conntrack_tuple *t, enum nf_nat_manip_type maniptype); static void __udp_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, struct udphdr *hdr, const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type maniptype, bool do_csum) { __be16 *portptr, newport; if (maniptype == NF_NAT_MANIP_SRC) { /* Get rid of src port */ newport = tuple->src.u.udp.port; portptr = &hdr->source; } else { /* Get rid of dst port */ newport = tuple->dst.u.udp.port; portptr = &hdr->dest; } if (do_csum) { nf_csum_update(skb, iphdroff, &hdr->check, tuple, maniptype); inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, false); if (!hdr->check) hdr->check = CSUM_MANGLED_0; } *portptr = newport; } static bool udp_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, unsigned int hdroff, const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type maniptype) { struct udphdr *hdr; if (skb_ensure_writable(skb, hdroff + sizeof(*hdr))) return false; hdr = (struct udphdr *)(skb->data + hdroff); __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, !!hdr->check); return true; } static bool udplite_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, unsigned int hdroff, const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type maniptype) { #ifdef
CONFIG_NF_CT_PROTO_UDPLITE struct udphdr *hdr; if (skb_ensure_writable(skb, hdroff + sizeof(*hdr))) return false; hdr = (struct udphdr *)(skb->data + hdroff); __udp_manip_pkt(skb, iphdroff, hdr, tuple, maniptype, true); #endif return true; } static bool sctp_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, unsigned int hdroff, const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type maniptype) { #ifdef CONFIG_NF_CT_PROTO_SCTP struct sctphdr *hdr; int hdrsize = 8; /* This could be an inner header returned in icmp packet; in such * cases we cannot update the checksum field since it is outside * of the 8 bytes of transport layer headers we are guaranteed. */ if (skb->len >= hdroff + sizeof(*hdr)) hdrsize = sizeof(*hdr); if (skb_ensure_writable(skb, hdroff + hdrsize)) return false; hdr = (struct sctphdr *)(skb->data + hdroff); if (maniptype == NF_NAT_MANIP_SRC) { /* Get rid of src port */ hdr->source = tuple->src.u.sctp.port; } else { /* Get rid of dst port */ hdr->dest = tuple->dst.u.sctp.port; } if (hdrsize < sizeof(*hdr)) return true; if (skb->ip_summed != CHECKSUM_PARTIAL) { hdr->checksum = sctp_compute_cksum(skb, hdroff); skb->ip_summed = CHECKSUM_NONE; } #endif return true; } static bool tcp_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, unsigned int hdroff, const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type maniptype) { struct tcphdr *hdr; __be16 *portptr, newport, oldport; int hdrsize = 8; /* TCP connection tracking guarantees this much */ /* this could be an inner header returned in icmp packet; in such cases we cannot update the checksum field since it is outside of the 8 bytes of transport layer headers we are guaranteed */ if (skb->len >= hdroff + sizeof(struct tcphdr)) hdrsize = sizeof(struct tcphdr); if (skb_ensure_writable(skb, hdroff + hdrsize)) return false; hdr = (struct tcphdr *)(skb->data + hdroff); if (maniptype == NF_NAT_MANIP_SRC) { /* Get rid of src port */ newport = tuple->src.u.tcp.port; portptr = &hdr->source; } else { /* Get rid of dst port */ newport = tuple->dst.u.tcp.port; portptr = &hdr->dest; } oldport = *portptr; *portptr = newport; if (hdrsize < sizeof(*hdr)) return true; nf_csum_update(skb, iphdroff, &hdr->check, tuple, maniptype); inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, false); return true; } static bool dccp_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, unsigned int hdroff, const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type maniptype) { #ifdef CONFIG_NF_CT_PROTO_DCCP struct dccp_hdr *hdr; __be16 *portptr, oldport, newport; int hdrsize = 8; /* DCCP connection tracking guarantees this much */ if (skb->len >= hdroff + sizeof(struct dccp_hdr)) hdrsize = sizeof(struct dccp_hdr); if (skb_ensure_writable(skb, hdroff + hdrsize)) return false; hdr = (struct dccp_hdr *)(skb->data + hdroff); if (maniptype == NF_NAT_MANIP_SRC) { newport = tuple->src.u.dccp.port; portptr = &hdr->dccph_sport; } else { newport = tuple->dst.u.dccp.port; portptr = &hdr->dccph_dport; } oldport = *portptr; *portptr = newport; if (hdrsize < sizeof(*hdr)) return true; nf_csum_update(skb, iphdroff, &hdr->dccph_checksum, tuple, maniptype); inet_proto_csum_replace2(&hdr->dccph_checksum, skb, oldport, newport, false); #endif return true; } static bool icmp_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, unsigned int hdroff, const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type maniptype) { struct icmphdr *hdr; if (skb_ensure_writable(skb, hdroff + sizeof(*hdr))) return false; hdr = (struct icmphdr *)(skb->data +
hdroff); switch (hdr->type) { case ICMP_ECHO: case ICMP_ECHOREPLY: case ICMP_TIMESTAMP: case ICMP_TIMESTAMPREPLY: case ICMP_INFO_REQUEST: case ICMP_INFO_REPLY: case ICMP_ADDRESS: case ICMP_ADDRESSREPLY: break; default: return true; } inet_proto_csum_replace2(&hdr->checksum, skb, hdr->un.echo.id, tuple->src.u.icmp.id, false); hdr->un.echo.id = tuple->src.u.icmp.id; return true; } static bool icmpv6_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, unsigned int hdroff, const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type maniptype) { struct icmp6hdr *hdr; if (skb_ensure_writable(skb, hdroff + sizeof(*hdr))) return false; hdr = (struct icmp6hdr *)(skb->data + hdroff); nf_csum_update(skb, iphdroff, &hdr->icmp6_cksum, tuple, maniptype); if (hdr->icmp6_type == ICMPV6_ECHO_REQUEST || hdr->icmp6_type == ICMPV6_ECHO_REPLY) { inet_proto_csum_replace2(&hdr->icmp6_cksum, skb, hdr->icmp6_identifier, tuple->src.u.icmp.id, false); hdr->icmp6_identifier = tuple->src.u.icmp.id; } return true; } /* manipulate a GRE packet according to maniptype */ static bool gre_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, unsigned int hdroff, const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type maniptype) { #if IS_ENABLED(CONFIG_NF_CT_PROTO_GRE) const struct gre_base_hdr *greh; struct pptp_gre_header *pgreh; /* pgreh includes two optional 32bit fields which are not required * to be there. That's where the magic '8' comes from */ if (skb_ensure_writable(skb, hdroff + sizeof(*pgreh) - 8)) return false; greh = (void *)skb->data + hdroff; pgreh = (struct pptp_gre_header *)greh; /* we only have destination manip of a packet, since 'source key' * is not present in the packet itself */ if (maniptype != NF_NAT_MANIP_DST) return true; switch (greh->flags & GRE_VERSION) { case GRE_VERSION_0: /* We do not currently NAT any GREv0 packets. * Try to behave like "nf_nat_proto_unknown" */ break; case GRE_VERSION_1: pr_debug("call_id -> 0x%04x\n", ntohs(tuple->dst.u.gre.key)); pgreh->call_id = tuple->dst.u.gre.key; break; default: pr_debug("can't nat unknown GRE version\n"); return false; } #endif return true; } static bool l4proto_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, unsigned int hdroff, const struct nf_conntrack_tuple *tuple, enum nf_nat_manip_type maniptype) { switch (tuple->dst.protonum) { case IPPROTO_TCP: return tcp_manip_pkt(skb, iphdroff, hdroff, tuple, maniptype); case IPPROTO_UDP: return udp_manip_pkt(skb, iphdroff, hdroff, tuple, maniptype); case IPPROTO_UDPLITE: return udplite_manip_pkt(skb, iphdroff, hdroff, tuple, maniptype); case IPPROTO_SCTP: return sctp_manip_pkt(skb, iphdroff, hdroff, tuple, maniptype); case IPPROTO_ICMP: return icmp_manip_pkt(skb, iphdroff, hdroff, tuple, maniptype); case IPPROTO_ICMPV6: return icmpv6_manip_pkt(skb, iphdroff, hdroff, tuple, maniptype); case IPPROTO_DCCP: return dccp_manip_pkt(skb, iphdroff, hdroff, tuple, maniptype); case IPPROTO_GRE: return gre_manip_pkt(skb, iphdroff, hdroff, tuple, maniptype); } /* If we don't know protocol -- no error, pass it unmodified. 
*/ return true; } static bool nf_nat_ipv4_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, const struct nf_conntrack_tuple *target, enum nf_nat_manip_type maniptype) { struct iphdr *iph; unsigned int hdroff; if (skb_ensure_writable(skb, iphdroff + sizeof(*iph))) return false; iph = (void *)skb->data + iphdroff; hdroff = iphdroff + iph->ihl * 4; if (!l4proto_manip_pkt(skb, iphdroff, hdroff, target, maniptype)) return false; iph = (void *)skb->data + iphdroff; if (maniptype == NF_NAT_MANIP_SRC) { csum_replace4(&iph->check, iph->saddr, target->src.u3.ip); iph->saddr = target->src.u3.ip; } else { csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip); iph->daddr = target->dst.u3.ip; } return true; } static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb, unsigned int iphdroff, const struct nf_conntrack_tuple *target, enum nf_nat_manip_type maniptype) { #if IS_ENABLED(CONFIG_IPV6) struct ipv6hdr *ipv6h; __be16 frag_off; int hdroff; u8 nexthdr; if (skb_ensure_writable(skb, iphdroff + sizeof(*ipv6h))) return false; ipv6h = (void *)skb->data + iphdroff; nexthdr = ipv6h->nexthdr; hdroff = ipv6_skip_exthdr(skb, iphdroff + sizeof(*ipv6h), &nexthdr, &frag_off); if (hdroff < 0) goto manip_addr; if ((frag_off & htons(~0x7)) == 0 && !l4proto_manip_pkt(skb, iphdroff, hdroff, target, maniptype)) return false; /* must reload, offset might have changed */ ipv6h = (void *)skb->data + iphdroff; manip_addr: if (maniptype == NF_NAT_MANIP_SRC) ipv6h->saddr = target->src.u3.in6; else ipv6h->daddr = target->dst.u3.in6; #endif return true; } unsigned int nf_nat_manip_pkt(struct sk_buff *skb, struct nf_conn *ct, enum nf_nat_manip_type mtype, enum ip_conntrack_dir dir) { struct nf_conntrack_tuple target; /* We are aiming to look like inverse of other direction. */ nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple); switch (target.src.l3num) { case NFPROTO_IPV6: if (nf_nat_ipv6_manip_pkt(skb, 0, &target, mtype)) return NF_ACCEPT; break; case NFPROTO_IPV4: if (nf_nat_ipv4_manip_pkt(skb, 0, &target, mtype)) return NF_ACCEPT; break; default: WARN_ON_ONCE(1); break; } return NF_DROP; } static void nf_nat_ipv4_csum_update(struct sk_buff *skb, unsigned int iphdroff, __sum16 *check, const struct nf_conntrack_tuple *t, enum nf_nat_manip_type maniptype) { struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff); __be32 oldip, newip; if (maniptype == NF_NAT_MANIP_SRC) { oldip = iph->saddr; newip = t->src.u3.ip; } else { oldip = iph->daddr; newip = t->dst.u3.ip; } inet_proto_csum_replace4(check, skb, oldip, newip, true); } static void nf_nat_ipv6_csum_update(struct sk_buff *skb, unsigned int iphdroff, __sum16 *check, const struct nf_conntrack_tuple *t, enum nf_nat_manip_type maniptype) { #if IS_ENABLED(CONFIG_IPV6) const struct ipv6hdr *ipv6h = (struct ipv6hdr *)(skb->data + iphdroff); const struct in6_addr *oldip, *newip; if (maniptype == NF_NAT_MANIP_SRC) { oldip = &ipv6h->saddr; newip = &t->src.u3.in6; } else { oldip = &ipv6h->daddr; newip = &t->dst.u3.in6; } inet_proto_csum_replace16(check, skb, oldip->s6_addr32, newip->s6_addr32, true); #endif } static void nf_csum_update(struct sk_buff *skb, unsigned int iphdroff, __sum16 *check, const struct nf_conntrack_tuple *t, enum nf_nat_manip_type maniptype) { switch (t->src.l3num) { case NFPROTO_IPV4: nf_nat_ipv4_csum_update(skb, iphdroff, check, t, maniptype); return; case NFPROTO_IPV6: nf_nat_ipv6_csum_update(skb, iphdroff, check, t, maniptype); return; } } static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb, u8 proto, void *data, __sum16 *check, int datalen, int 
oldlen) { if (skb->ip_summed != CHECKSUM_PARTIAL) { const struct iphdr *iph = ip_hdr(skb); skb->ip_summed = CHECKSUM_PARTIAL; skb->csum_start = skb_headroom(skb) + skb_network_offset(skb) + ip_hdrlen(skb); skb->csum_offset = (void *)check - data; *check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datalen, proto, 0); } else { inet_proto_csum_replace2(check, skb, htons(oldlen), htons(datalen), true); } } #if IS_ENABLED(CONFIG_IPV6) static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb, u8 proto, void *data, __sum16 *check, int datalen, int oldlen) { if (skb->ip_summed != CHECKSUM_PARTIAL) { const struct ipv6hdr *ipv6h = ipv6_hdr(skb); skb->ip_summed = CHECKSUM_PARTIAL; skb->csum_start = skb_headroom(skb) + skb_network_offset(skb) + (data - (void *)skb->data); skb->csum_offset = (void *)check - data; *check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, datalen, proto, 0); } else { inet_proto_csum_replace2(check, skb, htons(oldlen), htons(datalen), true); } } #endif void nf_nat_csum_recalc(struct sk_buff *skb, u8 nfproto, u8 proto, void *data, __sum16 *check, int datalen, int oldlen) { switch (nfproto) { case NFPROTO_IPV4: nf_nat_ipv4_csum_recalc(skb, proto, data, check, datalen, oldlen); return; #if IS_ENABLED(CONFIG_IPV6) case NFPROTO_IPV6: nf_nat_ipv6_csum_recalc(skb, proto, data, check, datalen, oldlen); return; #endif } WARN_ON_ONCE(1); } int nf_nat_icmp_reply_translation(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int hooknum) { struct { struct icmphdr icmp; struct iphdr ip; } *inside; enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); enum nf_nat_manip_type manip = HOOK2MANIP(hooknum); unsigned int hdrlen = ip_hdrlen(skb); struct nf_conntrack_tuple target; unsigned long statusbit; WARN_ON(ctinfo != IP_CT_RELATED && ctinfo != IP_CT_RELATED_REPLY); if (skb_ensure_writable(skb, hdrlen + sizeof(*inside))) return 0; if (nf_ip_checksum(skb, hooknum, hdrlen, IPPROTO_ICMP)) return 0; inside = (void *)skb->data + hdrlen; if (inside->icmp.type == ICMP_REDIRECT) { if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK) return 0; if (ct->status & IPS_NAT_MASK) return 0; } if (manip == NF_NAT_MANIP_SRC) statusbit = IPS_SRC_NAT; else statusbit = IPS_DST_NAT; /* Invert if this is reply direction */ if (dir == IP_CT_DIR_REPLY) statusbit ^= IPS_NAT_MASK; if (!(ct->status & statusbit)) return 1; if (!nf_nat_ipv4_manip_pkt(skb, hdrlen + sizeof(inside->icmp), &ct->tuplehash[!dir].tuple, !manip)) return 0; if (skb->ip_summed != CHECKSUM_PARTIAL) { /* Reloading "inside" here since manip_pkt may reallocate */ inside = (void *)skb->data + hdrlen; inside->icmp.checksum = 0; inside->icmp.checksum = csum_fold(skb_checksum(skb, hdrlen, skb->len - hdrlen, 0)); } /* Change outer to look like the reply to an incoming packet */ nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple); target.dst.protonum = IPPROTO_ICMP; if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip)) return 0; return 1; } EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation); static unsigned int nf_nat_ipv4_fn(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { struct nf_conn *ct; enum ip_conntrack_info ctinfo; ct = nf_ct_get(skb, &ctinfo); if (!ct) return NF_ACCEPT; if (ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY) { if (ip_hdr(skb)->protocol == IPPROTO_ICMP) { if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo, state->hook)) return NF_DROP; else return NF_ACCEPT; } } return nf_nat_inet_fn(priv, skb, state); } static unsigned int nf_nat_ipv4_pre_routing(void *priv, struct sk_buff 
*skb, const struct nf_hook_state *state) { unsigned int ret; __be32 daddr = ip_hdr(skb)->daddr; ret = nf_nat_ipv4_fn(priv, skb, state); if (ret == NF_ACCEPT && daddr != ip_hdr(skb)->daddr) skb_dst_drop(skb); return ret; } #ifdef CONFIG_XFRM static int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family) { struct sock *sk = skb->sk; struct dst_entry *dst; unsigned int hh_len; struct flowi fl; int err; err = xfrm_decode_session(net, skb, &fl, family); if (err < 0) return err; dst = skb_dst(skb); if (dst->xfrm) dst = ((struct xfrm_dst *)dst)->route; if (!dst_hold_safe(dst)) return -EHOSTUNREACH; if (sk && !net_eq(net, sock_net(sk))) sk = NULL; dst = xfrm_lookup(net, dst, &fl, sk, 0); if (IS_ERR(dst)) return PTR_ERR(dst); skb_dst_drop(skb); skb_dst_set(skb, dst); /* Change in oif may mean change in hh_len. */ hh_len = skb_dst(skb)->dev->hard_header_len; if (skb_headroom(skb) < hh_len && pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC)) return -ENOMEM; return 0; } #endif static bool nf_nat_inet_port_was_mangled(const struct sk_buff *skb, __be16 sport) { enum ip_conntrack_info ctinfo; enum ip_conntrack_dir dir; const struct nf_conn *ct; ct = nf_ct_get(skb, &ctinfo); if (!ct) return false; switch (nf_ct_protonum(ct)) { case IPPROTO_TCP: case IPPROTO_UDP: break; default: return false; } dir = CTINFO2DIR(ctinfo); if (dir != IP_CT_DIR_ORIGINAL) return false; return ct->tuplehash[!dir].tuple.dst.u.all != sport; } static unsigned int nf_nat_ipv4_local_in(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { __be32 saddr = ip_hdr(skb)->saddr; struct sock *sk = skb->sk; unsigned int ret; ret = nf_nat_ipv4_fn(priv, skb, state); if (ret != NF_ACCEPT || !sk || inet_sk_transparent(sk)) return ret; /* skb has a socket assigned via tcp edemux. We need to check * if nf_nat_ipv4_fn() has mangled the packet in a way that * edemux would not have found this socket. * * This includes both changes to the source address and changes * to the source port, which are both handled by the * nf_nat_ipv4_fn() call above -- long after tcp/udp early demux * might have found a socket for the old (pre-snat) address. 
*/ if (saddr != ip_hdr(skb)->saddr || nf_nat_inet_port_was_mangled(skb, sk->sk_dport)) skb_orphan(skb); /* TCP edemux obtained wrong socket */ return ret; } static unsigned int nf_nat_ipv4_out(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { #ifdef CONFIG_XFRM const struct nf_conn *ct; enum ip_conntrack_info ctinfo; int err; #endif unsigned int ret; ret = nf_nat_ipv4_fn(priv, skb, state); #ifdef CONFIG_XFRM if (ret != NF_ACCEPT) return ret; if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) return ret; ct = nf_ct_get(skb, &ctinfo); if (ct) { enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); if (ct->tuplehash[dir].tuple.src.u3.ip != ct->tuplehash[!dir].tuple.dst.u3.ip || (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP && ct->tuplehash[dir].tuple.src.u.all != ct->tuplehash[!dir].tuple.dst.u.all)) { err = nf_xfrm_me_harder(state->net, skb, AF_INET); if (err < 0) ret = NF_DROP_ERR(err); } } #endif return ret; } static unsigned int nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { const struct nf_conn *ct; enum ip_conntrack_info ctinfo; unsigned int ret; int err; ret = nf_nat_ipv4_fn(priv, skb, state); if (ret != NF_ACCEPT) return ret; ct = nf_ct_get(skb, &ctinfo); if (ct) { enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); if (ct->tuplehash[dir].tuple.dst.u3.ip != ct->tuplehash[!dir].tuple.src.u3.ip) { err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC); if (err < 0) ret = NF_DROP_ERR(err); } #ifdef CONFIG_XFRM else if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP && ct->tuplehash[dir].tuple.dst.u.all != ct->tuplehash[!dir].tuple.src.u.all) { err = nf_xfrm_me_harder(state->net, skb, AF_INET); if (err < 0) ret = NF_DROP_ERR(err); } #endif } return ret; } static const struct nf_hook_ops nf_nat_ipv4_ops[] = { /* Before packet filtering, change destination */ { .hook = nf_nat_ipv4_pre_routing, .pf = NFPROTO_IPV4, .hooknum = NF_INET_PRE_ROUTING, .priority = NF_IP_PRI_NAT_DST, }, /* After packet filtering, change source */ { .hook = nf_nat_ipv4_out, .pf = NFPROTO_IPV4, .hooknum = NF_INET_POST_ROUTING, .priority = NF_IP_PRI_NAT_SRC, }, /* Before packet filtering, change destination */ { .hook = nf_nat_ipv4_local_fn, .pf = NFPROTO_IPV4, .hooknum = NF_INET_LOCAL_OUT, .priority = NF_IP_PRI_NAT_DST, }, /* After packet filtering, change source */ { .hook = nf_nat_ipv4_local_in, .pf = NFPROTO_IPV4, .hooknum = NF_INET_LOCAL_IN, .priority = NF_IP_PRI_NAT_SRC, }, }; int nf_nat_ipv4_register_fn(struct net *net, const struct nf_hook_ops *ops) { return nf_nat_register_fn(net, ops->pf, ops, nf_nat_ipv4_ops, ARRAY_SIZE(nf_nat_ipv4_ops)); } EXPORT_SYMBOL_GPL(nf_nat_ipv4_register_fn); void nf_nat_ipv4_unregister_fn(struct net *net, const struct nf_hook_ops *ops) { nf_nat_unregister_fn(net, ops->pf, ops, ARRAY_SIZE(nf_nat_ipv4_ops)); } EXPORT_SYMBOL_GPL(nf_nat_ipv4_unregister_fn); #if IS_ENABLED(CONFIG_IPV6) int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, unsigned int hooknum, unsigned int hdrlen) { struct { struct icmp6hdr icmp6; struct ipv6hdr ip6; } *inside; enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); enum nf_nat_manip_type manip = HOOK2MANIP(hooknum); struct nf_conntrack_tuple target; unsigned long statusbit; WARN_ON(ctinfo != IP_CT_RELATED && ctinfo != IP_CT_RELATED_REPLY); if (skb_ensure_writable(skb, hdrlen + sizeof(*inside))) return 0; if (nf_ip6_checksum(skb, hooknum, hdrlen, IPPROTO_ICMPV6)) return 0; inside = (void 
*)skb->data + hdrlen; if (inside->icmp6.icmp6_type == NDISC_REDIRECT) { if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK) return 0; if (ct->status & IPS_NAT_MASK) return 0; } if (manip == NF_NAT_MANIP_SRC) statusbit = IPS_SRC_NAT; else statusbit = IPS_DST_NAT; /* Invert if this is reply direction */ if (dir == IP_CT_DIR_REPLY) statusbit ^= IPS_NAT_MASK; if (!(ct->status & statusbit)) return 1; if (!nf_nat_ipv6_manip_pkt(skb, hdrlen + sizeof(inside->icmp6), &ct->tuplehash[!dir].tuple, !manip)) return 0; if (skb->ip_summed != CHECKSUM_PARTIAL) { struct ipv6hdr *ipv6h = ipv6_hdr(skb); inside = (void *)skb->data + hdrlen; inside->icmp6.icmp6_cksum = 0; inside->icmp6.icmp6_cksum = csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len - hdrlen, IPPROTO_ICMPV6, skb_checksum(skb, hdrlen, skb->len - hdrlen, 0)); } nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple); target.dst.protonum = IPPROTO_ICMPV6; if (!nf_nat_ipv6_manip_pkt(skb, 0, &target, manip)) return 0; return 1; } EXPORT_SYMBOL_GPL(nf_nat_icmpv6_reply_translation); static unsigned int nf_nat_ipv6_fn(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { struct nf_conn *ct; enum ip_conntrack_info ctinfo; __be16 frag_off; int hdrlen; u8 nexthdr; ct = nf_ct_get(skb, &ctinfo); /* Can't track? It's not due to stress, or conntrack would * have dropped it. Hence it's the user's responsibility to * packet filter it out, or implement conntrack/NAT for that * protocol. 8) --RR */ if (!ct) return NF_ACCEPT; if (ctinfo == IP_CT_RELATED || ctinfo == IP_CT_RELATED_REPLY) { nexthdr = ipv6_hdr(skb)->nexthdr; hdrlen = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, &frag_off); if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) { if (!nf_nat_icmpv6_reply_translation(skb, ct, ctinfo, state->hook, hdrlen)) return NF_DROP; else return NF_ACCEPT; } } return nf_nat_inet_fn(priv, skb, state); } static unsigned int nf_nat_ipv6_local_in(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { struct in6_addr saddr = ipv6_hdr(skb)->saddr; struct sock *sk = skb->sk; unsigned int ret; ret = nf_nat_ipv6_fn(priv, skb, state); if (ret != NF_ACCEPT || !sk || inet_sk_transparent(sk)) return ret; /* see nf_nat_ipv4_local_in */ if (ipv6_addr_cmp(&saddr, &ipv6_hdr(skb)->saddr) || nf_nat_inet_port_was_mangled(skb, sk->sk_dport)) skb_orphan(skb); return ret; } static unsigned int nf_nat_ipv6_in(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { unsigned int ret, verdict; struct in6_addr daddr = ipv6_hdr(skb)->daddr; ret = nf_nat_ipv6_fn(priv, skb, state); verdict = ret & NF_VERDICT_MASK; if (verdict != NF_DROP && verdict != NF_STOLEN && ipv6_addr_cmp(&daddr, &ipv6_hdr(skb)->daddr)) skb_dst_drop(skb); return ret; } static unsigned int nf_nat_ipv6_out(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { #ifdef CONFIG_XFRM const struct nf_conn *ct; enum ip_conntrack_info ctinfo; int err; #endif unsigned int ret; ret = nf_nat_ipv6_fn(priv, skb, state); #ifdef CONFIG_XFRM if (ret != NF_ACCEPT) return ret; if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) return ret; ct = nf_ct_get(skb, &ctinfo); if (ct) { enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3, &ct->tuplehash[!dir].tuple.dst.u3) || (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 && ct->tuplehash[dir].tuple.src.u.all != ct->tuplehash[!dir].tuple.dst.u.all)) { err = nf_xfrm_me_harder(state->net, skb, AF_INET6); if (err < 0) ret = NF_DROP_ERR(err); } } #endif return ret; } static
unsigned int nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb, const struct nf_hook_state *state) { const struct nf_conn *ct; enum ip_conntrack_info ctinfo; unsigned int ret; int err; ret = nf_nat_ipv6_fn(priv, skb, state); if (ret != NF_ACCEPT) return ret; ct = nf_ct_get(skb, &ctinfo); if (ct) { enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, &ct->tuplehash[!dir].tuple.src.u3)) { err = nf_ip6_route_me_harder(state->net, state->sk, skb); if (err < 0) ret = NF_DROP_ERR(err); } #ifdef CONFIG_XFRM else if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMPV6 && ct->tuplehash[dir].tuple.dst.u.all != ct->tuplehash[!dir].tuple.src.u.all) { err = nf_xfrm_me_harder(state->net, skb, AF_INET6); if (err < 0) ret = NF_DROP_ERR(err); } #endif } return ret; } static const struct nf_hook_ops nf_nat_ipv6_ops[] = { /* Before packet filtering, change destination */ { .hook = nf_nat_ipv6_in, .pf = NFPROTO_IPV6, .hooknum = NF_INET_PRE_ROUTING, .priority = NF_IP6_PRI_NAT_DST, }, /* After packet filtering, change source */ { .hook = nf_nat_ipv6_out, .pf = NFPROTO_IPV6, .hooknum = NF_INET_POST_ROUTING, .priority = NF_IP6_PRI_NAT_SRC, }, /* Before packet filtering, change destination */ { .hook = nf_nat_ipv6_local_fn, .pf = NFPROTO_IPV6, .hooknum = NF_INET_LOCAL_OUT, .priority = NF_IP6_PRI_NAT_DST, }, /* After packet filtering, change source */ { .hook = nf_nat_ipv6_local_in, .pf = NFPROTO_IPV6, .hooknum = NF_INET_LOCAL_IN, .priority = NF_IP6_PRI_NAT_SRC, }, }; int nf_nat_ipv6_register_fn(struct net *net, const struct nf_hook_ops *ops) { return nf_nat_register_fn(net, ops->pf, ops, nf_nat_ipv6_ops, ARRAY_SIZE(nf_nat_ipv6_ops)); } EXPORT_SYMBOL_GPL(nf_nat_ipv6_register_fn); void nf_nat_ipv6_unregister_fn(struct net *net, const struct nf_hook_ops *ops) { nf_nat_unregister_fn(net, ops->pf, ops, ARRAY_SIZE(nf_nat_ipv6_ops)); } EXPORT_SYMBOL_GPL(nf_nat_ipv6_unregister_fn); #endif /* CONFIG_IPV6 */ #if defined(CONFIG_NF_TABLES_INET) && IS_ENABLED(CONFIG_NFT_NAT) int nf_nat_inet_register_fn(struct net *net, const struct nf_hook_ops *ops) { int ret; if (WARN_ON_ONCE(ops->pf != NFPROTO_INET)) return -EINVAL; ret = nf_nat_register_fn(net, NFPROTO_IPV6, ops, nf_nat_ipv6_ops, ARRAY_SIZE(nf_nat_ipv6_ops)); if (ret) return ret; ret = nf_nat_register_fn(net, NFPROTO_IPV4, ops, nf_nat_ipv4_ops, ARRAY_SIZE(nf_nat_ipv4_ops)); if (ret) nf_nat_unregister_fn(net, NFPROTO_IPV6, ops, ARRAY_SIZE(nf_nat_ipv6_ops)); return ret; } EXPORT_SYMBOL_GPL(nf_nat_inet_register_fn); void nf_nat_inet_unregister_fn(struct net *net, const struct nf_hook_ops *ops) { nf_nat_unregister_fn(net, NFPROTO_IPV4, ops, ARRAY_SIZE(nf_nat_ipv4_ops)); nf_nat_unregister_fn(net, NFPROTO_IPV6, ops, ARRAY_SIZE(nf_nat_ipv6_ops)); } EXPORT_SYMBOL_GPL(nf_nat_inet_unregister_fn); #endif /* NFT INET NAT */
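/*
 * Usage sketch (not part of the original file): how a NAT backend could
 * attach one of its own hook functions through the registration helpers
 * exported above. All "example_" names are hypothetical and introduced
 * only for illustration; the registration API and the hook signature are
 * taken from the code above.
 */
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_nat.h>

static unsigned int example_nat_hook(void *priv, struct sk_buff *skb,
				     const struct nf_hook_state *state)
{
	/* A real backend would evaluate its NAT rules here. */
	return NF_ACCEPT;
}

static const struct nf_hook_ops example_nat_ops = {
	.hook		= example_nat_hook,
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_PRE_ROUTING,
	.priority	= NF_IP_PRI_NAT_DST,
};

/* Typically wired up through a pernet init/exit pair. */
static int example_nat_net_init(struct net *net)
{
	/*
	 * Registers example_nat_hook for PRE_ROUTING in this netns and
	 * ensures the base nf_nat_ipv4_ops hooks above are installed.
	 */
	return nf_nat_ipv4_register_fn(net, &example_nat_ops);
}

static void example_nat_net_exit(struct net *net)
{
	nf_nat_ipv4_unregister_fn(net, &example_nat_ops);
}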
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Media device * * Copyright (C) 2010 Nokia Corporation * * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com> * Sakari Ailus <sakari.ailus@iki.fi> */ #ifndef _MEDIA_DEVICE_H #define _MEDIA_DEVICE_H #include <linux/list.h> #include <linux/mutex.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <media/media-devnode.h> #include <media/media-entity.h> struct ida; struct media_device; /** * struct media_entity_notify - Media Entity Notify * * @list: List head * @notify_data: Input data to invoke the callback * @notify: Callback function pointer * * Drivers may register a callback to take action when new entities get * registered with the media device. This handler is intended for creating * links between existing entities and should not create entities and register * them. */ struct media_entity_notify { struct list_head list; void *notify_data; void (*notify)(struct media_entity *entity, void *notify_data); }; /** * struct media_device_ops - Media device operations * @link_notify: Link state change notification callback. This callback is * called with the graph_mutex held. * @req_alloc: Allocate a request. Set this if you need to allocate a struct * larger than struct media_request. @req_alloc and @req_free must * either both be set or both be NULL. * @req_free: Free a request. Set this if @req_alloc was set as well, leave * to NULL otherwise. * @req_validate: Validate a request, but do not queue yet. The req_queue_mutex * lock is held when this op is called.
* @req_queue: Queue a validated request, cannot fail. If something goes * wrong when queueing this request then it should be marked * as such internally in the driver and any related buffers * must eventually return to vb2 with state VB2_BUF_STATE_ERROR. * The req_queue_mutex lock is held when this op is called. * It is important that vb2 buffer objects are queued last after * all other object types are queued: queueing a buffer kickstarts * the request processing, so all other objects related to the * request (and thus the buffer) must be available to the driver. * And once a buffer is queued, then the driver can complete * or delete objects from the request before req_queue exits. */ struct media_device_ops { int (*link_notify)(struct media_link *link, u32 flags, unsigned int notification); struct media_request *(*req_alloc)(struct media_device *mdev); void (*req_free)(struct media_request *req); int (*req_validate)(struct media_request *req); void (*req_queue)(struct media_request *req); }; /** * struct media_device - Media device * @dev: Parent device * @devnode: Media device node * @driver_name: Optional device driver name. If not set, calls to * %MEDIA_IOC_DEVICE_INFO will return ``dev->driver->name``. * This is needed for USB drivers for example, as otherwise * they'll all appear as if the driver name was "usb". * @model: Device model name * @serial: Device serial number (optional) * @bus_info: Unique and stable device location identifier * @hw_revision: Hardware device revision * @topology_version: Monotonic counter for storing the version of the graph * topology. Should be incremented each time the topology changes. * @id: Unique ID used on the last registered graph object * @entity_internal_idx: Unique internal entity ID used by the graph traversal * algorithms * @entity_internal_idx_max: Allocated internal entity indices * @entities: List of registered entities * @interfaces: List of registered interfaces * @pads: List of registered pads * @links: List of registered links * @entity_notify: List of registered entity_notify callbacks * @graph_mutex: Protects access to struct media_device data * @pm_count_walk: Graph walk for power state walk. Access serialised using * graph_mutex. * * @source_priv: Driver Private data for enable/disable source handlers * @enable_source: Enable Source Handler function pointer * @disable_source: Disable Source Handler function pointer * * @ops: Operation handler callbacks * @req_queue_mutex: Serialise the MEDIA_REQUEST_IOC_QUEUE ioctl w.r.t. * other operations that stop or start streaming. * @request_id: Used to generate unique request IDs * * This structure represents an abstract high-level media device. It allows easy * access to entities and provides basic media device-level support. The * structure can be allocated directly or embedded in a larger structure. * * The parent @dev is a physical device. It must be set before registering the * media device. * * @model is a descriptive model name exported through sysfs. It doesn't have to * be unique. * * @enable_source is a handler to find the source entity for the * sink entity and activate the link between them if the source * entity is free. Drivers should call this handler before * accessing the source. * * @disable_source is a handler to find the source entity for the * sink entity and deactivate the link between them. Drivers * should call this handler to release the source.
* * Use-case: find tuner entity connected to the decoder * entity and check if it is available, and activate the * link between them from @enable_source and deactivate * from @disable_source. * * .. note:: * * The bridge driver is expected to implement and set the * handler when &media_device is registered or when * the bridge driver finds the media_device during probe. * The bridge driver sets source_priv with information * necessary to run the @enable_source and @disable_source handlers. * Callers should hold graph_mutex to access and call the @enable_source * and @disable_source handlers. */ struct media_device { /* dev->driver_data points to this struct. */ struct device *dev; struct media_devnode *devnode; char model[32]; char driver_name[32]; char serial[40]; char bus_info[32]; u32 hw_revision; u64 topology_version; u32 id; struct ida entity_internal_idx; int entity_internal_idx_max; struct list_head entities; struct list_head interfaces; struct list_head pads; struct list_head links; /* notify callback list invoked when a new entity is registered */ struct list_head entity_notify; /* Serializes graph operations. */ struct mutex graph_mutex; struct media_graph pm_count_walk; void *source_priv; int (*enable_source)(struct media_entity *entity, struct media_pipeline *pipe); void (*disable_source)(struct media_entity *entity); const struct media_device_ops *ops; struct mutex req_queue_mutex; atomic_t request_id; }; /* We don't need to include usb.h here */ struct usb_device; #ifdef CONFIG_MEDIA_CONTROLLER /* Supported link_notify @notification values. */ #define MEDIA_DEV_NOTIFY_PRE_LINK_CH 0 #define MEDIA_DEV_NOTIFY_POST_LINK_CH 1 /** * media_device_init() - Initializes a media device element * * @mdev: pointer to struct &media_device * * This function initializes the media device prior to its registration. * The media device initialization and registration is split in two functions * to avoid race conditions and make the media device available to user-space * before the media graph has been completed. * * So drivers need to first initialize the media device, register any entity * within the media device, create pad to pad links and only then register * the media device by calling media_device_register(). * * The caller is responsible for initializing the media device before * registration. The following fields must be set: * * - dev must point to the parent device * - model must be filled with the device model name * * The bus_info field is set by media_device_init() for PCI and platform devices * if the field begins with '\0'. */ void media_device_init(struct media_device *mdev); /** * media_device_cleanup() - Cleans up a media device element * * @mdev: pointer to struct &media_device * * This function destroys the graph_mutex that was * initialized in media_device_init(). */ void media_device_cleanup(struct media_device *mdev); /** * __media_device_register() - Registers a media device element * * @mdev: pointer to struct &media_device * @owner: should be filled with %THIS_MODULE * * Users should, instead, call the media_device_register() macro. * * The caller is responsible for initializing the &media_device structure * before registration. The following fields of &media_device must be set: * * - &media_device.model must be filled with the device model name as a * NUL-terminated UTF-8 string. The device/model revision must not be * stored in this field.
* * The following fields are optional: * * - &media_device.serial is a unique serial number stored as a * NUL-terminated ASCII string. The field is big enough to store a GUID * in text form. If the hardware doesn't provide a unique serial number * this field must be left empty. * * - &media_device.bus_info represents the location of the device in the * system as a NUL-terminated ASCII string. For PCI/PCIe devices * &media_device.bus_info must be set to "PCI:" (or "PCIe:") followed by * the value of pci_name(). For USB devices, the usb_make_path() function * must be used. This field is used by applications to distinguish between * otherwise identical devices that don't provide a serial number. * * - &media_device.hw_revision is the hardware device revision in a * driver-specific format. When possible the revision should be formatted * with the KERNEL_VERSION() macro. * * .. note:: * * #) Upon successful registration a character device named media[0-9]+ is created. The device major and minor numbers are dynamic. The model name is exported as a sysfs attribute. * * #) Unregistering a media device that hasn't been registered is **NOT** safe. * * Return: returns zero on success or a negative error code. */ int __must_check __media_device_register(struct media_device *mdev, struct module *owner); /** * media_device_register() - Registers a media device element * * @mdev: pointer to struct &media_device * * This macro calls __media_device_register() passing %THIS_MODULE as * the __media_device_register() second argument (**owner**). */ #define media_device_register(mdev) __media_device_register(mdev, THIS_MODULE) /** * media_device_unregister() - Unregisters a media device element * * @mdev: pointer to struct &media_device * * It is safe to call this function on an unregistered (but initialised) * media device. */ void media_device_unregister(struct media_device *mdev); /** * media_device_register_entity() - registers a media entity inside a * previously registered media device. * * @mdev: pointer to struct &media_device * @entity: pointer to struct &media_entity to be registered * * Entities are identified by a unique positive integer ID. The media * controller framework will assign such an ID automatically. IDs are not guaranteed * to be contiguous, and the ID number can change on newer Kernel versions. * So, neither the driver nor userspace should hardcode ID numbers to refer * to the entities, but should instead use the framework to find the ID when * needed. * * The media_entity name, type and flags fields should be initialized before * calling media_device_register_entity(). Entities embedded in higher-level * standard structures can have some of those fields set by the higher-level * framework. * * If the device has pads, media_entity_pads_init() should be called before * this function. Otherwise, the &media_entity.pad and &media_entity.num_pads * should be zeroed before calling this function. * * Entities have flags that describe the entity capabilities and state: * * %MEDIA_ENT_FL_DEFAULT * indicates the default entity for a given type. * This can be used to report the default audio and video devices or the * default camera sensor. * * .. note:: * * Drivers should set the entity function before calling this function. * Please notice that the values %MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN and * %MEDIA_ENT_F_UNKNOWN should not be used by the drivers.
*/ int __must_check media_device_register_entity(struct media_device *mdev, struct media_entity *entity); /** * media_device_unregister_entity() - unregisters a media entity. * * @entity: pointer to struct &media_entity to be unregistered * * All links associated with the entity and all PADs are automatically * unregistered from the media_device when this function is called. * * Unregistering an entity will not change the IDs of the other entities and * the previously used ID will never be reused for newly registered entities. * * When a media device is unregistered, all its entities are unregistered * automatically. No manual entity unregistration is then required. * * .. note:: * * The media_entity instance itself must be freed explicitly by * the driver if required. */ void media_device_unregister_entity(struct media_entity *entity); /** * media_device_register_entity_notify() - Registers a media entity_notify * callback * * @mdev: The media device * @nptr: The media_entity_notify * * .. note:: * * When a new entity is registered, all the registered * media_entity_notify callbacks are invoked. */ void media_device_register_entity_notify(struct media_device *mdev, struct media_entity_notify *nptr); /** * media_device_unregister_entity_notify() - Unregister a media entity notify * callback * * @mdev: The media device * @nptr: The media_entity_notify * */ void media_device_unregister_entity_notify(struct media_device *mdev, struct media_entity_notify *nptr); /* Iterate over all entities. */ #define media_device_for_each_entity(entity, mdev) \ list_for_each_entry(entity, &(mdev)->entities, graph_obj.list) /* Iterate over all interfaces. */ #define media_device_for_each_intf(intf, mdev) \ list_for_each_entry(intf, &(mdev)->interfaces, graph_obj.list) /* Iterate over all pads. */ #define media_device_for_each_pad(pad, mdev) \ list_for_each_entry(pad, &(mdev)->pads, graph_obj.list) /* Iterate over all links. */ #define media_device_for_each_link(link, mdev) \ list_for_each_entry(link, &(mdev)->links, graph_obj.list) /** * media_device_pci_init() - create and initialize a * struct &media_device from a PCI device. * * @mdev: pointer to struct &media_device * @pci_dev: pointer to struct pci_dev * @name: media device name. If %NULL, the routine will use the default * name for the pci device, given by the pci_name() macro. */ void media_device_pci_init(struct media_device *mdev, struct pci_dev *pci_dev, const char *name); /** * __media_device_usb_init() - create and initialize a * struct &media_device from a USB device. * * @mdev: pointer to struct &media_device * @udev: pointer to struct usb_device * @board_name: media device name. If %NULL, the routine will use the usb * product name, if available. * @driver_name: name of the driver. If %NULL, the routine will use the name * given by ``udev->dev->driver->name``, which is usually the wrong * thing to do. * * .. note:: * * It is better to call media_device_usb_init() instead, as * such macro fills driver_name with %KBUILD_MODNAME.
*/ void __media_device_usb_init(struct media_device *mdev, struct usb_device *udev, const char *board_name, const char *driver_name); #else static inline void media_device_init(struct media_device *mdev) { } static inline int media_device_register(struct media_device *mdev) { return 0; } static inline void media_device_unregister(struct media_device *mdev) { } static inline void media_device_cleanup(struct media_device *mdev) { } static inline int media_device_register_entity(struct media_device *mdev, struct media_entity *entity) { return 0; } static inline void media_device_unregister_entity(struct media_entity *entity) { } static inline void media_device_register_entity_notify( struct media_device *mdev, struct media_entity_notify *nptr) { } static inline void media_device_unregister_entity_notify( struct media_device *mdev, struct media_entity_notify *nptr) { } static inline void media_device_pci_init(struct media_device *mdev, struct pci_dev *pci_dev, char *name) { } static inline void __media_device_usb_init(struct media_device *mdev, struct usb_device *udev, char *board_name, char *driver_name) { } #endif /* CONFIG_MEDIA_CONTROLLER */ /** * media_device_usb_init() - create and initialize a * struct &media_device from a USB device. * * @mdev: pointer to struct &media_device * @udev: pointer to struct usb_device * @name: media device name. If %NULL, the routine will use the usb * product name, if available. * * This macro calls __media_device_usb_init(), passing its * **driver_name** parameter filled with * %KBUILD_MODNAME. */ #define media_device_usb_init(mdev, udev, name) \ __media_device_usb_init(mdev, udev, name, KBUILD_MODNAME) /** * media_set_bus_info() - Set bus_info field * * @bus_info: Variable where to write the bus info (char array) * @bus_info_size: Length of the bus_info * @dev: Related struct device * * Sets bus information based on &dev. This is currently done for PCI and * platform devices. dev is required to be non-NULL for this to happen. * * This function is not meant to be called from drivers. */ static inline void media_set_bus_info(char *bus_info, size_t bus_info_size, struct device *dev) { if (!dev) strscpy(bus_info, "no bus info", bus_info_size); else if (dev_is_platform(dev)) snprintf(bus_info, bus_info_size, "platform:%s", dev_name(dev)); else if (dev_is_pci(dev)) snprintf(bus_info, bus_info_size, "PCI:%s", dev_name(dev)); } #endif
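/*
 * Usage sketch (not part of this header): the init -> register entities ->
 * register device -> unregister -> cleanup sequence described in the
 * kerneldoc above, as a hypothetical driver might implement it. The
 * "demo_" names, the model string and the entity function are
 * illustrative assumptions.
 */
#include <linux/device.h>
#include <linux/string.h>
#include <media/media-device.h>
#include <media/media-entity.h>

static struct media_device demo_mdev;
static struct media_entity demo_entity;
static struct media_pad demo_pad = { .flags = MEDIA_PAD_FL_SOURCE };

static int demo_media_register(struct device *parent)
{
	int ret;

	/* 1) Initialise the media device before touching graph objects. */
	demo_mdev.dev = parent;
	strscpy(demo_mdev.model, "demo-model", sizeof(demo_mdev.model));
	media_device_init(&demo_mdev);

	/* 2) Register entities and create links while still unregistered. */
	demo_entity.name = "demo-entity";
	demo_entity.function = MEDIA_ENT_F_IO_V4L;
	ret = media_entity_pads_init(&demo_entity, 1, &demo_pad);
	if (ret)
		goto err_cleanup;
	ret = media_device_register_entity(&demo_mdev, &demo_entity);
	if (ret)
		goto err_cleanup;

	/* 3) Only now expose the media device node to user-space. */
	ret = media_device_register(&demo_mdev);
	if (ret)
		goto err_unregister_entity;
	return 0;

err_unregister_entity:
	media_device_unregister_entity(&demo_entity);
err_cleanup:
	media_device_cleanup(&demo_mdev);
	return ret;
}

static void demo_media_unregister(void)
{
	/* Unregistering the device also unregisters its entities. */
	media_device_unregister(&demo_mdev);
	media_device_cleanup(&demo_mdev);
}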
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2007 Oracle. All rights reserved. */ #include <linux/kernel.h> #include <linux/bio.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/fsnotify.h> #include <linux/pagemap.h> #include <linux/highmem.h> #include <linux/time.h> #include <linux/string.h> #include <linux/backing-dev.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/writeback.h> #include <linux/compat.h> #include <linux/security.h> #include <linux/xattr.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/uuid.h> #include <linux/btrfs.h> #include <linux/uaccess.h> #include <linux/iversion.h> #include <linux/fileattr.h> #include <linux/fsverity.h> #include <linux/sched/xacct.h> #include <linux/io_uring/cmd.h> #include "ctree.h" #include "disk-io.h" #include "export.h" #include "transaction.h" #include "btrfs_inode.h" #include "volumes.h" #include "locking.h" #include "backref.h" #include "send.h" #include "dev-replace.h" #include "props.h" #include "sysfs.h" #include "qgroup.h" #include "tree-log.h" #include "compression.h" #include "space-info.h" #include "block-group.h" #include "fs.h" #include "accessors.h" #include "extent-tree.h" #include "root-tree.h" #include "defrag.h" #include "dir-item.h" #include "uuid-tree.h" #include "ioctl.h" #include "file.h" #include "scrub.h" #include "super.h" #ifdef CONFIG_64BIT /* If we have a 32-bit userspace and 64-bit kernel, then the UAPI * structures are incorrect, as the timespec structure from userspace * is 4 bytes too small. We define these alternatives here to teach * the kernel about the 32-bit struct packing.
*/ struct btrfs_ioctl_timespec_32 { __u64 sec; __u32 nsec; } __attribute__ ((__packed__)); struct btrfs_ioctl_received_subvol_args_32 { char uuid[BTRFS_UUID_SIZE]; /* in */ __u64 stransid; /* in */ __u64 rtransid; /* out */ struct btrfs_ioctl_timespec_32 stime; /* in */ struct btrfs_ioctl_timespec_32 rtime; /* out */ __u64 flags; /* in */ __u64 reserved[16]; /* in */ } __attribute__ ((__packed__)); #define BTRFS_IOC_SET_RECEIVED_SUBVOL_32 _IOWR(BTRFS_IOCTL_MAGIC, 37, \ struct btrfs_ioctl_received_subvol_args_32) #endif #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT) struct btrfs_ioctl_send_args_32 { __s64 send_fd; /* in */ __u64 clone_sources_count; /* in */ compat_uptr_t clone_sources; /* in */ __u64 parent_root; /* in */ __u64 flags; /* in */ __u32 version; /* in */ __u8 reserved[28]; /* in */ } __attribute__ ((__packed__)); #define BTRFS_IOC_SEND_32 _IOW(BTRFS_IOCTL_MAGIC, 38, \ struct btrfs_ioctl_send_args_32) struct btrfs_ioctl_encoded_io_args_32 { compat_uptr_t iov; compat_ulong_t iovcnt; __s64 offset; __u64 flags; __u64 len; __u64 unencoded_len; __u64 unencoded_offset; __u32 compression; __u32 encryption; __u8 reserved[64]; }; #define BTRFS_IOC_ENCODED_READ_32 _IOR(BTRFS_IOCTL_MAGIC, 64, \ struct btrfs_ioctl_encoded_io_args_32) #define BTRFS_IOC_ENCODED_WRITE_32 _IOW(BTRFS_IOCTL_MAGIC, 64, \ struct btrfs_ioctl_encoded_io_args_32) #endif /* Mask out flags that are inappropriate for the given type of inode. */ static unsigned int btrfs_mask_fsflags_for_type(const struct inode *inode, unsigned int flags) { if (S_ISDIR(inode->i_mode)) return flags; else if (S_ISREG(inode->i_mode)) return flags & ~FS_DIRSYNC_FL; else return flags & (FS_NODUMP_FL | FS_NOATIME_FL); } /* * Export internal inode flags to the format expected by the FS_IOC_GETFLAGS * ioctl. */ static unsigned int btrfs_inode_flags_to_fsflags(const struct btrfs_inode *inode) { unsigned int iflags = 0; u32 flags = inode->flags; u32 ro_flags = inode->ro_flags; if (flags & BTRFS_INODE_SYNC) iflags |= FS_SYNC_FL; if (flags & BTRFS_INODE_IMMUTABLE) iflags |= FS_IMMUTABLE_FL; if (flags & BTRFS_INODE_APPEND) iflags |= FS_APPEND_FL; if (flags & BTRFS_INODE_NODUMP) iflags |= FS_NODUMP_FL; if (flags & BTRFS_INODE_NOATIME) iflags |= FS_NOATIME_FL; if (flags & BTRFS_INODE_DIRSYNC) iflags |= FS_DIRSYNC_FL; if (flags & BTRFS_INODE_NODATACOW) iflags |= FS_NOCOW_FL; if (ro_flags & BTRFS_INODE_RO_VERITY) iflags |= FS_VERITY_FL; if (flags & BTRFS_INODE_NOCOMPRESS) iflags |= FS_NOCOMP_FL; else if (flags & BTRFS_INODE_COMPRESS) iflags |= FS_COMPR_FL; return iflags; } /* * Update inode->i_flags based on the btrfs internal flags. 
*/ void btrfs_sync_inode_flags_to_i_flags(struct btrfs_inode *inode) { unsigned int new_fl = 0; if (inode->flags & BTRFS_INODE_SYNC) new_fl |= S_SYNC; if (inode->flags & BTRFS_INODE_IMMUTABLE) new_fl |= S_IMMUTABLE; if (inode->flags & BTRFS_INODE_APPEND) new_fl |= S_APPEND; if (inode->flags & BTRFS_INODE_NOATIME) new_fl |= S_NOATIME; if (inode->flags & BTRFS_INODE_DIRSYNC) new_fl |= S_DIRSYNC; if (inode->ro_flags & BTRFS_INODE_RO_VERITY) new_fl |= S_VERITY; set_mask_bits(&inode->vfs_inode.i_flags, S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC | S_VERITY, new_fl); } /* * Check if @flags are a supported and valid set of FS_*_FL flags and that * the old and new flags are not conflicting. */ static int check_fsflags(unsigned int old_flags, unsigned int flags) { if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ FS_NOATIME_FL | FS_NODUMP_FL | \ FS_SYNC_FL | FS_DIRSYNC_FL | \ FS_NOCOMP_FL | FS_COMPR_FL | FS_NOCOW_FL)) return -EOPNOTSUPP; /* COMPR and NOCOMP must not both be set at the same time */ if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL)) return -EINVAL; if ((flags & FS_COMPR_FL) && (flags & FS_NOCOW_FL)) return -EINVAL; /* NOCOW and compression options are mutually exclusive */ if ((old_flags & FS_NOCOW_FL) && (flags & (FS_COMPR_FL | FS_NOCOMP_FL))) return -EINVAL; if ((flags & FS_NOCOW_FL) && (old_flags & (FS_COMPR_FL | FS_NOCOMP_FL))) return -EINVAL; return 0; } static int check_fsflags_compatible(const struct btrfs_fs_info *fs_info, unsigned int flags) { if (btrfs_is_zoned(fs_info) && (flags & FS_NOCOW_FL)) return -EPERM; return 0; } int btrfs_check_ioctl_vol_args_path(const struct btrfs_ioctl_vol_args *vol_args) { if (memchr(vol_args->name, 0, sizeof(vol_args->name)) == NULL) return -ENAMETOOLONG; return 0; } static int btrfs_check_ioctl_vol_args2_subvol_name(const struct btrfs_ioctl_vol_args_v2 *vol_args2) { if (memchr(vol_args2->name, 0, sizeof(vol_args2->name)) == NULL) return -ENAMETOOLONG; return 0; } /* * Set flags/xflags from the internal inode flags. The remaining items of * fsxattr are zeroed.
*/ int btrfs_fileattr_get(struct dentry *dentry, struct fileattr *fa) { const struct btrfs_inode *inode = BTRFS_I(d_inode(dentry)); fileattr_fill_flags(fa, btrfs_inode_flags_to_fsflags(inode)); return 0; } int btrfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, struct fileattr *fa) { struct btrfs_inode *inode = BTRFS_I(d_inode(dentry)); struct btrfs_root *root = inode->root; struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_trans_handle *trans; unsigned int fsflags, old_fsflags; int ret; const char *comp = NULL; u32 inode_flags; if (btrfs_root_readonly(root)) return -EROFS; if (fileattr_has_fsx(fa)) return -EOPNOTSUPP; fsflags = btrfs_mask_fsflags_for_type(&inode->vfs_inode, fa->flags); old_fsflags = btrfs_inode_flags_to_fsflags(inode); ret = check_fsflags(old_fsflags, fsflags); if (ret) return ret; ret = check_fsflags_compatible(fs_info, fsflags); if (ret) return ret; inode_flags = inode->flags; if (fsflags & FS_SYNC_FL) inode_flags |= BTRFS_INODE_SYNC; else inode_flags &= ~BTRFS_INODE_SYNC; if (fsflags & FS_IMMUTABLE_FL) inode_flags |= BTRFS_INODE_IMMUTABLE; else inode_flags &= ~BTRFS_INODE_IMMUTABLE; if (fsflags & FS_APPEND_FL) inode_flags |= BTRFS_INODE_APPEND; else inode_flags &= ~BTRFS_INODE_APPEND; if (fsflags & FS_NODUMP_FL) inode_flags |= BTRFS_INODE_NODUMP; else inode_flags &= ~BTRFS_INODE_NODUMP; if (fsflags & FS_NOATIME_FL) inode_flags |= BTRFS_INODE_NOATIME; else inode_flags &= ~BTRFS_INODE_NOATIME; /* If coming from FS_IOC_FSSETXATTR then skip unconverted flags */ if (!fa->flags_valid) { /* 1 item for the inode */ trans = btrfs_start_transaction(root, 1); if (IS_ERR(trans)) return PTR_ERR(trans); goto update_flags; } if (fsflags & FS_DIRSYNC_FL) inode_flags |= BTRFS_INODE_DIRSYNC; else inode_flags &= ~BTRFS_INODE_DIRSYNC; if (fsflags & FS_NOCOW_FL) { if (S_ISREG(inode->vfs_inode.i_mode)) { /* * It's safe to turn csums off here, no extents exist. * Otherwise we want the flag to reflect the real COW * status of the file and will not set it. */ if (inode->vfs_inode.i_size == 0) inode_flags |= BTRFS_INODE_NODATACOW | BTRFS_INODE_NODATASUM; } else { inode_flags |= BTRFS_INODE_NODATACOW; } } else { /* * Revert back under same assumptions as above */ if (S_ISREG(inode->vfs_inode.i_mode)) { if (inode->vfs_inode.i_size == 0) inode_flags &= ~(BTRFS_INODE_NODATACOW | BTRFS_INODE_NODATASUM); } else { inode_flags &= ~BTRFS_INODE_NODATACOW; } } /* * The COMPRESS flag can only be changed by users, while the NOCOMPRESS * flag may be changed automatically if compression code won't make * things smaller. 
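 * * E.g. "chattr +c file" sets FS_COMPR_FL, which below translates to BTRFS_INODE_COMPRESS plus a "btrfs.compression" property, while "chattr +m" (FS_NOCOMP_FL) sets BTRFS_INODE_NOCOMPRESS and clears that property.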
*/ if (fsflags & FS_NOCOMP_FL) { inode_flags &= ~BTRFS_INODE_COMPRESS; inode_flags |= BTRFS_INODE_NOCOMPRESS; } else if (fsflags & FS_COMPR_FL) { if (IS_SWAPFILE(&inode->vfs_inode)) return -ETXTBSY; inode_flags |= BTRFS_INODE_COMPRESS; inode_flags &= ~BTRFS_INODE_NOCOMPRESS; comp = btrfs_compress_type2str(fs_info->compress_type); if (!comp || comp[0] == 0) comp = btrfs_compress_type2str(BTRFS_COMPRESS_ZLIB); } else { inode_flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS); } /* * 1 for inode item * 2 for properties */ trans = btrfs_start_transaction(root, 3); if (IS_ERR(trans)) return PTR_ERR(trans); if (comp) { ret = btrfs_set_prop(trans, inode, "btrfs.compression", comp, strlen(comp), 0); if (ret) { btrfs_abort_transaction(trans, ret); goto out_end_trans; } } else { ret = btrfs_set_prop(trans, inode, "btrfs.compression", NULL, 0, 0); if (ret && ret != -ENODATA) { btrfs_abort_transaction(trans, ret); goto out_end_trans; } } update_flags: inode->flags = inode_flags; btrfs_update_inode_mapping_flags(inode); btrfs_sync_inode_flags_to_i_flags(inode); inode_inc_iversion(&inode->vfs_inode); inode_set_ctime_current(&inode->vfs_inode); ret = btrfs_update_inode(trans, inode); out_end_trans: btrfs_end_transaction(trans); return ret; } static int btrfs_ioctl_getversion(const struct inode *inode, int __user *arg) { return put_user(inode->i_generation, arg); } static noinline int btrfs_ioctl_fitrim(struct btrfs_fs_info *fs_info, void __user *arg) { struct btrfs_device *device; struct fstrim_range range; u64 minlen = ULLONG_MAX; u64 num_devices = 0; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; /* * btrfs_trim_block_group() depends on space cache, which is not * available in zoned filesystem. So, disallow fitrim on a zoned * filesystem for now. */ if (btrfs_is_zoned(fs_info)) return -EOPNOTSUPP; /* * If the fs is mounted with nologreplay, which requires it to be * mounted in RO mode as well, we can not allow discard on free space * inside block groups, because log trees refer to extents that are not * pinned in a block group's free space cache (pinning the extents is * precisely the first phase of replaying a log tree). */ if (btrfs_test_opt(fs_info, NOLOGREPLAY)) return -EROFS; rcu_read_lock(); list_for_each_entry_rcu(device, &fs_info->fs_devices->devices, dev_list) { if (!device->bdev || !bdev_max_discard_sectors(device->bdev)) continue; num_devices++; minlen = min_t(u64, bdev_discard_granularity(device->bdev), minlen); } rcu_read_unlock(); if (!num_devices) return -EOPNOTSUPP; if (copy_from_user(&range, arg, sizeof(range))) return -EFAULT; /* * NOTE: Don't truncate the range using super->total_bytes. Bytenr of * block group is in the logical address space, which can be any * sectorsize aligned bytenr in the range [0, U64_MAX]. */ if (range.len < fs_info->sectorsize) return -EINVAL; range.minlen = max(range.minlen, minlen); ret = btrfs_trim_fs(fs_info, &range); if (copy_to_user(arg, &range, sizeof(range))) return -EFAULT; return ret; } /* * Calculate the number of transaction items to reserve for creating a subvolume * or snapshot, not including the inode, directory entries, or parent directory. */ static unsigned int create_subvol_num_items(const struct btrfs_qgroup_inherit *inherit) { /* * 1 to add root block * 1 to add root item * 1 to add root ref * 1 to add root backref * 1 to add UUID item * 1 to add qgroup info * 1 to add qgroup limit * * Ideally the last two would only be accounted if qgroups are enabled, * but that can change between now and the time we would insert them. 
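 * * For example (sketch): creating a subvolume that inherits two qgroups reserves 7 + 2 * 2 = 11 items from this helper, on top of what btrfs_new_inode_prepare() accounts for the inode and directory entries.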
*/ unsigned int num_items = 7; if (inherit) { /* 2 to add qgroup relations for each inherited qgroup */ num_items += 2 * inherit->num_qgroups; } return num_items; } static noinline int create_subvol(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, struct btrfs_qgroup_inherit *inherit) { struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); struct btrfs_trans_handle *trans; struct btrfs_key key; struct btrfs_root_item *root_item; struct btrfs_inode_item *inode_item; struct extent_buffer *leaf; struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_root *new_root; struct btrfs_block_rsv block_rsv; struct timespec64 cur_time = current_time(dir); struct btrfs_new_inode_args new_inode_args = { .dir = dir, .dentry = dentry, .subvol = true, }; unsigned int trans_num_items; int ret; dev_t anon_dev; u64 objectid; u64 qgroup_reserved = 0; root_item = kzalloc(sizeof(*root_item), GFP_KERNEL); if (!root_item) return -ENOMEM; ret = btrfs_get_free_objectid(fs_info->tree_root, &objectid); if (ret) goto out_root_item; /* * Don't create subvolume whose level is not zero. Or qgroup will be * screwed up since it assumes subvolume qgroup's level to be 0. */ if (btrfs_qgroup_level(objectid)) { ret = -ENOSPC; goto out_root_item; } ret = get_anon_bdev(&anon_dev); if (ret < 0) goto out_root_item; new_inode_args.inode = btrfs_new_subvol_inode(idmap, dir); if (!new_inode_args.inode) { ret = -ENOMEM; goto out_anon_dev; } ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items); if (ret) goto out_inode; trans_num_items += create_subvol_num_items(inherit); btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP); ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, trans_num_items, false); if (ret) goto out_new_inode_args; qgroup_reserved = block_rsv.qgroup_rsv_reserved; trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto out_release_rsv; } btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved); qgroup_reserved = 0; trans->block_rsv = &block_rsv; trans->bytes_reserved = block_rsv.size; ret = btrfs_qgroup_inherit(trans, 0, objectid, btrfs_root_id(root), inherit); if (ret) goto out; leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0, 0, BTRFS_NESTING_NORMAL); if (IS_ERR(leaf)) { ret = PTR_ERR(leaf); goto out; } btrfs_mark_buffer_dirty(trans, leaf); inode_item = &root_item->inode; btrfs_set_stack_inode_generation(inode_item, 1); btrfs_set_stack_inode_size(inode_item, 3); btrfs_set_stack_inode_nlink(inode_item, 1); btrfs_set_stack_inode_nbytes(inode_item, fs_info->nodesize); btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755); btrfs_set_root_flags(root_item, 0); btrfs_set_root_limit(root_item, 0); btrfs_set_stack_inode_flags(inode_item, BTRFS_INODE_ROOT_ITEM_INIT); btrfs_set_root_bytenr(root_item, leaf->start); btrfs_set_root_generation(root_item, trans->transid); btrfs_set_root_level(root_item, 0); btrfs_set_root_refs(root_item, 1); btrfs_set_root_used(root_item, leaf->len); btrfs_set_root_last_snapshot(root_item, 0); btrfs_set_root_generation_v2(root_item, btrfs_root_generation(root_item)); generate_random_guid(root_item->uuid); btrfs_set_stack_timespec_sec(&root_item->otime, cur_time.tv_sec); btrfs_set_stack_timespec_nsec(&root_item->otime, cur_time.tv_nsec); root_item->ctime = root_item->otime; btrfs_set_root_ctransid(root_item, trans->transid); btrfs_set_root_otransid(root_item, trans->transid); btrfs_tree_unlock(leaf); btrfs_set_root_dirid(root_item, BTRFS_FIRST_FREE_OBJECTID); key.objectid = objectid; key.type = 
BTRFS_ROOT_ITEM_KEY; key.offset = 0; ret = btrfs_insert_root(trans, fs_info->tree_root, &key, root_item); if (ret) { int ret2; /* * Since we don't abort the transaction in this case, free the * tree block so that we don't leak space and leave the * filesystem in an inconsistent state (an extent item in the * extent tree with a backreference for a root that does not * exist). */ btrfs_tree_lock(leaf); btrfs_clear_buffer_dirty(trans, leaf); btrfs_tree_unlock(leaf); ret2 = btrfs_free_tree_block(trans, objectid, leaf, 0, 1); if (ret2 < 0) btrfs_abort_transaction(trans, ret2); free_extent_buffer(leaf); goto out; } free_extent_buffer(leaf); leaf = NULL; new_root = btrfs_get_new_fs_root(fs_info, objectid, &anon_dev); if (IS_ERR(new_root)) { ret = PTR_ERR(new_root); btrfs_abort_transaction(trans, ret); goto out; } /* anon_dev is owned by new_root now. */ anon_dev = 0; BTRFS_I(new_inode_args.inode)->root = new_root; /* ... and new_root is owned by new_inode_args.inode now. */ ret = btrfs_record_root_in_trans(trans, new_root); if (ret) { btrfs_abort_transaction(trans, ret); goto out; } ret = btrfs_uuid_tree_add(trans, root_item->uuid, BTRFS_UUID_KEY_SUBVOL, objectid); if (ret) { btrfs_abort_transaction(trans, ret); goto out; } btrfs_record_new_subvolume(trans, BTRFS_I(dir)); ret = btrfs_create_new_inode(trans, &new_inode_args); if (ret) { btrfs_abort_transaction(trans, ret); goto out; } d_instantiate_new(dentry, new_inode_args.inode); new_inode_args.inode = NULL; out: trans->block_rsv = NULL; trans->bytes_reserved = 0; btrfs_end_transaction(trans); out_release_rsv: btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL); if (qgroup_reserved) btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved); out_new_inode_args: btrfs_new_inode_args_destroy(&new_inode_args); out_inode: iput(new_inode_args.inode); out_anon_dev: if (anon_dev) free_anon_bdev(anon_dev); out_root_item: kfree(root_item); return ret; } static int create_snapshot(struct btrfs_root *root, struct inode *dir, struct dentry *dentry, bool readonly, struct btrfs_qgroup_inherit *inherit) { struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); struct inode *inode; struct btrfs_pending_snapshot *pending_snapshot; unsigned int trans_num_items; struct btrfs_trans_handle *trans; struct btrfs_block_rsv *block_rsv; u64 qgroup_reserved = 0; int ret; /* We do not support snapshotting on extent tree v2 right now.
*/ if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { btrfs_warn(fs_info, "extent tree v2 doesn't support snapshotting yet"); return -EOPNOTSUPP; } if (btrfs_root_refs(&root->root_item) == 0) return -ENOENT; if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) return -EINVAL; if (atomic_read(&root->nr_swapfiles)) { btrfs_warn(fs_info, "cannot snapshot subvolume with active swapfile"); return -ETXTBSY; } pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_KERNEL); if (!pending_snapshot) return -ENOMEM; ret = get_anon_bdev(&pending_snapshot->anon_dev); if (ret < 0) goto free_pending; pending_snapshot->root_item = kzalloc(sizeof(struct btrfs_root_item), GFP_KERNEL); pending_snapshot->path = btrfs_alloc_path(); if (!pending_snapshot->root_item || !pending_snapshot->path) { ret = -ENOMEM; goto free_pending; } block_rsv = &pending_snapshot->block_rsv; btrfs_init_block_rsv(block_rsv, BTRFS_BLOCK_RSV_TEMP); /* * 1 to add dir item * 1 to add dir index * 1 to update parent inode item */ trans_num_items = create_subvol_num_items(inherit) + 3; ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root, block_rsv, trans_num_items, false); if (ret) goto free_pending; qgroup_reserved = block_rsv->qgroup_rsv_reserved; pending_snapshot->dentry = dentry; pending_snapshot->root = root; pending_snapshot->readonly = readonly; pending_snapshot->dir = BTRFS_I(dir); pending_snapshot->inherit = inherit; trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto fail; } ret = btrfs_record_root_in_trans(trans, BTRFS_I(dir)->root); if (ret) { btrfs_end_transaction(trans); goto fail; } btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved); qgroup_reserved = 0; trans->pending_snapshot = pending_snapshot; ret = btrfs_commit_transaction(trans); if (ret) goto fail; ret = pending_snapshot->error; if (ret) goto fail; ret = btrfs_orphan_cleanup(pending_snapshot->snap); if (ret) goto fail; inode = btrfs_lookup_dentry(d_inode(dentry->d_parent), dentry); if (IS_ERR(inode)) { ret = PTR_ERR(inode); goto fail; } d_instantiate(dentry, inode); ret = 0; pending_snapshot->anon_dev = 0; fail: /* Prevent double freeing of anon_dev */ if (ret && pending_snapshot->snap) pending_snapshot->snap->anon_dev = 0; btrfs_put_root(pending_snapshot->snap); btrfs_block_rsv_release(fs_info, block_rsv, (u64)-1, NULL); if (qgroup_reserved) btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved); free_pending: if (pending_snapshot->anon_dev) free_anon_bdev(pending_snapshot->anon_dev); kfree(pending_snapshot->root_item); btrfs_free_path(pending_snapshot->path); kfree(pending_snapshot); return ret; } /* copy of may_delete in fs/namei.c() * Check whether we can remove a link victim from directory dir, check * whether the type of victim is right. * 1. We can't do it if dir is read-only (done in permission()) * 2. We should have write and exec permissions on dir * 3. We can't remove anything from append-only dir * 4. We can't do anything with immutable dir (done in permission()) * 5. If the sticky bit on dir is set we should either * a. be owner of dir, or * b. be owner of victim, or * c. have CAP_FOWNER capability * 6. If the victim is append-only or immutable we can't do anything with * links pointing to it. * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR. * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR. * 9. We can't remove a root or mountpoint. * 10. We don't allow removal of NFS sillyrenamed files; it's handled by * nfs_async_unlink(). 
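 * * E.g. rule 5 above means that in a sticky, world-writable directory the deletion is rejected with -EPERM unless the caller owns the directory or the victim, or has CAP_FOWNER; this is what the check_sticky() call below implements.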
*/ static int btrfs_may_delete(struct mnt_idmap *idmap, struct inode *dir, struct dentry *victim, int isdir) { int error; if (d_really_is_negative(victim)) return -ENOENT; /* The @victim is not inside @dir. */ if (d_inode(victim->d_parent) != dir) return -EINVAL; audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE); error = inode_permission(idmap, dir, MAY_WRITE | MAY_EXEC); if (error) return error; if (IS_APPEND(dir)) return -EPERM; if (check_sticky(idmap, dir, d_inode(victim)) || IS_APPEND(d_inode(victim)) || IS_IMMUTABLE(d_inode(victim)) || IS_SWAPFILE(d_inode(victim))) return -EPERM; if (isdir) { if (!d_is_dir(victim)) return -ENOTDIR; if (IS_ROOT(victim)) return -EBUSY; } else if (d_is_dir(victim)) return -EISDIR; if (IS_DEADDIR(dir)) return -ENOENT; if (victim->d_flags & DCACHE_NFSFS_RENAMED) return -EBUSY; return 0; } /* copy of may_create in fs/namei.c() */ static inline int btrfs_may_create(struct mnt_idmap *idmap, struct inode *dir, const struct dentry *child) { if (d_really_is_positive(child)) return -EEXIST; if (IS_DEADDIR(dir)) return -ENOENT; if (!fsuidgid_has_mapping(dir->i_sb, idmap)) return -EOVERFLOW; return inode_permission(idmap, dir, MAY_WRITE | MAY_EXEC); } /* * Create a new subvolume below @parent. This is largely modeled after * sys_mkdirat and vfs_mkdir, but we only do a single component lookup * inside this filesystem so it's quite a bit simpler. */ static noinline int btrfs_mksubvol(const struct path *parent, struct mnt_idmap *idmap, const char *name, int namelen, struct btrfs_root *snap_src, bool readonly, struct btrfs_qgroup_inherit *inherit) { struct inode *dir = d_inode(parent->dentry); struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); struct dentry *dentry; struct fscrypt_str name_str = FSTR_INIT((char *)name, namelen); int error; error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT); if (error == -EINTR) return error; dentry = lookup_one(idmap, &QSTR_LEN(name, namelen), parent->dentry); error = PTR_ERR(dentry); if (IS_ERR(dentry)) goto out_unlock; error = btrfs_may_create(idmap, dir, dentry); if (error) goto out_dput; /* * Even if this name doesn't exist, we may get hash collisions. * Check for them now, when we can safely fail. */ error = btrfs_check_dir_item_collision(BTRFS_I(dir)->root, dir->i_ino, &name_str); if (error) goto out_dput; down_read(&fs_info->subvol_sem); if (btrfs_root_refs(&BTRFS_I(dir)->root->root_item) == 0) goto out_up_read; if (snap_src) error = create_snapshot(snap_src, dir, dentry, readonly, inherit); else error = create_subvol(idmap, dir, dentry, inherit); if (!error) fsnotify_mkdir(dir, dentry); out_up_read: up_read(&fs_info->subvol_sem); out_dput: dput(dentry); out_unlock: btrfs_inode_unlock(BTRFS_I(dir), 0); return error; } static noinline int btrfs_mksnapshot(const struct path *parent, struct mnt_idmap *idmap, const char *name, int namelen, struct btrfs_root *root, bool readonly, struct btrfs_qgroup_inherit *inherit) { int ret; /* * Force new buffered writes to reserve space even when NOCOW is * possible. This is to avoid later writeback (running delalloc) to * fall back to COW mode and unexpectedly fail with ENOSPC. */ btrfs_drew_read_lock(&root->snapshot_lock); ret = btrfs_start_delalloc_snapshot(root, false); if (ret) goto out; /* * All previous writes have started writeback in NOCOW mode, so now * we force future writes to fall back to COW mode during snapshot * creation.
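 * * Ordering matters here (a sketch of the reasoning): snapshot_force_cow is bumped first, then btrfs_wait_ordered_extents() drains the NOCOW writeback started above, so by the time the snapshot is taken no new NOCOW write can slip in between.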
*/ atomic_inc(&root->snapshot_force_cow); btrfs_wait_ordered_extents(root, U64_MAX, NULL); ret = btrfs_mksubvol(parent, idmap, name, namelen, root, readonly, inherit); atomic_dec(&root->snapshot_force_cow); out: btrfs_drew_read_unlock(&root->snapshot_lock); return ret; } /* * Try to start exclusive operation @type or cancel it if it's running. * * Return: * 0 - normal mode, newly claimed op started * >0 - normal mode, something else is running, * return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS to user space * ECANCELED - cancel mode, successful cancel * ENOTCONN - cancel mode, operation not running anymore */ static int exclop_start_or_cancel_reloc(struct btrfs_fs_info *fs_info, enum btrfs_exclusive_operation type, bool cancel) { if (!cancel) { /* Start normal op */ if (!btrfs_exclop_start(fs_info, type)) return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; /* Exclusive operation is now claimed */ return 0; } /* Cancel running op */ if (btrfs_exclop_start_try_lock(fs_info, type)) { /* * This blocks any exclop finish from setting it to NONE, so we * request cancellation. Either it runs and we will wait for it, * or it has finished and no waiting will happen. */ atomic_inc(&fs_info->reloc_cancel_req); btrfs_exclop_start_unlock(fs_info); if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) wait_on_bit(&fs_info->flags, BTRFS_FS_RELOC_RUNNING, TASK_INTERRUPTIBLE); return -ECANCELED; } /* Something else is running or none */ return -ENOTCONN; } static noinline int btrfs_ioctl_resize(struct file *file, void __user *arg) { BTRFS_DEV_LOOKUP_ARGS(args); struct btrfs_root *root = BTRFS_I(file_inode(file))->root; struct btrfs_fs_info *fs_info = root->fs_info; u64 new_size; u64 old_size; u64 devid = 1; struct btrfs_ioctl_vol_args *vol_args; struct btrfs_device *device = NULL; char *sizestr; char *devstr = NULL; int ret = 0; int mod = 0; bool cancel; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = mnt_want_write_file(file); if (ret) return ret; /* * Read the arguments before checking exclusivity to be able to * distinguish regular resize and cancel */ vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) { ret = PTR_ERR(vol_args); goto out_drop; } ret = btrfs_check_ioctl_vol_args_path(vol_args); if (ret < 0) goto out_free; sizestr = vol_args->name; cancel = (strcmp("cancel", sizestr) == 0); ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_RESIZE, cancel); if (ret) goto out_free; /* Exclusive operation is now claimed */ devstr = strchr(sizestr, ':'); if (devstr) { sizestr = devstr + 1; *devstr = '\0'; devstr = vol_args->name; ret = kstrtoull(devstr, 10, &devid); if (ret) goto out_finish; if (!devid) { ret = -EINVAL; goto out_finish; } btrfs_info(fs_info, "resizing devid %llu", devid); } args.devid = devid; device = btrfs_find_device(fs_info->fs_devices, &args); if (!device) { btrfs_info(fs_info, "resizer unable to find device %llu", devid); ret = -ENODEV; goto out_finish; } if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) { btrfs_info(fs_info, "resizer unable to apply on readonly device %llu", devid); ret = -EPERM; goto out_finish; } if (!strcmp(sizestr, "max")) new_size = bdev_nr_bytes(device->bdev); else { char *retptr; if (sizestr[0] == '-') { mod = -1; sizestr++; } else if (sizestr[0] == '+') { mod = 1; sizestr++; } new_size = memparse(sizestr, &retptr); if (*retptr != '\0' || new_size == 0) { ret = -EINVAL; goto out_finish; } } if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) { ret = -EPERM; goto out_finish; } old_size = 
btrfs_device_get_total_bytes(device); if (mod < 0) { if (new_size > old_size) { ret = -EINVAL; goto out_finish; } new_size = old_size - new_size; } else if (mod > 0) { if (new_size > ULLONG_MAX - old_size) { ret = -ERANGE; goto out_finish; } new_size = old_size + new_size; } if (new_size < SZ_256M) { ret = -EINVAL; goto out_finish; } if (new_size > bdev_nr_bytes(device->bdev)) { ret = -EFBIG; goto out_finish; } new_size = round_down(new_size, fs_info->sectorsize); if (new_size > old_size) { struct btrfs_trans_handle *trans; trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto out_finish; } ret = btrfs_grow_device(trans, device, new_size); btrfs_commit_transaction(trans); } else if (new_size < old_size) { ret = btrfs_shrink_device(device, new_size); } /* Equal, nothing needs to be done. */ if (ret == 0 && new_size != old_size) btrfs_info_in_rcu(fs_info, "resize device %s (devid %llu) from %llu to %llu", btrfs_dev_name(device), device->devid, old_size, new_size); out_finish: btrfs_exclop_finish(fs_info); out_free: kfree(vol_args); out_drop: mnt_drop_write_file(file); return ret; } static noinline int __btrfs_ioctl_snap_create(struct file *file, struct mnt_idmap *idmap, const char *name, unsigned long fd, int subvol, bool readonly, struct btrfs_qgroup_inherit *inherit) { int namelen; int ret = 0; if (!S_ISDIR(file_inode(file)->i_mode)) return -ENOTDIR; ret = mnt_want_write_file(file); if (ret) goto out; namelen = strlen(name); if (strchr(name, '/')) { ret = -EINVAL; goto out_drop_write; } if (name[0] == '.' && (namelen == 1 || (name[1] == '.' && namelen == 2))) { ret = -EEXIST; goto out_drop_write; } if (subvol) { ret = btrfs_mksubvol(&file->f_path, idmap, name, namelen, NULL, readonly, inherit); } else { CLASS(fd, src)(fd); struct inode *src_inode; if (fd_empty(src)) { ret = -EINVAL; goto out_drop_write; } src_inode = file_inode(fd_file(src)); if (src_inode->i_sb != file_inode(file)->i_sb) { btrfs_info(BTRFS_I(file_inode(file))->root->fs_info, "Snapshot src from another FS"); ret = -EXDEV; } else if (!inode_owner_or_capable(idmap, src_inode)) { /* * Subvolume creation is not restricted, but snapshots * are limited to the caller's own subvolumes only */ ret = -EPERM; } else if (btrfs_ino(BTRFS_I(src_inode)) != BTRFS_FIRST_FREE_OBJECTID) { /* * Snapshots must be made with the src_inode referring * to the subvolume inode, otherwise the permission * checking above is useless because we may have * permission on a lower directory but not the subvol * itself.
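 * * A conforming caller therefore opens the subvolume root itself as the snapshot source; a hypothetical userspace sketch (paths and names illustrative only, error handling omitted): * *	struct btrfs_ioctl_vol_args_v2 args = { .flags = BTRFS_SUBVOL_RDONLY }; * *	args.fd = open("/mnt/subvol", O_RDONLY);	// subvol root, not a subdirectory *	strncpy(args.name, "snap1", BTRFS_SUBVOL_NAME_MAX); *	ioctl(destdir_fd, BTRFS_IOC_SNAP_CREATE_V2, &args);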
*/ ret = -EINVAL; } else { ret = btrfs_mksnapshot(&file->f_path, idmap, name, namelen, BTRFS_I(src_inode)->root, readonly, inherit); } } out_drop_write: mnt_drop_write_file(file); out: return ret; } static noinline int btrfs_ioctl_snap_create(struct file *file, void __user *arg, int subvol) { struct btrfs_ioctl_vol_args *vol_args; int ret; if (!S_ISDIR(file_inode(file)->i_mode)) return -ENOTDIR; vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) return PTR_ERR(vol_args); ret = btrfs_check_ioctl_vol_args_path(vol_args); if (ret < 0) goto out; ret = __btrfs_ioctl_snap_create(file, file_mnt_idmap(file), vol_args->name, vol_args->fd, subvol, false, NULL); out: kfree(vol_args); return ret; } static noinline int btrfs_ioctl_snap_create_v2(struct file *file, void __user *arg, int subvol) { struct btrfs_ioctl_vol_args_v2 *vol_args; int ret; bool readonly = false; struct btrfs_qgroup_inherit *inherit = NULL; if (!S_ISDIR(file_inode(file)->i_mode)) return -ENOTDIR; vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) return PTR_ERR(vol_args); ret = btrfs_check_ioctl_vol_args2_subvol_name(vol_args); if (ret < 0) goto free_args; if (vol_args->flags & ~BTRFS_SUBVOL_CREATE_ARGS_MASK) { ret = -EOPNOTSUPP; goto free_args; } if (vol_args->flags & BTRFS_SUBVOL_RDONLY) readonly = true; if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) { struct btrfs_fs_info *fs_info = inode_to_fs_info(file_inode(file)); if (vol_args->size < sizeof(*inherit) || vol_args->size > PAGE_SIZE) { ret = -EINVAL; goto free_args; } inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size); if (IS_ERR(inherit)) { ret = PTR_ERR(inherit); goto free_args; } ret = btrfs_qgroup_check_inherit(fs_info, inherit, vol_args->size); if (ret < 0) goto free_inherit; } ret = __btrfs_ioctl_snap_create(file, file_mnt_idmap(file), vol_args->name, vol_args->fd, subvol, readonly, inherit); if (ret) goto free_inherit; free_inherit: kfree(inherit); free_args: kfree(vol_args); return ret; } static noinline int btrfs_ioctl_subvol_getflags(struct btrfs_inode *inode, void __user *arg) { struct btrfs_root *root = inode->root; struct btrfs_fs_info *fs_info = root->fs_info; int ret = 0; u64 flags = 0; if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) return -EINVAL; down_read(&fs_info->subvol_sem); if (btrfs_root_readonly(root)) flags |= BTRFS_SUBVOL_RDONLY; up_read(&fs_info->subvol_sem); if (copy_to_user(arg, &flags, sizeof(flags))) ret = -EFAULT; return ret; } static noinline int btrfs_ioctl_subvol_setflags(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; u64 root_flags; u64 flags; int ret = 0; if (!inode_owner_or_capable(file_mnt_idmap(file), inode)) return -EPERM; ret = mnt_want_write_file(file); if (ret) goto out; if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) { ret = -EINVAL; goto out_drop_write; } if (copy_from_user(&flags, arg, sizeof(flags))) { ret = -EFAULT; goto out_drop_write; } if (flags & ~BTRFS_SUBVOL_RDONLY) { ret = -EOPNOTSUPP; goto out_drop_write; } down_write(&fs_info->subvol_sem); /* nothing to do */ if (!!(flags & BTRFS_SUBVOL_RDONLY) == btrfs_root_readonly(root)) goto out_drop_sem; root_flags = btrfs_root_flags(&root->root_item); if (flags & BTRFS_SUBVOL_RDONLY) { btrfs_set_root_flags(&root->root_item, root_flags | BTRFS_ROOT_SUBVOL_RDONLY); } else { /* * Block RO -> RW transition if this subvolume is involved in * send 
*/ spin_lock(&root->root_item_lock); if (root->send_in_progress == 0) { btrfs_set_root_flags(&root->root_item, root_flags & ~BTRFS_ROOT_SUBVOL_RDONLY); spin_unlock(&root->root_item_lock); } else { spin_unlock(&root->root_item_lock); btrfs_warn(fs_info, "Attempt to set subvolume %llu read-write during send", btrfs_root_id(root)); ret = -EPERM; goto out_drop_sem; } } trans = btrfs_start_transaction(root, 1); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto out_reset; } ret = btrfs_update_root(trans, fs_info->tree_root, &root->root_key, &root->root_item); if (ret < 0) { btrfs_end_transaction(trans); goto out_reset; } ret = btrfs_commit_transaction(trans); out_reset: if (ret) btrfs_set_root_flags(&root->root_item, root_flags); out_drop_sem: up_write(&fs_info->subvol_sem); out_drop_write: mnt_drop_write_file(file); out: return ret; } static noinline bool key_in_sk(const struct btrfs_key *key, const struct btrfs_ioctl_search_key *sk) { struct btrfs_key test; int ret; test.objectid = sk->min_objectid; test.type = sk->min_type; test.offset = sk->min_offset; ret = btrfs_comp_cpu_keys(key, &test); if (ret < 0) return false; test.objectid = sk->max_objectid; test.type = sk->max_type; test.offset = sk->max_offset; ret = btrfs_comp_cpu_keys(key, &test); if (ret > 0) return false; return true; } static noinline int copy_to_sk(struct btrfs_path *path, struct btrfs_key *key, const struct btrfs_ioctl_search_key *sk, u64 *buf_size, char __user *ubuf, unsigned long *sk_offset, int *num_found) { u64 found_transid; struct extent_buffer *leaf; struct btrfs_ioctl_search_header sh; struct btrfs_key test; unsigned long item_off; unsigned long item_len; int nritems; int i; int slot; int ret = 0; leaf = path->nodes[0]; slot = path->slots[0]; nritems = btrfs_header_nritems(leaf); if (btrfs_header_generation(leaf) > sk->max_transid) { i = nritems; goto advance_key; } found_transid = btrfs_header_generation(leaf); for (i = slot; i < nritems; i++) { item_off = btrfs_item_ptr_offset(leaf, i); item_len = btrfs_item_size(leaf, i); btrfs_item_key_to_cpu(leaf, key, i); if (!key_in_sk(key, sk)) continue; if (sizeof(sh) + item_len > *buf_size) { if (*num_found) { ret = 1; goto out; } /* * return one empty item back for v1, which does not * handle -EOVERFLOW */ *buf_size = sizeof(sh) + item_len; item_len = 0; ret = -EOVERFLOW; } if (sizeof(sh) + item_len + *sk_offset > *buf_size) { ret = 1; goto out; } sh.objectid = key->objectid; sh.type = key->type; sh.offset = key->offset; sh.len = item_len; sh.transid = found_transid; /* * Copy search result header. If we fault then loop again so we * can fault in the pages and -EFAULT there if there's a * problem. Otherwise we'll fault and then copy the buffer in * properly this next time through */ if (copy_to_user_nofault(ubuf + *sk_offset, &sh, sizeof(sh))) { ret = 0; goto out; } *sk_offset += sizeof(sh); if (item_len) { char __user *up = ubuf + *sk_offset; /* * Copy the item, same behavior as above, but reset the * * sk_offset so we copy the full thing again. 
*/ if (read_extent_buffer_to_user_nofault(leaf, up, item_off, item_len)) { ret = 0; *sk_offset -= sizeof(sh); goto out; } *sk_offset += item_len; } (*num_found)++; if (ret) /* -EOVERFLOW from above */ goto out; if (*num_found >= sk->nr_items) { ret = 1; goto out; } } advance_key: ret = 0; test.objectid = sk->max_objectid; test.type = sk->max_type; test.offset = sk->max_offset; if (btrfs_comp_cpu_keys(key, &test) >= 0) ret = 1; else if (key->offset < (u64)-1) key->offset++; else if (key->type < (u8)-1) { key->offset = 0; key->type++; } else if (key->objectid < (u64)-1) { key->offset = 0; key->type = 0; key->objectid++; } else ret = 1; out: /* * 0: all items from this leaf copied, continue with next * 1: either more items can be copied but the unused buffer is too * small, or all items were found; either way this stops the loop * that iterates to the next leaf * -EOVERFLOW: item was too large for the buffer * -EFAULT: could not copy extent buffer back to userspace */ return ret; } static noinline int search_ioctl(struct btrfs_root *root, struct btrfs_ioctl_search_key *sk, u64 *buf_size, char __user *ubuf) { struct btrfs_fs_info *info = root->fs_info; struct btrfs_key key; struct btrfs_path *path; int ret; int num_found = 0; unsigned long sk_offset = 0; if (*buf_size < sizeof(struct btrfs_ioctl_search_header)) { *buf_size = sizeof(struct btrfs_ioctl_search_header); return -EOVERFLOW; } path = btrfs_alloc_path(); if (!path) return -ENOMEM; if (sk->tree_id == 0) { /* Search the root that we got passed. */ root = btrfs_grab_root(root); } else { /* Look up the root from the arguments. */ root = btrfs_get_fs_root(info, sk->tree_id, true); if (IS_ERR(root)) { btrfs_free_path(path); return PTR_ERR(root); } } key.objectid = sk->min_objectid; key.type = sk->min_type; key.offset = sk->min_offset; while (1) { /* * Ensure that the whole user buffer is faulted in at sub-page * granularity, otherwise the loop may live-lock. */ if (fault_in_subpage_writeable(ubuf + sk_offset, *buf_size - sk_offset)) { ret = -EFAULT; break; } ret = btrfs_search_forward(root, &key, path, sk->min_transid); if (ret) break; ret = copy_to_sk(path, &key, sk, buf_size, ubuf, &sk_offset, &num_found); btrfs_release_path(path); if (ret) break; } /* Normalize return values from btrfs_search_forward() and copy_to_sk(). */ if (ret > 0) ret = 0; sk->nr_items = num_found; btrfs_put_root(root); btrfs_free_path(path); return ret; } static noinline int btrfs_ioctl_tree_search(struct btrfs_root *root, void __user *argp) { struct btrfs_ioctl_search_args __user *uargs = argp; struct btrfs_ioctl_search_key sk; int ret; u64 buf_size; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (copy_from_user(&sk, &uargs->key, sizeof(sk))) return -EFAULT; buf_size = sizeof(uargs->buf); ret = search_ioctl(root, &sk, &buf_size, uargs->buf); /* * In the original implementation an overflow is handled by returning a * search header with a len of zero, so reset ret.
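 * * Typical userspace usage, sketched (assumes CAP_SYS_ADMIN; field values illustrative): dump every ROOT_ITEM of the root tree: * *	struct btrfs_ioctl_search_args args = {0}; * *	args.key.tree_id = BTRFS_ROOT_TREE_OBJECTID; *	args.key.max_objectid = (__u64)-1; *	args.key.min_type = BTRFS_ROOT_ITEM_KEY; *	args.key.max_type = BTRFS_ROOT_ITEM_KEY; *	args.key.max_offset = (__u64)-1; *	args.key.max_transid = (__u64)-1; *	args.key.nr_items = 4096; *	ioctl(fd, BTRFS_IOC_TREE_SEARCH, &args);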
*/ if (ret == -EOVERFLOW) ret = 0; if (ret == 0 && copy_to_user(&uargs->key, &sk, sizeof(sk))) ret = -EFAULT; return ret; } static noinline int btrfs_ioctl_tree_search_v2(struct btrfs_root *root, void __user *argp) { struct btrfs_ioctl_search_args_v2 __user *uarg = argp; struct btrfs_ioctl_search_args_v2 args; int ret; u64 buf_size; const u64 buf_limit = SZ_16M; if (!capable(CAP_SYS_ADMIN)) return -EPERM; /* copy search header and buffer size */ if (copy_from_user(&args, uarg, sizeof(args))) return -EFAULT; buf_size = args.buf_size; /* limit result size to 16MB */ if (buf_size > buf_limit) buf_size = buf_limit; ret = search_ioctl(root, &args.key, &buf_size, (char __user *)(&uarg->buf[0])); if (ret == 0 && copy_to_user(&uarg->key, &args.key, sizeof(args.key))) ret = -EFAULT; else if (ret == -EOVERFLOW && copy_to_user(&uarg->buf_size, &buf_size, sizeof(buf_size))) ret = -EFAULT; return ret; } /* * Search INODE_REFs to identify the path name of the 'dirid' directory * in a 'tree_id' tree, and set the path name in 'name'. */ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info, u64 tree_id, u64 dirid, char *name) { struct btrfs_root *root; struct btrfs_key key; char *ptr; int ret = -1; int slot; int len; int total_len = 0; struct btrfs_inode_ref *iref; struct extent_buffer *l; struct btrfs_path *path; if (dirid == BTRFS_FIRST_FREE_OBJECTID) { name[0] = '\0'; return 0; } path = btrfs_alloc_path(); if (!path) return -ENOMEM; ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1]; root = btrfs_get_fs_root(info, tree_id, true); if (IS_ERR(root)) { ret = PTR_ERR(root); root = NULL; goto out; } key.objectid = dirid; key.type = BTRFS_INODE_REF_KEY; key.offset = (u64)-1; while (1) { ret = btrfs_search_backwards(root, &key, path); if (ret < 0) goto out; else if (ret > 0) { ret = -ENOENT; goto out; } l = path->nodes[0]; slot = path->slots[0]; iref = btrfs_item_ptr(l, slot, struct btrfs_inode_ref); len = btrfs_inode_ref_name_len(l, iref); ptr -= len + 1; total_len += len + 1; if (ptr < name) { ret = -ENAMETOOLONG; goto out; } *(ptr + len) = '/'; read_extent_buffer(l, ptr, (unsigned long)(iref + 1), len); if (key.offset == BTRFS_FIRST_FREE_OBJECTID) break; btrfs_release_path(path); key.objectid = key.offset; key.offset = (u64)-1; dirid = key.objectid; } memmove(name, ptr, total_len); name[total_len] = '\0'; ret = 0; out: btrfs_put_root(root); btrfs_free_path(path); return ret; } static int btrfs_search_path_in_tree_user(struct mnt_idmap *idmap, struct inode *inode, struct btrfs_ioctl_ino_lookup_user_args *args) { struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; u64 upper_limit = btrfs_ino(BTRFS_I(inode)); u64 treeid = btrfs_root_id(BTRFS_I(inode)->root); u64 dirid = args->dirid; unsigned long item_off; unsigned long item_len; struct btrfs_inode_ref *iref; struct btrfs_root_ref *rref; struct btrfs_root *root = NULL; struct btrfs_path *path; struct btrfs_key key, key2; struct extent_buffer *leaf; char *ptr; int slot; int len; int total_len = 0; int ret; path = btrfs_alloc_path(); if (!path) return -ENOMEM; /* * If the bottom subvolume does not exist directly under upper_limit, * construct the path from the bottom up.
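 * * Worked example (hypothetical names): for a directory chain a/b/c with dirid pointing at c, the loop below first writes "c", then "b/c", then "a/b/c" into the tail of args->path, following each INODE_REF's offset (the parent dirid) until it reaches upper_limit.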
*/ if (dirid != upper_limit) { ptr = &args->path[BTRFS_INO_LOOKUP_USER_PATH_MAX - 1]; root = btrfs_get_fs_root(fs_info, treeid, true); if (IS_ERR(root)) { ret = PTR_ERR(root); goto out; } key.objectid = dirid; key.type = BTRFS_INODE_REF_KEY; key.offset = (u64)-1; while (1) { struct btrfs_inode *temp_inode; ret = btrfs_search_backwards(root, &key, path); if (ret < 0) goto out_put; else if (ret > 0) { ret = -ENOENT; goto out_put; } leaf = path->nodes[0]; slot = path->slots[0]; iref = btrfs_item_ptr(leaf, slot, struct btrfs_inode_ref); len = btrfs_inode_ref_name_len(leaf, iref); ptr -= len + 1; total_len += len + 1; if (ptr < args->path) { ret = -ENAMETOOLONG; goto out_put; } *(ptr + len) = '/'; read_extent_buffer(leaf, ptr, (unsigned long)(iref + 1), len); /* Check the read+exec permission of this directory */ ret = btrfs_previous_item(root, path, dirid, BTRFS_INODE_ITEM_KEY); if (ret < 0) { goto out_put; } else if (ret > 0) { ret = -ENOENT; goto out_put; } leaf = path->nodes[0]; slot = path->slots[0]; btrfs_item_key_to_cpu(leaf, &key2, slot); if (key2.objectid != dirid) { ret = -ENOENT; goto out_put; } /* * We don't need the path anymore, so release it and * avoid deadlocks and lockdep warnings in case * btrfs_iget() needs to lookup the inode from its root * btree and lock the same leaf. */ btrfs_release_path(path); temp_inode = btrfs_iget(key2.objectid, root); if (IS_ERR(temp_inode)) { ret = PTR_ERR(temp_inode); goto out_put; } ret = inode_permission(idmap, &temp_inode->vfs_inode, MAY_READ | MAY_EXEC); iput(&temp_inode->vfs_inode); if (ret) { ret = -EACCES; goto out_put; } if (key.offset == upper_limit) break; if (key.objectid == BTRFS_FIRST_FREE_OBJECTID) { ret = -EACCES; goto out_put; } key.objectid = key.offset; key.offset = (u64)-1; dirid = key.objectid; } memmove(args->path, ptr, total_len); args->path[total_len] = '\0'; btrfs_put_root(root); root = NULL; btrfs_release_path(path); } /* Get the bottom subvolume's name from ROOT_REF */ key.objectid = treeid; key.type = BTRFS_ROOT_REF_KEY; key.offset = args->treeid; ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); if (ret < 0) { goto out; } else if (ret > 0) { ret = -ENOENT; goto out; } leaf = path->nodes[0]; slot = path->slots[0]; btrfs_item_key_to_cpu(leaf, &key, slot); item_off = btrfs_item_ptr_offset(leaf, slot); item_len = btrfs_item_size(leaf, slot); /* Check if dirid in ROOT_REF corresponds to passed dirid */ rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref); if (args->dirid != btrfs_root_ref_dirid(leaf, rref)) { ret = -EINVAL; goto out; } /* Copy subvolume's name */ item_off += sizeof(struct btrfs_root_ref); item_len -= sizeof(struct btrfs_root_ref); read_extent_buffer(leaf, args->name, item_off, item_len); args->name[item_len] = 0; out_put: btrfs_put_root(root); out: btrfs_free_path(path); return ret; } static noinline int btrfs_ioctl_ino_lookup(struct btrfs_root *root, void __user *argp) { struct btrfs_ioctl_ino_lookup_args *args; int ret = 0; args = memdup_user(argp, sizeof(*args)); if (IS_ERR(args)) return PTR_ERR(args); /* * Unprivileged query to obtain the containing subvolume root id. The * path is reset so it's consistent with btrfs_search_path_in_tree. 
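 * * E.g. (sketch): the common unprivileged use is discovering which subvolume an open file lives in: * *	struct btrfs_ioctl_ino_lookup_args args = {0}; * *	args.treeid = 0;	// 0 means "the subvolume of this fd" *	args.objectid = BTRFS_FIRST_FREE_OBJECTID;	// skip the path walk *	ioctl(fd, BTRFS_IOC_INO_LOOKUP, &args); *	// args.treeid now holds the containing subvolume's root id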
*/ if (args->treeid == 0) args->treeid = btrfs_root_id(root); if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) { args->name[0] = 0; goto out; } if (!capable(CAP_SYS_ADMIN)) { ret = -EPERM; goto out; } ret = btrfs_search_path_in_tree(root->fs_info, args->treeid, args->objectid, args->name); out: if (ret == 0 && copy_to_user(argp, args, sizeof(*args))) ret = -EFAULT; kfree(args); return ret; } /* * Version of ino_lookup ioctl (unprivileged) * * The main differences from ino_lookup ioctl are: * * 1. Read + Exec permission will be checked using inode_permission() during * path construction. -EACCES will be returned in case of failure. * 2. Path construction will be stopped at the inode number which corresponds * to the fd with which this ioctl is called. If constructed path does not * exist under fd's inode, -EACCES will be returned. * 3. The name of bottom subvolume is also searched and filled. */ static int btrfs_ioctl_ino_lookup_user(struct file *file, void __user *argp) { struct btrfs_ioctl_ino_lookup_user_args *args; struct inode *inode; int ret; args = memdup_user(argp, sizeof(*args)); if (IS_ERR(args)) return PTR_ERR(args); inode = file_inode(file); if (args->dirid == BTRFS_FIRST_FREE_OBJECTID && btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) { /* * The subvolume does not exist under fd with which this is * called */ kfree(args); return -EACCES; } ret = btrfs_search_path_in_tree_user(file_mnt_idmap(file), inode, args); if (ret == 0 && copy_to_user(argp, args, sizeof(*args))) ret = -EFAULT; kfree(args); return ret; } /* Get the subvolume information in BTRFS_ROOT_ITEM and BTRFS_ROOT_BACKREF */ static int btrfs_ioctl_get_subvol_info(struct inode *inode, void __user *argp) { struct btrfs_ioctl_get_subvol_info_args *subvol_info; struct btrfs_fs_info *fs_info; struct btrfs_root *root; struct btrfs_path *path; struct btrfs_key key; struct btrfs_root_item *root_item; struct btrfs_root_ref *rref; struct extent_buffer *leaf; unsigned long item_off; unsigned long item_len; int slot; int ret = 0; path = btrfs_alloc_path(); if (!path) return -ENOMEM; subvol_info = kzalloc(sizeof(*subvol_info), GFP_KERNEL); if (!subvol_info) { btrfs_free_path(path); return -ENOMEM; } fs_info = BTRFS_I(inode)->root->fs_info; /* Get root_item of inode's subvolume */ key.objectid = btrfs_root_id(BTRFS_I(inode)->root); root = btrfs_get_fs_root(fs_info, key.objectid, true); if (IS_ERR(root)) { ret = PTR_ERR(root); goto out_free; } root_item = &root->root_item; subvol_info->treeid = key.objectid; subvol_info->generation = btrfs_root_generation(root_item); subvol_info->flags = btrfs_root_flags(root_item); memcpy(subvol_info->uuid, root_item->uuid, BTRFS_UUID_SIZE); memcpy(subvol_info->parent_uuid, root_item->parent_uuid, BTRFS_UUID_SIZE); memcpy(subvol_info->received_uuid, root_item->received_uuid, BTRFS_UUID_SIZE); subvol_info->ctransid = btrfs_root_ctransid(root_item); subvol_info->ctime.sec = btrfs_stack_timespec_sec(&root_item->ctime); subvol_info->ctime.nsec = btrfs_stack_timespec_nsec(&root_item->ctime); subvol_info->otransid = btrfs_root_otransid(root_item); subvol_info->otime.sec = btrfs_stack_timespec_sec(&root_item->otime); subvol_info->otime.nsec = btrfs_stack_timespec_nsec(&root_item->otime); subvol_info->stransid = btrfs_root_stransid(root_item); subvol_info->stime.sec = btrfs_stack_timespec_sec(&root_item->stime); subvol_info->stime.nsec = btrfs_stack_timespec_nsec(&root_item->stime); subvol_info->rtransid = btrfs_root_rtransid(root_item); subvol_info->rtime.sec = 
btrfs_stack_timespec_sec(&root_item->rtime); subvol_info->rtime.nsec = btrfs_stack_timespec_nsec(&root_item->rtime); if (key.objectid != BTRFS_FS_TREE_OBJECTID) { /* Search root tree for ROOT_BACKREF of this subvolume */ key.type = BTRFS_ROOT_BACKREF_KEY; key.offset = 0; ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); if (ret < 0) { goto out; } else if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { ret = btrfs_next_leaf(fs_info->tree_root, path); if (ret < 0) { goto out; } else if (ret > 0) { ret = -EUCLEAN; goto out; } } leaf = path->nodes[0]; slot = path->slots[0]; btrfs_item_key_to_cpu(leaf, &key, slot); if (key.objectid == subvol_info->treeid && key.type == BTRFS_ROOT_BACKREF_KEY) { subvol_info->parent_id = key.offset; rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref); subvol_info->dirid = btrfs_root_ref_dirid(leaf, rref); item_off = btrfs_item_ptr_offset(leaf, slot) + sizeof(struct btrfs_root_ref); item_len = btrfs_item_size(leaf, slot) - sizeof(struct btrfs_root_ref); read_extent_buffer(leaf, subvol_info->name, item_off, item_len); } else { ret = -ENOENT; goto out; } } btrfs_free_path(path); path = NULL; if (copy_to_user(argp, subvol_info, sizeof(*subvol_info))) ret = -EFAULT; out: btrfs_put_root(root); out_free: btrfs_free_path(path); kfree(subvol_info); return ret; } /* * Return ROOT_REF information of the subvolume containing this inode * except the subvolume name. */ static int btrfs_ioctl_get_subvol_rootref(struct btrfs_root *root, void __user *argp) { struct btrfs_ioctl_get_subvol_rootref_args *rootrefs; struct btrfs_root_ref *rref; struct btrfs_path *path; struct btrfs_key key; struct extent_buffer *leaf; u64 objectid; int slot; int ret; u8 found; path = btrfs_alloc_path(); if (!path) return -ENOMEM; rootrefs = memdup_user(argp, sizeof(*rootrefs)); if (IS_ERR(rootrefs)) { btrfs_free_path(path); return PTR_ERR(rootrefs); } objectid = btrfs_root_id(root); key.objectid = objectid; key.type = BTRFS_ROOT_REF_KEY; key.offset = rootrefs->min_treeid; found = 0; root = root->fs_info->tree_root; ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); if (ret < 0) { goto out; } else if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { ret = btrfs_next_leaf(root, path); if (ret < 0) { goto out; } else if (ret > 0) { ret = -EUCLEAN; goto out; } } while (1) { leaf = path->nodes[0]; slot = path->slots[0]; btrfs_item_key_to_cpu(leaf, &key, slot); if (key.objectid != objectid || key.type != BTRFS_ROOT_REF_KEY) { ret = 0; goto out; } if (found == BTRFS_MAX_ROOTREF_BUFFER_NUM) { ret = -EOVERFLOW; goto out; } rref = btrfs_item_ptr(leaf, slot, struct btrfs_root_ref); rootrefs->rootref[found].treeid = key.offset; rootrefs->rootref[found].dirid = btrfs_root_ref_dirid(leaf, rref); found++; ret = btrfs_next_item(root, path); if (ret < 0) { goto out; } else if (ret > 0) { ret = -EUCLEAN; goto out; } } out: btrfs_free_path(path); if (!ret || ret == -EOVERFLOW) { rootrefs->num_items = found; /* update min_treeid for next search */ if (found) rootrefs->min_treeid = rootrefs->rootref[found - 1].treeid + 1; if (copy_to_user(argp, rootrefs, sizeof(*rootrefs))) ret = -EFAULT; } kfree(rootrefs); return ret; } static noinline int btrfs_ioctl_snap_destroy(struct file *file, void __user *arg, bool destroy_v2) { struct dentry *parent = file->f_path.dentry; struct dentry *dentry; struct inode *dir = d_inode(parent); struct btrfs_fs_info *fs_info = inode_to_fs_info(dir); struct inode *inode; struct btrfs_root *root = BTRFS_I(dir)->root; struct btrfs_root *dest = NULL; 
struct btrfs_ioctl_vol_args *vol_args = NULL; struct btrfs_ioctl_vol_args_v2 *vol_args2 = NULL; struct mnt_idmap *idmap = file_mnt_idmap(file); char *subvol_name, *subvol_name_ptr = NULL; int ret = 0; bool destroy_parent = false; /* We don't support snapshots with extent tree v2 yet. */ if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { btrfs_err(fs_info, "extent tree v2 doesn't support snapshot deletion yet"); return -EOPNOTSUPP; } if (destroy_v2) { vol_args2 = memdup_user(arg, sizeof(*vol_args2)); if (IS_ERR(vol_args2)) return PTR_ERR(vol_args2); if (vol_args2->flags & ~BTRFS_SUBVOL_DELETE_ARGS_MASK) { ret = -EOPNOTSUPP; goto out; } /* * If SPEC_BY_ID is not set, we are looking for the subvolume by * name, same as v1 currently does. */ if (!(vol_args2->flags & BTRFS_SUBVOL_SPEC_BY_ID)) { ret = btrfs_check_ioctl_vol_args2_subvol_name(vol_args2); if (ret < 0) goto out; subvol_name = vol_args2->name; ret = mnt_want_write_file(file); if (ret) goto out; } else { struct inode *old_dir; if (vol_args2->subvolid < BTRFS_FIRST_FREE_OBJECTID) { ret = -EINVAL; goto out; } ret = mnt_want_write_file(file); if (ret) goto out; dentry = btrfs_get_dentry(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID, vol_args2->subvolid, 0); if (IS_ERR(dentry)) { ret = PTR_ERR(dentry); goto out_drop_write; } /* * Change the default parent since the subvolume being * deleted can be outside of the current mount point. */ parent = btrfs_get_parent(dentry); /* * At this point dentry->d_name can point to '/' if the * subvolume we want to destroy is outside of the * current mount point, so we need to release the * current dentry and execute the lookup to return a new * one with ->d_name pointing to the * <mount point>/subvol_name. */ dput(dentry); if (IS_ERR(parent)) { ret = PTR_ERR(parent); goto out_drop_write; } old_dir = dir; dir = d_inode(parent); /* * If v2 was used with SPEC_BY_ID, a new parent was * allocated since the subvolume can be outside of the * current mount point. Later on we need to release this * new parent dentry. */ destroy_parent = true; /* * On idmapped mounts, deletion via subvolid is * restricted to subvolumes that are immediate * ancestors of the inode referenced by the file * descriptor in the ioctl. Otherwise the idmapping * could potentially be abused to delete subvolumes * anywhere in the filesystem the user wouldn't be able * to delete without an idmapped mount.
*/ if (old_dir != dir && idmap != &nop_mnt_idmap) { ret = -EOPNOTSUPP; goto free_parent; } subvol_name_ptr = btrfs_get_subvol_name_from_objectid( fs_info, vol_args2->subvolid); if (IS_ERR(subvol_name_ptr)) { ret = PTR_ERR(subvol_name_ptr); goto free_parent; } /* subvol_name_ptr is already nul terminated */ subvol_name = (char *)kbasename(subvol_name_ptr); } } else { vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) return PTR_ERR(vol_args); ret = btrfs_check_ioctl_vol_args_path(vol_args); if (ret < 0) goto out; subvol_name = vol_args->name; ret = mnt_want_write_file(file); if (ret) goto out; } if (strchr(subvol_name, '/') || strcmp(subvol_name, "..") == 0) { ret = -EINVAL; goto free_subvol_name; } if (!S_ISDIR(dir->i_mode)) { ret = -ENOTDIR; goto free_subvol_name; } ret = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT); if (ret == -EINTR) goto free_subvol_name; dentry = lookup_one(idmap, &QSTR(subvol_name), parent); if (IS_ERR(dentry)) { ret = PTR_ERR(dentry); goto out_unlock_dir; } if (d_really_is_negative(dentry)) { ret = -ENOENT; goto out_dput; } inode = d_inode(dentry); dest = BTRFS_I(inode)->root; if (!capable(CAP_SYS_ADMIN)) { /* * Regular user. Only allow this with a special mount * option, when the user has write+exec access to the * subvol root, and when rmdir(2) would have been * allowed. * * Note that this is _not_ a check that the subvol is * empty or doesn't contain data that we wouldn't * otherwise be able to delete. * * Users who want to delete empty subvols should try * rmdir(2). */ ret = -EPERM; if (!btrfs_test_opt(fs_info, USER_SUBVOL_RM_ALLOWED)) goto out_dput; /* * Do not allow deletion if the parent dir is the same * as the dir to be deleted. That means the ioctl * must be called on the dentry referencing the root * of the subvol, not a random directory contained * within it. */ ret = -EINVAL; if (root == dest) goto out_dput; ret = inode_permission(idmap, inode, MAY_WRITE | MAY_EXEC); if (ret) goto out_dput; } /* check if subvolume may be deleted by a user */ ret = btrfs_may_delete(idmap, dir, dentry, 1); if (ret) goto out_dput; if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) { ret = -EINVAL; goto out_dput; } btrfs_inode_lock(BTRFS_I(inode), 0); ret = btrfs_delete_subvolume(BTRFS_I(dir), dentry); btrfs_inode_unlock(BTRFS_I(inode), 0); if (!ret) d_delete_notify(dir, dentry); out_dput: dput(dentry); out_unlock_dir: btrfs_inode_unlock(BTRFS_I(dir), 0); free_subvol_name: kfree(subvol_name_ptr); free_parent: if (destroy_parent) dput(parent); out_drop_write: mnt_drop_write_file(file); out: kfree(vol_args2); kfree(vol_args); return ret; } static int btrfs_ioctl_defrag(struct file *file, void __user *argp) { struct inode *inode = file_inode(file); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_ioctl_defrag_range_args range = {0}; int ret; ret = mnt_want_write_file(file); if (ret) return ret; if (btrfs_root_readonly(root)) { ret = -EROFS; goto out; } switch (inode->i_mode & S_IFMT) { case S_IFDIR: if (!capable(CAP_SYS_ADMIN)) { ret = -EPERM; goto out; } ret = btrfs_defrag_root(root); break; case S_IFREG: /* * Note that this does not check the file descriptor for write * access. This prevents defragmenting executables that are * running and allows defrag on files open in read-only mode. */ if (!capable(CAP_SYS_ADMIN) && inode_permission(&nop_mnt_idmap, inode, MAY_WRITE)) { ret = -EPERM; goto out; } /* * Don't allow defrag on pre-content watched files, as it could * populate the page cache with 0's via readahead.
*/ if (unlikely(FMODE_FSNOTIFY_HSM(file->f_mode))) { ret = -EINVAL; goto out; } if (argp) { if (copy_from_user(&range, argp, sizeof(range))) { ret = -EFAULT; goto out; } if (range.flags & ~BTRFS_DEFRAG_RANGE_FLAGS_SUPP) { ret = -EOPNOTSUPP; goto out; } /* compression requires us to start the IO */ if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS)) { range.flags |= BTRFS_DEFRAG_RANGE_START_IO; range.extent_thresh = (u32)-1; } } else { /* the rest are all zeroed by the initializer */ range.len = (u64)-1; } ret = btrfs_defrag_file(BTRFS_I(file_inode(file)), &file->f_ra, &range, BTRFS_OLDEST_GENERATION, 0); if (ret > 0) ret = 0; break; default: ret = -EINVAL; } out: mnt_drop_write_file(file); return ret; } static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg) { struct btrfs_ioctl_vol_args *vol_args; bool restore_op = false; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { btrfs_err(fs_info, "device add not supported on extent tree v2 yet"); return -EINVAL; } if (fs_info->fs_devices->temp_fsid) { btrfs_err(fs_info, "device add not supported on cloned temp-fsid mount"); return -EINVAL; } if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_ADD)) { if (!btrfs_exclop_start_try_lock(fs_info, BTRFS_EXCLOP_DEV_ADD)) return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; /* * We can do the device add because we have a paused balance: * change the exclusive op type and remember we should bring * back the paused balance. */ fs_info->exclusive_operation = BTRFS_EXCLOP_DEV_ADD; btrfs_exclop_start_unlock(fs_info); restore_op = true; } vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) { ret = PTR_ERR(vol_args); goto out; } ret = btrfs_check_ioctl_vol_args_path(vol_args); if (ret < 0) goto out_free; ret = btrfs_init_new_device(fs_info, vol_args->name); if (!ret) btrfs_info(fs_info, "disk added %s", vol_args->name); out_free: kfree(vol_args); out: if (restore_op) btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED); else btrfs_exclop_finish(fs_info); return ret; } static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg) { BTRFS_DEV_LOOKUP_ARGS(args); struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct btrfs_ioctl_vol_args_v2 *vol_args; struct file *bdev_file = NULL; int ret; bool cancel = false; if (!capable(CAP_SYS_ADMIN)) return -EPERM; vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) return PTR_ERR(vol_args); if (vol_args->flags & ~BTRFS_DEVICE_REMOVE_ARGS_MASK) { ret = -EOPNOTSUPP; goto out; } ret = btrfs_check_ioctl_vol_args2_subvol_name(vol_args); if (ret < 0) goto out; if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) { args.devid = vol_args->devid; } else if (!strcmp("cancel", vol_args->name)) { cancel = true; } else { ret = btrfs_get_dev_args_from_path(fs_info, &args, vol_args->name); if (ret) goto out; } ret = mnt_want_write_file(file); if (ret) goto out; ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE, cancel); if (ret) goto err_drop; /* Exclusive operation is now claimed */ ret = btrfs_rm_device(fs_info, &args, &bdev_file); btrfs_exclop_finish(fs_info); if (!ret) { if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) btrfs_info(fs_info, "device deleted: id %llu", vol_args->devid); else btrfs_info(fs_info, "device deleted: %s", vol_args->name); } err_drop: mnt_drop_write_file(file); if (bdev_file) fput(bdev_file); out: btrfs_put_dev_args_from_path(&args); kfree(vol_args); return ret; } static long
btrfs_ioctl_rm_dev(struct file *file, void __user *arg) { BTRFS_DEV_LOOKUP_ARGS(args); struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct btrfs_ioctl_vol_args *vol_args; struct file *bdev_file = NULL; int ret; bool cancel = false; if (!capable(CAP_SYS_ADMIN)) return -EPERM; vol_args = memdup_user(arg, sizeof(*vol_args)); if (IS_ERR(vol_args)) return PTR_ERR(vol_args); ret = btrfs_check_ioctl_vol_args_path(vol_args); if (ret < 0) goto out_free; if (!strcmp("cancel", vol_args->name)) { cancel = true; } else { ret = btrfs_get_dev_args_from_path(fs_info, &args, vol_args->name); if (ret) goto out; } ret = mnt_want_write_file(file); if (ret) goto out; ret = exclop_start_or_cancel_reloc(fs_info, BTRFS_EXCLOP_DEV_REMOVE, cancel); if (ret == 0) { ret = btrfs_rm_device(fs_info, &args, &bdev_file); if (!ret) btrfs_info(fs_info, "disk deleted %s", vol_args->name); btrfs_exclop_finish(fs_info); } mnt_drop_write_file(file); if (bdev_file) fput(bdev_file); out: btrfs_put_dev_args_from_path(&args); out_free: kfree(vol_args); return ret; } static long btrfs_ioctl_fs_info(const struct btrfs_fs_info *fs_info, void __user *arg) { struct btrfs_ioctl_fs_info_args *fi_args; struct btrfs_device *device; struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; u64 flags_in; int ret = 0; fi_args = memdup_user(arg, sizeof(*fi_args)); if (IS_ERR(fi_args)) return PTR_ERR(fi_args); flags_in = fi_args->flags; memset(fi_args, 0, sizeof(*fi_args)); rcu_read_lock(); fi_args->num_devices = fs_devices->num_devices; list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) { if (device->devid > fi_args->max_id) fi_args->max_id = device->devid; } rcu_read_unlock(); memcpy(&fi_args->fsid, fs_devices->fsid, sizeof(fi_args->fsid)); fi_args->nodesize = fs_info->nodesize; fi_args->sectorsize = fs_info->sectorsize; fi_args->clone_alignment = fs_info->sectorsize; if (flags_in & BTRFS_FS_INFO_FLAG_CSUM_INFO) { fi_args->csum_type = btrfs_super_csum_type(fs_info->super_copy); fi_args->csum_size = btrfs_super_csum_size(fs_info->super_copy); fi_args->flags |= BTRFS_FS_INFO_FLAG_CSUM_INFO; } if (flags_in & BTRFS_FS_INFO_FLAG_GENERATION) { fi_args->generation = btrfs_get_fs_generation(fs_info); fi_args->flags |= BTRFS_FS_INFO_FLAG_GENERATION; } if (flags_in & BTRFS_FS_INFO_FLAG_METADATA_UUID) { memcpy(&fi_args->metadata_uuid, fs_devices->metadata_uuid, sizeof(fi_args->metadata_uuid)); fi_args->flags |= BTRFS_FS_INFO_FLAG_METADATA_UUID; } if (copy_to_user(arg, fi_args, sizeof(*fi_args))) ret = -EFAULT; kfree(fi_args); return ret; } static long btrfs_ioctl_dev_info(const struct btrfs_fs_info *fs_info, void __user *arg) { BTRFS_DEV_LOOKUP_ARGS(args); struct btrfs_ioctl_dev_info_args *di_args; struct btrfs_device *dev; int ret = 0; di_args = memdup_user(arg, sizeof(*di_args)); if (IS_ERR(di_args)) return PTR_ERR(di_args); args.devid = di_args->devid; if (!btrfs_is_empty_uuid(di_args->uuid)) args.uuid = di_args->uuid; rcu_read_lock(); dev = btrfs_find_device(fs_info->fs_devices, &args); if (!dev) { ret = -ENODEV; goto out; } di_args->devid = dev->devid; di_args->bytes_used = btrfs_device_get_bytes_used(dev); di_args->total_bytes = btrfs_device_get_total_bytes(dev); memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid)); memcpy(di_args->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE); if (dev->name) strscpy(di_args->path, btrfs_dev_name(dev), sizeof(di_args->path)); else di_args->path[0] = '\0'; out: rcu_read_unlock(); if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args))) ret = 
-EFAULT; kfree(di_args); return ret; } static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *new_root; struct btrfs_dir_item *di; struct btrfs_trans_handle *trans; struct btrfs_path *path = NULL; struct btrfs_disk_key disk_key; struct fscrypt_str name = FSTR_INIT("default", 7); u64 objectid = 0; u64 dir_id; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = mnt_want_write_file(file); if (ret) return ret; if (copy_from_user(&objectid, argp, sizeof(objectid))) { ret = -EFAULT; goto out; } if (!objectid) objectid = BTRFS_FS_TREE_OBJECTID; new_root = btrfs_get_fs_root(fs_info, objectid, true); if (IS_ERR(new_root)) { ret = PTR_ERR(new_root); goto out; } if (!is_fstree(btrfs_root_id(new_root))) { ret = -ENOENT; goto out_free; } path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; goto out_free; } trans = btrfs_start_transaction(root, 1); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto out_free; } dir_id = btrfs_super_root_dir(fs_info->super_copy); di = btrfs_lookup_dir_item(trans, fs_info->tree_root, path, dir_id, &name, 1); if (IS_ERR_OR_NULL(di)) { btrfs_release_path(path); btrfs_end_transaction(trans); btrfs_err(fs_info, "Umm, you don't have the default diritem, this isn't going to work"); ret = -ENOENT; goto out_free; } btrfs_cpu_key_to_disk(&disk_key, &new_root->root_key); btrfs_set_dir_item_key(path->nodes[0], di, &disk_key); btrfs_release_path(path); btrfs_set_fs_incompat(fs_info, DEFAULT_SUBVOL); btrfs_end_transaction(trans); out_free: btrfs_put_root(new_root); btrfs_free_path(path); out: mnt_drop_write_file(file); return ret; } static void get_block_group_info(struct list_head *groups_list, struct btrfs_ioctl_space_info *space) { struct btrfs_block_group *block_group; space->total_bytes = 0; space->used_bytes = 0; space->flags = 0; list_for_each_entry(block_group, groups_list, list) { space->flags = block_group->flags; space->total_bytes += block_group->length; space->used_bytes += block_group->used; } } static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info, void __user *arg) { struct btrfs_ioctl_space_args space_args = { 0 }; struct btrfs_ioctl_space_info space; struct btrfs_ioctl_space_info *dest; struct btrfs_ioctl_space_info *dest_orig; struct btrfs_ioctl_space_info __user *user_dest; struct btrfs_space_info *info; static const u64 types[] = { BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_SYSTEM, BTRFS_BLOCK_GROUP_METADATA, BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA }; int num_types = 4; int alloc_size; int ret = 0; u64 slot_count = 0; int i, c; if (copy_from_user(&space_args, (struct btrfs_ioctl_space_args __user *)arg, sizeof(space_args))) return -EFAULT; for (i = 0; i < num_types; i++) { struct btrfs_space_info *tmp; info = NULL; list_for_each_entry(tmp, &fs_info->space_info, list) { if (tmp->flags == types[i]) { info = tmp; break; } } if (!info) continue; down_read(&info->groups_sem); for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) { if (!list_empty(&info->block_groups[c])) slot_count++; } up_read(&info->groups_sem); } /* * Global block reserve, exported as a space_info */ slot_count++; /* space_slots == 0 means they are asking for a count */ if (space_args.space_slots == 0) { space_args.total_spaces = slot_count; goto out; } slot_count = min_t(u64, space_args.space_slots, slot_count); alloc_size = sizeof(*dest) * slot_count; /* we generally have at most 6 or so space 
infos, one for each raid * level. So, a whole page should be more than enough for everyone */ if (alloc_size > PAGE_SIZE) return -ENOMEM; space_args.total_spaces = 0; dest = kmalloc(alloc_size, GFP_KERNEL); if (!dest) return -ENOMEM; dest_orig = dest; /* now we have a buffer to copy into */ for (i = 0; i < num_types; i++) { struct btrfs_space_info *tmp; if (!slot_count) break; info = NULL; list_for_each_entry(tmp, &fs_info->space_info, list) { if (tmp->flags == types[i]) { info = tmp; break; } } if (!info) continue; down_read(&info->groups_sem); for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) { if (!list_empty(&info->block_groups[c])) { get_block_group_info(&info->block_groups[c], &space); memcpy(dest, &space, sizeof(space)); dest++; space_args.total_spaces++; slot_count--; } if (!slot_count) break; } up_read(&info->groups_sem); } /* * Add global block reserve */ if (slot_count) { struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv; spin_lock(&block_rsv->lock); space.total_bytes = block_rsv->size; space.used_bytes = block_rsv->size - block_rsv->reserved; spin_unlock(&block_rsv->lock); space.flags = BTRFS_SPACE_INFO_GLOBAL_RSV; memcpy(dest, &space, sizeof(space)); space_args.total_spaces++; } user_dest = (struct btrfs_ioctl_space_info __user *) (arg + sizeof(struct btrfs_ioctl_space_args)); if (copy_to_user(user_dest, dest_orig, alloc_size)) ret = -EFAULT; kfree(dest_orig); out: if (ret == 0 && copy_to_user(arg, &space_args, sizeof(space_args))) ret = -EFAULT; return ret; } static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root, void __user *argp) { struct btrfs_trans_handle *trans; u64 transid; /* * Start orphan cleanup here for the given root in case it hasn't been * started already by other means. Errors are handled in the other * functions during transaction commit. */ btrfs_orphan_cleanup(root); trans = btrfs_attach_transaction_barrier(root); if (IS_ERR(trans)) { if (PTR_ERR(trans) != -ENOENT) return PTR_ERR(trans); /* No running transaction, don't bother */ transid = btrfs_get_last_trans_committed(root->fs_info); goto out; } transid = trans->transid; btrfs_commit_transaction_async(trans); out: if (argp) if (copy_to_user(argp, &transid, sizeof(transid))) return -EFAULT; return 0; } static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info, void __user *argp) { /* By default wait for the current transaction. */ u64 transid = 0; if (argp) if (copy_from_user(&transid, argp, sizeof(transid))) return -EFAULT; return btrfs_wait_for_commit(fs_info, transid); } static long btrfs_ioctl_scrub(struct file *file, void __user *arg) { struct btrfs_fs_info *fs_info = inode_to_fs_info(file_inode(file)); struct btrfs_ioctl_scrub_args *sa; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { btrfs_err(fs_info, "scrub: extent tree v2 not yet supported"); return -EINVAL; } sa = memdup_user(arg, sizeof(*sa)); if (IS_ERR(sa)) return PTR_ERR(sa); if (sa->flags & ~BTRFS_SCRUB_SUPPORTED_FLAGS) { ret = -EOPNOTSUPP; goto out; } if (!(sa->flags & BTRFS_SCRUB_READONLY)) { ret = mnt_want_write_file(file); if (ret) goto out; } ret = btrfs_scrub_dev(fs_info, sa->devid, sa->start, sa->end, &sa->progress, sa->flags & BTRFS_SCRUB_READONLY, 0); /* * Copy scrub args to user space even if btrfs_scrub_dev() returned an * error. This is important as it allows user space to know how much * progress scrub has done. For example, if scrub is canceled we get * -ECANCELED from btrfs_scrub_dev() and return that error back to user * space. 
Later user space can inspect the progress from the structure * btrfs_ioctl_scrub_args and resume scrub from where it left off * previously (btrfs-progs does this). * If we fail to copy the btrfs_ioctl_scrub_args structure to user space * then return -EFAULT to signal the structure was not copied or it may * be corrupt and unreliable due to a partial copy. */ if (copy_to_user(arg, sa, sizeof(*sa))) ret = -EFAULT; if (!(sa->flags & BTRFS_SCRUB_READONLY)) mnt_drop_write_file(file); out: kfree(sa); return ret; } static long btrfs_ioctl_scrub_cancel(struct btrfs_fs_info *fs_info) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; return btrfs_scrub_cancel(fs_info); } static long btrfs_ioctl_scrub_progress(struct btrfs_fs_info *fs_info, void __user *arg) { struct btrfs_ioctl_scrub_args *sa; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; sa = memdup_user(arg, sizeof(*sa)); if (IS_ERR(sa)) return PTR_ERR(sa); ret = btrfs_scrub_progress(fs_info, sa->devid, &sa->progress); if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa))) ret = -EFAULT; kfree(sa); return ret; } static long btrfs_ioctl_get_dev_stats(struct btrfs_fs_info *fs_info, void __user *arg) { struct btrfs_ioctl_get_dev_stats *sa; int ret; sa = memdup_user(arg, sizeof(*sa)); if (IS_ERR(sa)) return PTR_ERR(sa); if ((sa->flags & BTRFS_DEV_STATS_RESET) && !capable(CAP_SYS_ADMIN)) { kfree(sa); return -EPERM; } ret = btrfs_get_dev_stats(fs_info, sa); if (ret == 0 && copy_to_user(arg, sa, sizeof(*sa))) ret = -EFAULT; kfree(sa); return ret; } static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info, void __user *arg) { struct btrfs_ioctl_dev_replace_args *p; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) { btrfs_err(fs_info, "device replace not supported on extent tree v2 yet"); return -EINVAL; } p = memdup_user(arg, sizeof(*p)); if (IS_ERR(p)) return PTR_ERR(p); switch (p->cmd) { case BTRFS_IOCTL_DEV_REPLACE_CMD_START: if (sb_rdonly(fs_info->sb)) { ret = -EROFS; goto out; } if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_REPLACE)) { ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; } else { ret = btrfs_dev_replace_by_ioctl(fs_info, p); btrfs_exclop_finish(fs_info); } break; case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS: btrfs_dev_replace_status(fs_info, p); ret = 0; break; case BTRFS_IOCTL_DEV_REPLACE_CMD_CANCEL: p->result = btrfs_dev_replace_cancel(fs_info); ret = 0; break; default: ret = -EINVAL; break; } if ((ret == 0 || ret == -ECANCELED) && copy_to_user(arg, p, sizeof(*p))) ret = -EFAULT; out: kfree(p); return ret; } static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg) { int ret = 0; int i; u64 rel_ptr; int size; struct btrfs_ioctl_ino_path_args *ipa = NULL; struct inode_fs_paths *ipath = NULL; struct btrfs_path *path; if (!capable(CAP_DAC_READ_SEARCH)) return -EPERM; path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; goto out; } ipa = memdup_user(arg, sizeof(*ipa)); if (IS_ERR(ipa)) { ret = PTR_ERR(ipa); ipa = NULL; goto out; } size = min_t(u32, ipa->size, 4096); ipath = init_ipath(size, root, path); if (IS_ERR(ipath)) { ret = PTR_ERR(ipath); ipath = NULL; goto out; } ret = paths_from_inode(ipa->inum, ipath); if (ret < 0) goto out; for (i = 0; i < ipath->fspath->elem_cnt; ++i) { rel_ptr = ipath->fspath->val[i] - (u64)(unsigned long)ipath->fspath->val; ipath->fspath->val[i] = rel_ptr; } btrfs_free_path(path); path = NULL; ret = copy_to_user((void __user *)(unsigned long)ipa->fspath, ipath->fspath, size); if (ret) { ret = -EFAULT; goto out; } out: 
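/* Shared cleanup: btrfs_free_path(), free_ipath() and kfree() all tolerate NULL, so every error path above can jump straight here. */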
btrfs_free_path(path); free_ipath(ipath); kfree(ipa); return ret; } static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info, void __user *arg, int version) { int ret = 0; int size; struct btrfs_ioctl_logical_ino_args *loi; struct btrfs_data_container *inodes = NULL; struct btrfs_path *path = NULL; bool ignore_offset; if (!capable(CAP_SYS_ADMIN)) return -EPERM; loi = memdup_user(arg, sizeof(*loi)); if (IS_ERR(loi)) return PTR_ERR(loi); if (version == 1) { ignore_offset = false; size = min_t(u32, loi->size, SZ_64K); } else { /* All reserved bits must be 0 for now */ if (memchr_inv(loi->reserved, 0, sizeof(loi->reserved))) { ret = -EINVAL; goto out_loi; } /* Only accept flags we have defined so far */ if (loi->flags & ~(BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET)) { ret = -EINVAL; goto out_loi; } ignore_offset = loi->flags & BTRFS_LOGICAL_INO_ARGS_IGNORE_OFFSET; size = min_t(u32, loi->size, SZ_16M); } inodes = init_data_container(size); if (IS_ERR(inodes)) { ret = PTR_ERR(inodes); goto out_loi; } path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; goto out; } ret = iterate_inodes_from_logical(loi->logical, fs_info, path, inodes, ignore_offset); btrfs_free_path(path); if (ret == -EINVAL) ret = -ENOENT; if (ret < 0) goto out; ret = copy_to_user((void __user *)(unsigned long)loi->inodes, inodes, size); if (ret) ret = -EFAULT; out: kvfree(inodes); out_loi: kfree(loi); return ret; } void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info, struct btrfs_ioctl_balance_args *bargs) { struct btrfs_balance_control *bctl = fs_info->balance_ctl; bargs->flags = bctl->flags; if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) bargs->state |= BTRFS_BALANCE_STATE_RUNNING; if (atomic_read(&fs_info->balance_pause_req)) bargs->state |= BTRFS_BALANCE_STATE_PAUSE_REQ; if (atomic_read(&fs_info->balance_cancel_req)) bargs->state |= BTRFS_BALANCE_STATE_CANCEL_REQ; memcpy(&bargs->data, &bctl->data, sizeof(bargs->data)); memcpy(&bargs->meta, &bctl->meta, sizeof(bargs->meta)); memcpy(&bargs->sys, &bctl->sys, sizeof(bargs->sys)); spin_lock(&fs_info->balance_lock); memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat)); spin_unlock(&fs_info->balance_lock); } /* * Try to acquire fs_info::balance_mutex as well as set BTRFS_EXCLOP_BALANCE as * required. * * @fs_info: the filesystem * @excl_acquired: ptr to boolean value which is set to false in case balance * is being resumed * * Return 0 on success, in which case fs_info::balance_mutex is acquired and * exclusive ops are blocked. In case of failure return an error code. */ static int btrfs_try_lock_balance(struct btrfs_fs_info *fs_info, bool *excl_acquired) { int ret; /* * Exclusive operation is locked. Three possibilities: * (1) some other op is running * (2) balance is running * (3) balance is paused -- special case (think resume) */ while (1) { if (btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) { *excl_acquired = true; mutex_lock(&fs_info->balance_mutex); return 0; } mutex_lock(&fs_info->balance_mutex); if (fs_info->balance_ctl) { /* This is either (2) or (3) */ if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { /* This is (2) */ ret = -EINPROGRESS; goto out_failure; } else { mutex_unlock(&fs_info->balance_mutex); /* * Lock released to allow other waiters to * continue, we'll reexamine the status.
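* This open-codes a wait loop: drop the mutex so the pause or cancel request can make progress, then retake it and recheck which case applies.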
*/ mutex_lock(&fs_info->balance_mutex); if (fs_info->balance_ctl && !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) { /* This is (3) */ *excl_acquired = false; return 0; } } } else { /* This is (1) */ ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; goto out_failure; } mutex_unlock(&fs_info->balance_mutex); } out_failure: mutex_unlock(&fs_info->balance_mutex); *excl_acquired = false; return ret; } static long btrfs_ioctl_balance(struct file *file, void __user *arg) { struct btrfs_root *root = BTRFS_I(file_inode(file))->root; struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_ioctl_balance_args *bargs; struct btrfs_balance_control *bctl; bool need_unlock = true; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = mnt_want_write_file(file); if (ret) return ret; bargs = memdup_user(arg, sizeof(*bargs)); if (IS_ERR(bargs)) { ret = PTR_ERR(bargs); bargs = NULL; goto out; } ret = btrfs_try_lock_balance(fs_info, &need_unlock); if (ret) goto out; lockdep_assert_held(&fs_info->balance_mutex); if (bargs->flags & BTRFS_BALANCE_RESUME) { if (!fs_info->balance_ctl) { ret = -ENOTCONN; goto out_unlock; } bctl = fs_info->balance_ctl; spin_lock(&fs_info->balance_lock); bctl->flags |= BTRFS_BALANCE_RESUME; spin_unlock(&fs_info->balance_lock); btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE); goto do_balance; } if (bargs->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) { ret = -EINVAL; goto out_unlock; } if (fs_info->balance_ctl) { ret = -EINPROGRESS; goto out_unlock; } bctl = kzalloc(sizeof(*bctl), GFP_KERNEL); if (!bctl) { ret = -ENOMEM; goto out_unlock; } memcpy(&bctl->data, &bargs->data, sizeof(bctl->data)); memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta)); memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys)); bctl->flags = bargs->flags; do_balance: /* * Ownership of bctl and exclusive operation goes to btrfs_balance. * bctl is freed in reset_balance_state, or, if restriper was paused * all the way until unmount, in free_fs_info. The flag should be * cleared after reset_balance_state. 
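* That is also why need_unlock is cleared below: once btrfs_balance() owns the exclusive op, this function must no longer call btrfs_exclop_finish() on the unlock path.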
*/ need_unlock = false; ret = btrfs_balance(fs_info, bctl, bargs); bctl = NULL; if (ret == 0 || ret == -ECANCELED) { if (copy_to_user(arg, bargs, sizeof(*bargs))) ret = -EFAULT; } kfree(bctl); out_unlock: mutex_unlock(&fs_info->balance_mutex); if (need_unlock) btrfs_exclop_finish(fs_info); out: mnt_drop_write_file(file); kfree(bargs); return ret; } static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; switch (cmd) { case BTRFS_BALANCE_CTL_PAUSE: return btrfs_pause_balance(fs_info); case BTRFS_BALANCE_CTL_CANCEL: return btrfs_cancel_balance(fs_info); } return -EINVAL; } static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info, void __user *arg) { struct btrfs_ioctl_balance_args *bargs; int ret = 0; if (!capable(CAP_SYS_ADMIN)) return -EPERM; mutex_lock(&fs_info->balance_mutex); if (!fs_info->balance_ctl) { ret = -ENOTCONN; goto out; } bargs = kzalloc(sizeof(*bargs), GFP_KERNEL); if (!bargs) { ret = -ENOMEM; goto out; } btrfs_update_ioctl_balance_args(fs_info, bargs); if (copy_to_user(arg, bargs, sizeof(*bargs))) ret = -EFAULT; kfree(bargs); out: mutex_unlock(&fs_info->balance_mutex); return ret; } static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct btrfs_ioctl_quota_ctl_args *sa; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = mnt_want_write_file(file); if (ret) return ret; sa = memdup_user(arg, sizeof(*sa)); if (IS_ERR(sa)) { ret = PTR_ERR(sa); goto drop_write; } switch (sa->cmd) { case BTRFS_QUOTA_CTL_ENABLE: case BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA: down_write(&fs_info->subvol_sem); ret = btrfs_quota_enable(fs_info, sa); up_write(&fs_info->subvol_sem); break; case BTRFS_QUOTA_CTL_DISABLE: /* * Lock the cleaner mutex to prevent races with concurrent * relocation, because relocation may be building backrefs for * blocks of the quota root while we are deleting the root. This * is like dropping fs roots of deleted snapshots/subvolumes, we * need the same protection. * * This also prevents races between concurrent tasks trying to * disable quotas, because we will unlock and relock * qgroup_ioctl_lock across BTRFS_FS_QUOTA_ENABLED changes. * * We take this here because we have the dependency of * * inode_lock -> subvol_sem * * because of rename. With relocation we can prealloc extents, * so that makes the dependency chain * * cleaner_mutex -> inode_lock -> subvol_sem * * so we must take the cleaner_mutex here before we take the * subvol_sem. The deadlock can't actually happen, but this * quiets lockdep. */ mutex_lock(&fs_info->cleaner_mutex); down_write(&fs_info->subvol_sem); ret = btrfs_quota_disable(fs_info); up_write(&fs_info->subvol_sem); mutex_unlock(&fs_info->cleaner_mutex); break; default: ret = -EINVAL; break; } kfree(sa); drop_write: mnt_drop_write_file(file); return ret; } /* * Quick check for ioctl handlers if quotas are enabled. Proper locking must be * done before any operations. 
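* The answer can go stale as soon as qgroup_ioctl_lock is dropped, so callers use it only as an early -ENOTCONN bailout.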
*/ static bool qgroup_enabled(struct btrfs_fs_info *fs_info) { bool ret = true; mutex_lock(&fs_info->qgroup_ioctl_lock); if (!fs_info->quota_root) ret = false; mutex_unlock(&fs_info->qgroup_ioctl_lock); return ret; } static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_ioctl_qgroup_assign_args *sa; struct btrfs_qgroup_list *prealloc = NULL; struct btrfs_trans_handle *trans; int ret; int err; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!qgroup_enabled(root->fs_info)) return -ENOTCONN; ret = mnt_want_write_file(file); if (ret) return ret; sa = memdup_user(arg, sizeof(*sa)); if (IS_ERR(sa)) { ret = PTR_ERR(sa); goto drop_write; } if (sa->assign) { prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL); if (!prealloc) { ret = -ENOMEM; goto drop_write; } } trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto out; } /* * Prealloc ownership is moved to the relation handler, there it's used * or freed on error. */ if (sa->assign) { ret = btrfs_add_qgroup_relation(trans, sa->src, sa->dst, prealloc); prealloc = NULL; } else { ret = btrfs_del_qgroup_relation(trans, sa->src, sa->dst); } /* update qgroup status and info */ mutex_lock(&fs_info->qgroup_ioctl_lock); err = btrfs_run_qgroups(trans); mutex_unlock(&fs_info->qgroup_ioctl_lock); if (err < 0) btrfs_warn(fs_info, "qgroup status update failed after %s relation, marked as inconsistent", sa->assign ? "adding" : "deleting"); err = btrfs_end_transaction(trans); if (err && !ret) ret = err; out: kfree(prealloc); kfree(sa); drop_write: mnt_drop_write_file(file); return ret; } static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_ioctl_qgroup_create_args *sa; struct btrfs_trans_handle *trans; int ret; int err; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!qgroup_enabled(root->fs_info)) return -ENOTCONN; ret = mnt_want_write_file(file); if (ret) return ret; sa = memdup_user(arg, sizeof(*sa)); if (IS_ERR(sa)) { ret = PTR_ERR(sa); goto drop_write; } if (!sa->qgroupid) { ret = -EINVAL; goto out; } if (sa->create && is_fstree(sa->qgroupid)) { ret = -EINVAL; goto out; } trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto out; } if (sa->create) { ret = btrfs_create_qgroup(trans, sa->qgroupid); } else { ret = btrfs_remove_qgroup(trans, sa->qgroupid); } err = btrfs_end_transaction(trans); if (err && !ret) ret = err; out: kfree(sa); drop_write: mnt_drop_write_file(file); return ret; } static long btrfs_ioctl_qgroup_limit(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_ioctl_qgroup_limit_args *sa; struct btrfs_trans_handle *trans; int ret; int err; u64 qgroupid; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!qgroup_enabled(root->fs_info)) return -ENOTCONN; ret = mnt_want_write_file(file); if (ret) return ret; sa = memdup_user(arg, sizeof(*sa)); if (IS_ERR(sa)) { ret = PTR_ERR(sa); goto drop_write; } trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto out; } qgroupid = sa->qgroupid; if (!qgroupid) { /* take the current subvol as qgroup */ qgroupid = btrfs_root_id(root); } ret = btrfs_limit_qgroup(trans, qgroupid, &sa->lim); err = btrfs_end_transaction(trans); if (err && 
!ret) ret = err; out: kfree(sa); drop_write: mnt_drop_write_file(file); return ret; } static long btrfs_ioctl_quota_rescan(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct btrfs_ioctl_quota_rescan_args *qsa; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (!qgroup_enabled(fs_info)) return -ENOTCONN; ret = mnt_want_write_file(file); if (ret) return ret; qsa = memdup_user(arg, sizeof(*qsa)); if (IS_ERR(qsa)) { ret = PTR_ERR(qsa); goto drop_write; } if (qsa->flags) { ret = -EINVAL; goto out; } ret = btrfs_qgroup_rescan(fs_info); out: kfree(qsa); drop_write: mnt_drop_write_file(file); return ret; } static long btrfs_ioctl_quota_rescan_status(struct btrfs_fs_info *fs_info, void __user *arg) { struct btrfs_ioctl_quota_rescan_args qsa = {0}; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) { qsa.flags = 1; qsa.progress = fs_info->qgroup_rescan_progress.objectid; } if (copy_to_user(arg, &qsa, sizeof(qsa))) return -EFAULT; return 0; } static long btrfs_ioctl_quota_rescan_wait(struct btrfs_fs_info *fs_info) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; return btrfs_qgroup_wait_for_completion(fs_info, true); } static long _btrfs_ioctl_set_received_subvol(struct file *file, struct mnt_idmap *idmap, struct btrfs_ioctl_received_subvol_args *sa) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root_item *root_item = &root->root_item; struct btrfs_trans_handle *trans; struct timespec64 ct = current_time(inode); int ret = 0; int received_uuid_changed; if (!inode_owner_or_capable(idmap, inode)) return -EPERM; ret = mnt_want_write_file(file); if (ret < 0) return ret; down_write(&fs_info->subvol_sem); if (btrfs_ino(BTRFS_I(inode)) != BTRFS_FIRST_FREE_OBJECTID) { ret = -EINVAL; goto out; } if (btrfs_root_readonly(root)) { ret = -EROFS; goto out; } /* * 1 - root item * 2 - uuid items (received uuid + subvol uuid) */ trans = btrfs_start_transaction(root, 3); if (IS_ERR(trans)) { ret = PTR_ERR(trans); trans = NULL; goto out; } sa->rtransid = trans->transid; sa->rtime.sec = ct.tv_sec; sa->rtime.nsec = ct.tv_nsec; received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE); if (received_uuid_changed && !btrfs_is_empty_uuid(root_item->received_uuid)) { ret = btrfs_uuid_tree_remove(trans, root_item->received_uuid, BTRFS_UUID_KEY_RECEIVED_SUBVOL, btrfs_root_id(root)); if (ret && ret != -ENOENT) { btrfs_abort_transaction(trans, ret); btrfs_end_transaction(trans); goto out; } } memcpy(root_item->received_uuid, sa->uuid, BTRFS_UUID_SIZE); btrfs_set_root_stransid(root_item, sa->stransid); btrfs_set_root_rtransid(root_item, sa->rtransid); btrfs_set_stack_timespec_sec(&root_item->stime, sa->stime.sec); btrfs_set_stack_timespec_nsec(&root_item->stime, sa->stime.nsec); btrfs_set_stack_timespec_sec(&root_item->rtime, sa->rtime.sec); btrfs_set_stack_timespec_nsec(&root_item->rtime, sa->rtime.nsec); ret = btrfs_update_root(trans, fs_info->tree_root, &root->root_key, &root->root_item); if (ret < 0) { btrfs_end_transaction(trans); goto out; } if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) { ret = btrfs_uuid_tree_add(trans, sa->uuid, BTRFS_UUID_KEY_RECEIVED_SUBVOL, btrfs_root_id(root)); if (ret < 0 && ret != -EEXIST) { btrfs_abort_transaction(trans, ret); btrfs_end_transaction(trans); goto out; } } ret = 
btrfs_commit_transaction(trans); out: up_write(&fs_info->subvol_sem); mnt_drop_write_file(file); return ret; } #ifdef CONFIG_64BIT static long btrfs_ioctl_set_received_subvol_32(struct file *file, void __user *arg) { struct btrfs_ioctl_received_subvol_args_32 *args32 = NULL; struct btrfs_ioctl_received_subvol_args *args64 = NULL; int ret = 0; args32 = memdup_user(arg, sizeof(*args32)); if (IS_ERR(args32)) return PTR_ERR(args32); args64 = kmalloc(sizeof(*args64), GFP_KERNEL); if (!args64) { ret = -ENOMEM; goto out; } memcpy(args64->uuid, args32->uuid, BTRFS_UUID_SIZE); args64->stransid = args32->stransid; args64->rtransid = args32->rtransid; args64->stime.sec = args32->stime.sec; args64->stime.nsec = args32->stime.nsec; args64->rtime.sec = args32->rtime.sec; args64->rtime.nsec = args32->rtime.nsec; args64->flags = args32->flags; ret = _btrfs_ioctl_set_received_subvol(file, file_mnt_idmap(file), args64); if (ret) goto out; memcpy(args32->uuid, args64->uuid, BTRFS_UUID_SIZE); args32->stransid = args64->stransid; args32->rtransid = args64->rtransid; args32->stime.sec = args64->stime.sec; args32->stime.nsec = args64->stime.nsec; args32->rtime.sec = args64->rtime.sec; args32->rtime.nsec = args64->rtime.nsec; args32->flags = args64->flags; ret = copy_to_user(arg, args32, sizeof(*args32)); if (ret) ret = -EFAULT; out: kfree(args32); kfree(args64); return ret; } #endif static long btrfs_ioctl_set_received_subvol(struct file *file, void __user *arg) { struct btrfs_ioctl_received_subvol_args *sa = NULL; int ret = 0; sa = memdup_user(arg, sizeof(*sa)); if (IS_ERR(sa)) return PTR_ERR(sa); ret = _btrfs_ioctl_set_received_subvol(file, file_mnt_idmap(file), sa); if (ret) goto out; ret = copy_to_user(arg, sa, sizeof(*sa)); if (ret) ret = -EFAULT; out: kfree(sa); return ret; } static int btrfs_ioctl_get_fslabel(struct btrfs_fs_info *fs_info, void __user *arg) { size_t len; int ret; char label[BTRFS_LABEL_SIZE]; spin_lock(&fs_info->super_lock); memcpy(label, fs_info->super_copy->label, BTRFS_LABEL_SIZE); spin_unlock(&fs_info->super_lock); len = strnlen(label, BTRFS_LABEL_SIZE); if (len == BTRFS_LABEL_SIZE) { btrfs_warn(fs_info, "label is too long, return the first %zu bytes", --len); } ret = copy_to_user(arg, label, len); return ret ? 
-EFAULT : 0; } static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_super_block *super_block = fs_info->super_copy; struct btrfs_trans_handle *trans; char label[BTRFS_LABEL_SIZE]; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (copy_from_user(label, arg, sizeof(label))) return -EFAULT; if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) { btrfs_err(fs_info, "unable to set label with more than %d bytes", BTRFS_LABEL_SIZE - 1); return -EINVAL; } ret = mnt_want_write_file(file); if (ret) return ret; trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto out_unlock; } spin_lock(&fs_info->super_lock); strcpy(super_block->label, label); spin_unlock(&fs_info->super_lock); ret = btrfs_commit_transaction(trans); out_unlock: mnt_drop_write_file(file); return ret; } #define INIT_FEATURE_FLAGS(suffix) \ { .compat_flags = BTRFS_FEATURE_COMPAT_##suffix, \ .compat_ro_flags = BTRFS_FEATURE_COMPAT_RO_##suffix, \ .incompat_flags = BTRFS_FEATURE_INCOMPAT_##suffix } int btrfs_ioctl_get_supported_features(void __user *arg) { static const struct btrfs_ioctl_feature_flags features[3] = { INIT_FEATURE_FLAGS(SUPP), INIT_FEATURE_FLAGS(SAFE_SET), INIT_FEATURE_FLAGS(SAFE_CLEAR) }; if (copy_to_user(arg, &features, sizeof(features))) return -EFAULT; return 0; } static int btrfs_ioctl_get_features(struct btrfs_fs_info *fs_info, void __user *arg) { struct btrfs_super_block *super_block = fs_info->super_copy; struct btrfs_ioctl_feature_flags features; features.compat_flags = btrfs_super_compat_flags(super_block); features.compat_ro_flags = btrfs_super_compat_ro_flags(super_block); features.incompat_flags = btrfs_super_incompat_flags(super_block); if (copy_to_user(arg, &features, sizeof(features))) return -EFAULT; return 0; } static int check_feature_bits(const struct btrfs_fs_info *fs_info, enum btrfs_feature_set set, u64 change_mask, u64 flags, u64 supported_flags, u64 safe_set, u64 safe_clear) { const char *type = btrfs_feature_set_name(set); char *names; u64 disallowed, unsupported; u64 set_mask = flags & change_mask; u64 clear_mask = ~flags & change_mask; unsupported = set_mask & ~supported_flags; if (unsupported) { names = btrfs_printable_features(set, unsupported); if (names) { btrfs_warn(fs_info, "this kernel does not support the %s feature bit%s", names, strchr(names, ',') ? "s" : ""); kfree(names); } else btrfs_warn(fs_info, "this kernel does not support %s bits 0x%llx", type, unsupported); return -EOPNOTSUPP; } disallowed = set_mask & ~safe_set; if (disallowed) { names = btrfs_printable_features(set, disallowed); if (names) { btrfs_warn(fs_info, "can't set the %s feature bit%s while mounted", names, strchr(names, ',') ? "s" : ""); kfree(names); } else btrfs_warn(fs_info, "can't set %s bits 0x%llx while mounted", type, disallowed); return -EPERM; } disallowed = clear_mask & ~safe_clear; if (disallowed) { names = btrfs_printable_features(set, disallowed); if (names) { btrfs_warn(fs_info, "can't clear the %s feature bit%s while mounted", names, strchr(names, ',') ? 
"s" : ""); kfree(names); } else btrfs_warn(fs_info, "can't clear %s bits 0x%llx while mounted", type, disallowed); return -EPERM; } return 0; } #define check_feature(fs_info, change_mask, flags, mask_base) \ check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags, \ BTRFS_FEATURE_ ## mask_base ## _SUPP, \ BTRFS_FEATURE_ ## mask_base ## _SAFE_SET, \ BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR) static int btrfs_ioctl_set_features(struct file *file, void __user *arg) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_super_block *super_block = fs_info->super_copy; struct btrfs_ioctl_feature_flags flags[2]; struct btrfs_trans_handle *trans; u64 newflags; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (copy_from_user(flags, arg, sizeof(flags))) return -EFAULT; /* Nothing to do */ if (!flags[0].compat_flags && !flags[0].compat_ro_flags && !flags[0].incompat_flags) return 0; ret = check_feature(fs_info, flags[0].compat_flags, flags[1].compat_flags, COMPAT); if (ret) return ret; ret = check_feature(fs_info, flags[0].compat_ro_flags, flags[1].compat_ro_flags, COMPAT_RO); if (ret) return ret; ret = check_feature(fs_info, flags[0].incompat_flags, flags[1].incompat_flags, INCOMPAT); if (ret) return ret; ret = mnt_want_write_file(file); if (ret) return ret; trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto out_drop_write; } spin_lock(&fs_info->super_lock); newflags = btrfs_super_compat_flags(super_block); newflags |= flags[0].compat_flags & flags[1].compat_flags; newflags &= ~(flags[0].compat_flags & ~flags[1].compat_flags); btrfs_set_super_compat_flags(super_block, newflags); newflags = btrfs_super_compat_ro_flags(super_block); newflags |= flags[0].compat_ro_flags & flags[1].compat_ro_flags; newflags &= ~(flags[0].compat_ro_flags & ~flags[1].compat_ro_flags); btrfs_set_super_compat_ro_flags(super_block, newflags); newflags = btrfs_super_incompat_flags(super_block); newflags |= flags[0].incompat_flags & flags[1].incompat_flags; newflags &= ~(flags[0].incompat_flags & ~flags[1].incompat_flags); btrfs_set_super_incompat_flags(super_block, newflags); spin_unlock(&fs_info->super_lock); ret = btrfs_commit_transaction(trans); out_drop_write: mnt_drop_write_file(file); return ret; } static int _btrfs_ioctl_send(struct btrfs_root *root, void __user *argp, bool compat) { struct btrfs_ioctl_send_args *arg; int ret; if (compat) { #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT) struct btrfs_ioctl_send_args_32 args32 = { 0 }; ret = copy_from_user(&args32, argp, sizeof(args32)); if (ret) return -EFAULT; arg = kzalloc(sizeof(*arg), GFP_KERNEL); if (!arg) return -ENOMEM; arg->send_fd = args32.send_fd; arg->clone_sources_count = args32.clone_sources_count; arg->clone_sources = compat_ptr(args32.clone_sources); arg->parent_root = args32.parent_root; arg->flags = args32.flags; arg->version = args32.version; memcpy(arg->reserved, args32.reserved, sizeof(args32.reserved)); #else return -ENOTTY; #endif } else { arg = memdup_user(argp, sizeof(*arg)); if (IS_ERR(arg)) return PTR_ERR(arg); } ret = btrfs_ioctl_send(root, arg); kfree(arg); return ret; } static int btrfs_ioctl_encoded_read(struct file *file, void __user *argp, bool compat) { struct btrfs_ioctl_encoded_io_args args = { 0 }; size_t copy_end_kernel = offsetofend(struct btrfs_ioctl_encoded_io_args, flags); size_t copy_end; struct btrfs_inode *inode = BTRFS_I(file_inode(file)); struct btrfs_fs_info 
*fs_info = inode->root->fs_info; struct extent_io_tree *io_tree = &inode->io_tree; struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; struct iov_iter iter; loff_t pos; struct kiocb kiocb; ssize_t ret; u64 disk_bytenr, disk_io_size; struct extent_state *cached_state = NULL; if (!capable(CAP_SYS_ADMIN)) { ret = -EPERM; goto out_acct; } if (compat) { #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT) struct btrfs_ioctl_encoded_io_args_32 args32; copy_end = offsetofend(struct btrfs_ioctl_encoded_io_args_32, flags); if (copy_from_user(&args32, argp, copy_end)) { ret = -EFAULT; goto out_acct; } args.iov = compat_ptr(args32.iov); args.iovcnt = args32.iovcnt; args.offset = args32.offset; args.flags = args32.flags; #else return -ENOTTY; #endif } else { copy_end = copy_end_kernel; if (copy_from_user(&args, argp, copy_end)) { ret = -EFAULT; goto out_acct; } } if (args.flags != 0) { ret = -EINVAL; goto out_acct; } ret = import_iovec(ITER_DEST, args.iov, args.iovcnt, ARRAY_SIZE(iovstack), &iov, &iter); if (ret < 0) goto out_acct; if (iov_iter_count(&iter) == 0) { ret = 0; goto out_iov; } pos = args.offset; ret = rw_verify_area(READ, file, &pos, args.len); if (ret < 0) goto out_iov; init_sync_kiocb(&kiocb, file); kiocb.ki_pos = pos; ret = btrfs_encoded_read(&kiocb, &iter, &args, &cached_state, &disk_bytenr, &disk_io_size); if (ret == -EIOCBQUEUED) { bool unlocked = false; u64 start, lockend, count; start = ALIGN_DOWN(kiocb.ki_pos, fs_info->sectorsize); lockend = start + BTRFS_MAX_UNCOMPRESSED - 1; if (args.compression) count = disk_io_size; else count = args.len; ret = btrfs_encoded_read_regular(&kiocb, &iter, start, lockend, &cached_state, disk_bytenr, disk_io_size, count, args.compression, &unlocked); if (!unlocked) { btrfs_unlock_extent(io_tree, start, lockend, &cached_state); btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); } } if (ret >= 0) { fsnotify_access(file); if (copy_to_user(argp + copy_end, (char *)&args + copy_end_kernel, sizeof(args) - copy_end_kernel)) ret = -EFAULT; } out_iov: kfree(iov); out_acct: if (ret > 0) add_rchar(current, ret); inc_syscr(current); return ret; } static int btrfs_ioctl_encoded_write(struct file *file, void __user *argp, bool compat) { struct btrfs_ioctl_encoded_io_args args; struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; struct iov_iter iter; loff_t pos; struct kiocb kiocb; ssize_t ret; if (!capable(CAP_SYS_ADMIN)) { ret = -EPERM; goto out_acct; } if (!(file->f_mode & FMODE_WRITE)) { ret = -EBADF; goto out_acct; } if (compat) { #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT) struct btrfs_ioctl_encoded_io_args_32 args32; if (copy_from_user(&args32, argp, sizeof(args32))) { ret = -EFAULT; goto out_acct; } args.iov = compat_ptr(args32.iov); args.iovcnt = args32.iovcnt; args.offset = args32.offset; args.flags = args32.flags; args.len = args32.len; args.unencoded_len = args32.unencoded_len; args.unencoded_offset = args32.unencoded_offset; args.compression = args32.compression; args.encryption = args32.encryption; memcpy(args.reserved, args32.reserved, sizeof(args.reserved)); #else return -ENOTTY; #endif } else { if (copy_from_user(&args, argp, sizeof(args))) { ret = -EFAULT; goto out_acct; } } ret = -EINVAL; if (args.flags != 0) goto out_acct; if (memchr_inv(args.reserved, 0, sizeof(args.reserved))) goto out_acct; if (args.compression == BTRFS_ENCODED_IO_COMPRESSION_NONE && args.encryption == BTRFS_ENCODED_IO_ENCRYPTION_NONE) goto out_acct; if (args.compression >= BTRFS_ENCODED_IO_COMPRESSION_TYPES || args.encryption >= 
BTRFS_ENCODED_IO_ENCRYPTION_TYPES) goto out_acct; if (args.unencoded_offset > args.unencoded_len) goto out_acct; if (args.len > args.unencoded_len - args.unencoded_offset) goto out_acct; ret = import_iovec(ITER_SOURCE, args.iov, args.iovcnt, ARRAY_SIZE(iovstack), &iov, &iter); if (ret < 0) goto out_acct; if (iov_iter_count(&iter) == 0) { ret = 0; goto out_iov; } pos = args.offset; ret = rw_verify_area(WRITE, file, &pos, args.len); if (ret < 0) goto out_iov; init_sync_kiocb(&kiocb, file); ret = kiocb_set_rw_flags(&kiocb, 0, WRITE); if (ret) goto out_iov; kiocb.ki_pos = pos; file_start_write(file); ret = btrfs_do_write_iter(&kiocb, &iter, &args); if (ret > 0) fsnotify_modify(file); file_end_write(file); out_iov: kfree(iov); out_acct: if (ret > 0) add_wchar(current, ret); inc_syscw(current); return ret; } /* * Context that's attached to an encoded read io_uring command, in cmd->pdu. It * contains the fields in btrfs_uring_read_extent that are necessary to finish * off and cleanup the I/O in btrfs_uring_read_finished. */ struct btrfs_uring_priv { struct io_uring_cmd *cmd; struct page **pages; unsigned long nr_pages; struct kiocb iocb; struct iovec *iov; struct iov_iter iter; struct extent_state *cached_state; u64 count; u64 start; u64 lockend; int err; bool compressed; }; struct io_btrfs_cmd { struct btrfs_uring_priv *priv; }; static void btrfs_uring_read_finished(struct io_uring_cmd *cmd, unsigned int issue_flags) { struct io_btrfs_cmd *bc = io_uring_cmd_to_pdu(cmd, struct io_btrfs_cmd); struct btrfs_uring_priv *priv = bc->priv; struct btrfs_inode *inode = BTRFS_I(file_inode(priv->iocb.ki_filp)); struct extent_io_tree *io_tree = &inode->io_tree; unsigned long index; u64 cur; size_t page_offset; ssize_t ret; /* The inode lock has already been acquired in btrfs_uring_read_extent. 
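* Only the lockdep annotation is re-taken here; it pairs with the btrfs_lockdep_inode_release() issued before btrfs_uring_read_extent() returned -EIOCBQUEUED.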
*/ btrfs_lockdep_inode_acquire(inode, i_rwsem); if (priv->err) { ret = priv->err; goto out; } if (priv->compressed) { index = 0; page_offset = 0; } else { index = (priv->iocb.ki_pos - priv->start) >> PAGE_SHIFT; page_offset = offset_in_page(priv->iocb.ki_pos - priv->start); } cur = 0; while (cur < priv->count) { size_t bytes = min_t(size_t, priv->count - cur, PAGE_SIZE - page_offset); if (copy_page_to_iter(priv->pages[index], page_offset, bytes, &priv->iter) != bytes) { ret = -EFAULT; goto out; } index++; cur += bytes; page_offset = 0; } ret = priv->count; out: btrfs_unlock_extent(io_tree, priv->start, priv->lockend, &priv->cached_state); btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); io_uring_cmd_done(cmd, ret, 0, issue_flags); add_rchar(current, ret); for (index = 0; index < priv->nr_pages; index++) __free_page(priv->pages[index]); kfree(priv->pages); kfree(priv->iov); kfree(priv); } void btrfs_uring_read_extent_endio(void *ctx, int err) { struct btrfs_uring_priv *priv = ctx; struct io_btrfs_cmd *bc = io_uring_cmd_to_pdu(priv->cmd, struct io_btrfs_cmd); priv->err = err; bc->priv = priv; io_uring_cmd_complete_in_task(priv->cmd, btrfs_uring_read_finished); } static int btrfs_uring_read_extent(struct kiocb *iocb, struct iov_iter *iter, u64 start, u64 lockend, struct extent_state *cached_state, u64 disk_bytenr, u64 disk_io_size, size_t count, bool compressed, struct iovec *iov, struct io_uring_cmd *cmd) { struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); struct extent_io_tree *io_tree = &inode->io_tree; struct page **pages; struct btrfs_uring_priv *priv = NULL; unsigned long nr_pages; int ret; nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE); pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); if (!pages) return -ENOMEM; ret = btrfs_alloc_page_array(nr_pages, pages, 0); if (ret) { ret = -ENOMEM; goto out_fail; } priv = kmalloc(sizeof(*priv), GFP_NOFS); if (!priv) { ret = -ENOMEM; goto out_fail; } priv->iocb = *iocb; priv->iov = iov; priv->iter = *iter; priv->count = count; priv->cmd = cmd; priv->cached_state = cached_state; priv->compressed = compressed; priv->nr_pages = nr_pages; priv->pages = pages; priv->start = start; priv->lockend = lockend; priv->err = 0; ret = btrfs_encoded_read_regular_fill_pages(inode, disk_bytenr, disk_io_size, pages, priv); if (ret && ret != -EIOCBQUEUED) goto out_fail; /* * If we return -EIOCBQUEUED, we're deferring the cleanup to * btrfs_uring_read_finished(), which will handle unlocking the extent * and inode and freeing the allocations. */ /* * We're returning to userspace with the inode lock held, and that's * okay - it'll get unlocked in a worker thread. Call * btrfs_lockdep_inode_release() to avoid confusing lockdep. 
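* The matching acquire annotation is done in btrfs_uring_read_finished().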
*/ btrfs_lockdep_inode_release(inode, i_rwsem); return -EIOCBQUEUED; out_fail: btrfs_unlock_extent(io_tree, start, lockend, &cached_state); btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); kfree(priv); return ret; } struct btrfs_uring_encoded_data { struct btrfs_ioctl_encoded_io_args args; struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov; struct iov_iter iter; }; static int btrfs_uring_encoded_read(struct io_uring_cmd *cmd, unsigned int issue_flags) { size_t copy_end_kernel = offsetofend(struct btrfs_ioctl_encoded_io_args, flags); size_t copy_end; int ret; u64 disk_bytenr, disk_io_size; struct file *file; struct btrfs_inode *inode; struct btrfs_fs_info *fs_info; struct extent_io_tree *io_tree; loff_t pos; struct kiocb kiocb; struct extent_state *cached_state = NULL; u64 start, lockend; void __user *sqe_addr; struct btrfs_uring_encoded_data *data = io_uring_cmd_get_async_data(cmd)->op_data; if (!capable(CAP_SYS_ADMIN)) { ret = -EPERM; goto out_acct; } file = cmd->file; inode = BTRFS_I(file->f_inode); fs_info = inode->root->fs_info; io_tree = &inode->io_tree; sqe_addr = u64_to_user_ptr(READ_ONCE(cmd->sqe->addr)); if (issue_flags & IO_URING_F_COMPAT) { #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT) copy_end = offsetofend(struct btrfs_ioctl_encoded_io_args_32, flags); #else return -ENOTTY; #endif } else { copy_end = copy_end_kernel; } if (!data) { data = kzalloc(sizeof(*data), GFP_NOFS); if (!data) { ret = -ENOMEM; goto out_acct; } io_uring_cmd_get_async_data(cmd)->op_data = data; if (issue_flags & IO_URING_F_COMPAT) { #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT) struct btrfs_ioctl_encoded_io_args_32 args32; if (copy_from_user(&args32, sqe_addr, copy_end)) { ret = -EFAULT; goto out_acct; } data->args.iov = compat_ptr(args32.iov); data->args.iovcnt = args32.iovcnt; data->args.offset = args32.offset; data->args.flags = args32.flags; #endif } else { if (copy_from_user(&data->args, sqe_addr, copy_end)) { ret = -EFAULT; goto out_acct; } } if (data->args.flags != 0) { ret = -EINVAL; goto out_acct; } data->iov = data->iovstack; ret = import_iovec(ITER_DEST, data->args.iov, data->args.iovcnt, ARRAY_SIZE(data->iovstack), &data->iov, &data->iter); if (ret < 0) goto out_acct; if (iov_iter_count(&data->iter) == 0) { ret = 0; goto out_free; } } pos = data->args.offset; ret = rw_verify_area(READ, file, &pos, data->args.len); if (ret < 0) goto out_free; init_sync_kiocb(&kiocb, file); kiocb.ki_pos = pos; if (issue_flags & IO_URING_F_NONBLOCK) kiocb.ki_flags |= IOCB_NOWAIT; start = ALIGN_DOWN(pos, fs_info->sectorsize); lockend = start + BTRFS_MAX_UNCOMPRESSED - 1; ret = btrfs_encoded_read(&kiocb, &data->iter, &data->args, &cached_state, &disk_bytenr, &disk_io_size); if (ret == -EAGAIN) goto out_acct; if (ret < 0 && ret != -EIOCBQUEUED) goto out_free; file_accessed(file); if (copy_to_user(sqe_addr + copy_end, (const char *)&data->args + copy_end_kernel, sizeof(data->args) - copy_end_kernel)) { if (ret == -EIOCBQUEUED) { btrfs_unlock_extent(io_tree, start, lockend, &cached_state); btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED); } ret = -EFAULT; goto out_free; } if (ret == -EIOCBQUEUED) { u64 count = min_t(u64, iov_iter_count(&data->iter), disk_io_size); /* Match ioctl by not returning past EOF if uncompressed. 
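* Compressed reads keep the full encoded size (disk_io_size) instead, matching the ioctl path above.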
*/ if (!data->args.compression) count = min_t(u64, count, data->args.len); ret = btrfs_uring_read_extent(&kiocb, &data->iter, start, lockend, cached_state, disk_bytenr, disk_io_size, count, data->args.compression, data->iov, cmd); goto out_acct; } out_free: kfree(data->iov); out_acct: if (ret > 0) add_rchar(current, ret); inc_syscr(current); return ret; } static int btrfs_uring_encoded_write(struct io_uring_cmd *cmd, unsigned int issue_flags) { loff_t pos; struct kiocb kiocb; struct file *file; ssize_t ret; void __user *sqe_addr; struct btrfs_uring_encoded_data *data = io_uring_cmd_get_async_data(cmd)->op_data; if (!capable(CAP_SYS_ADMIN)) { ret = -EPERM; goto out_acct; } file = cmd->file; sqe_addr = u64_to_user_ptr(READ_ONCE(cmd->sqe->addr)); if (!(file->f_mode & FMODE_WRITE)) { ret = -EBADF; goto out_acct; } if (!data) { data = kzalloc(sizeof(*data), GFP_NOFS); if (!data) { ret = -ENOMEM; goto out_acct; } io_uring_cmd_get_async_data(cmd)->op_data = data; if (issue_flags & IO_URING_F_COMPAT) { #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT) struct btrfs_ioctl_encoded_io_args_32 args32; if (copy_from_user(&args32, sqe_addr, sizeof(args32))) { ret = -EFAULT; goto out_acct; } data->args.iov = compat_ptr(args32.iov); data->args.iovcnt = args32.iovcnt; data->args.offset = args32.offset; data->args.flags = args32.flags; data->args.len = args32.len; data->args.unencoded_len = args32.unencoded_len; data->args.unencoded_offset = args32.unencoded_offset; data->args.compression = args32.compression; data->args.encryption = args32.encryption; memcpy(data->args.reserved, args32.reserved, sizeof(data->args.reserved)); #else ret = -ENOTTY; goto out_acct; #endif } else { if (copy_from_user(&data->args, sqe_addr, sizeof(data->args))) { ret = -EFAULT; goto out_acct; } } ret = -EINVAL; if (data->args.flags != 0) goto out_acct; if (memchr_inv(data->args.reserved, 0, sizeof(data->args.reserved))) goto out_acct; if (data->args.compression == BTRFS_ENCODED_IO_COMPRESSION_NONE && data->args.encryption == BTRFS_ENCODED_IO_ENCRYPTION_NONE) goto out_acct; if (data->args.compression >= BTRFS_ENCODED_IO_COMPRESSION_TYPES || data->args.encryption >= BTRFS_ENCODED_IO_ENCRYPTION_TYPES) goto out_acct; if (data->args.unencoded_offset > data->args.unencoded_len) goto out_acct; if (data->args.len > data->args.unencoded_len - data->args.unencoded_offset) goto out_acct; data->iov = data->iovstack; ret = import_iovec(ITER_SOURCE, data->args.iov, data->args.iovcnt, ARRAY_SIZE(data->iovstack), &data->iov, &data->iter); if (ret < 0) goto out_acct; if (iov_iter_count(&data->iter) == 0) { ret = 0; goto out_iov; } } if (issue_flags & IO_URING_F_NONBLOCK) { ret = -EAGAIN; goto out_acct; } pos = data->args.offset; ret = rw_verify_area(WRITE, file, &pos, data->args.len); if (ret < 0) goto out_iov; init_sync_kiocb(&kiocb, file); ret = kiocb_set_rw_flags(&kiocb, 0, WRITE); if (ret) goto out_iov; kiocb.ki_pos = pos; file_start_write(file); ret = btrfs_do_write_iter(&kiocb, &data->iter, &data->args); if (ret > 0) fsnotify_modify(file); file_end_write(file); out_iov: kfree(data->iov); out_acct: if (ret > 0) add_wchar(current, ret); inc_syscw(current); return ret; } int btrfs_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags) { switch (cmd->cmd_op) { case BTRFS_IOC_ENCODED_READ: #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT) case BTRFS_IOC_ENCODED_READ_32: #endif return btrfs_uring_encoded_read(cmd, issue_flags); case BTRFS_IOC_ENCODED_WRITE: #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT) case 
BTRFS_IOC_ENCODED_WRITE_32: #endif return btrfs_uring_encoded_write(cmd, issue_flags); } return -EINVAL; } static int btrfs_ioctl_subvol_sync(struct btrfs_fs_info *fs_info, void __user *argp) { struct btrfs_root *root; struct btrfs_ioctl_subvol_wait args = { 0 }; signed long sched_ret; int refs; u64 root_flags; bool wait_for_deletion = false; bool found = false; if (copy_from_user(&args, argp, sizeof(args))) return -EFAULT; switch (args.mode) { case BTRFS_SUBVOL_SYNC_WAIT_FOR_QUEUED: /* * Wait for the first one deleted that waits until all previous * are cleaned. */ spin_lock(&fs_info->trans_lock); if (!list_empty(&fs_info->dead_roots)) { root = list_last_entry(&fs_info->dead_roots, struct btrfs_root, root_list); args.subvolid = btrfs_root_id(root); found = true; } spin_unlock(&fs_info->trans_lock); if (!found) return -ENOENT; fallthrough; case BTRFS_SUBVOL_SYNC_WAIT_FOR_ONE: if ((0 < args.subvolid && args.subvolid < BTRFS_FIRST_FREE_OBJECTID) || BTRFS_LAST_FREE_OBJECTID < args.subvolid) return -EINVAL; break; case BTRFS_SUBVOL_SYNC_COUNT: spin_lock(&fs_info->trans_lock); args.count = list_count_nodes(&fs_info->dead_roots); spin_unlock(&fs_info->trans_lock); if (copy_to_user(argp, &args, sizeof(args))) return -EFAULT; return 0; case BTRFS_SUBVOL_SYNC_PEEK_FIRST: spin_lock(&fs_info->trans_lock); /* Last in the list was deleted first. */ if (!list_empty(&fs_info->dead_roots)) { root = list_last_entry(&fs_info->dead_roots, struct btrfs_root, root_list); args.subvolid = btrfs_root_id(root); } else { args.subvolid = 0; } spin_unlock(&fs_info->trans_lock); if (copy_to_user(argp, &args, sizeof(args))) return -EFAULT; return 0; case BTRFS_SUBVOL_SYNC_PEEK_LAST: spin_lock(&fs_info->trans_lock); /* First in the list was deleted last. */ if (!list_empty(&fs_info->dead_roots)) { root = list_first_entry(&fs_info->dead_roots, struct btrfs_root, root_list); args.subvolid = btrfs_root_id(root); } else { args.subvolid = 0; } spin_unlock(&fs_info->trans_lock); if (copy_to_user(argp, &args, sizeof(args))) return -EFAULT; return 0; default: return -EINVAL; } /* 32bit limitation: fs_roots_radix key is not wide enough. */ if (sizeof(unsigned long) != sizeof(u64) && args.subvolid > U32_MAX) return -EOVERFLOW; while (1) { /* Wait for the specific one. */ if (down_read_interruptible(&fs_info->subvol_sem) == -EINTR) return -EINTR; refs = -1; spin_lock(&fs_info->fs_roots_radix_lock); root = radix_tree_lookup(&fs_info->fs_roots_radix, (unsigned long)args.subvolid); if (root) { spin_lock(&root->root_item_lock); refs = btrfs_root_refs(&root->root_item); root_flags = btrfs_root_flags(&root->root_item); spin_unlock(&root->root_item_lock); } spin_unlock(&fs_info->fs_roots_radix_lock); up_read(&fs_info->subvol_sem); /* Subvolume does not exist. */ if (!root) return -ENOENT; /* Subvolume not deleted at all. */ if (refs > 0) return -EEXIST; /* We've waited and now the subvolume is gone. */ if (wait_for_deletion && refs == -1) { /* Return the one we waited for as the last one. */ if (copy_to_user(argp, &args, sizeof(args))) return -EFAULT; return 0; } /* Subvolume not found on the first try (deleted or never existed). */ if (refs == -1) return -ENOENT; wait_for_deletion = true; ASSERT(root_flags & BTRFS_ROOT_SUBVOL_DEAD); sched_ret = schedule_timeout_interruptible(HZ); /* Early wake up or error. 
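* schedule_timeout_interruptible() returns the remaining jiffies, so any nonzero value means a signal arrived before the one-second poll expired.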
*/ if (sched_ret != 0) return -EINTR; } return 0; } long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct inode *inode = file_inode(file); struct btrfs_fs_info *fs_info = inode_to_fs_info(inode); struct btrfs_root *root = BTRFS_I(inode)->root; void __user *argp = (void __user *)arg; switch (cmd) { case FS_IOC_GETVERSION: return btrfs_ioctl_getversion(inode, argp); case FS_IOC_GETFSLABEL: return btrfs_ioctl_get_fslabel(fs_info, argp); case FS_IOC_SETFSLABEL: return btrfs_ioctl_set_fslabel(file, argp); case FITRIM: return btrfs_ioctl_fitrim(fs_info, argp); case BTRFS_IOC_SNAP_CREATE: return btrfs_ioctl_snap_create(file, argp, 0); case BTRFS_IOC_SNAP_CREATE_V2: return btrfs_ioctl_snap_create_v2(file, argp, 0); case BTRFS_IOC_SUBVOL_CREATE: return btrfs_ioctl_snap_create(file, argp, 1); case BTRFS_IOC_SUBVOL_CREATE_V2: return btrfs_ioctl_snap_create_v2(file, argp, 1); case BTRFS_IOC_SNAP_DESTROY: return btrfs_ioctl_snap_destroy(file, argp, false); case BTRFS_IOC_SNAP_DESTROY_V2: return btrfs_ioctl_snap_destroy(file, argp, true); case BTRFS_IOC_SUBVOL_GETFLAGS: return btrfs_ioctl_subvol_getflags(BTRFS_I(inode), argp); case BTRFS_IOC_SUBVOL_SETFLAGS: return btrfs_ioctl_subvol_setflags(file, argp); case BTRFS_IOC_DEFAULT_SUBVOL: return btrfs_ioctl_default_subvol(file, argp); case BTRFS_IOC_DEFRAG: return btrfs_ioctl_defrag(file, NULL); case BTRFS_IOC_DEFRAG_RANGE: return btrfs_ioctl_defrag(file, argp); case BTRFS_IOC_RESIZE: return btrfs_ioctl_resize(file, argp); case BTRFS_IOC_ADD_DEV: return btrfs_ioctl_add_dev(fs_info, argp); case BTRFS_IOC_RM_DEV: return btrfs_ioctl_rm_dev(file, argp); case BTRFS_IOC_RM_DEV_V2: return btrfs_ioctl_rm_dev_v2(file, argp); case BTRFS_IOC_FS_INFO: return btrfs_ioctl_fs_info(fs_info, argp); case BTRFS_IOC_DEV_INFO: return btrfs_ioctl_dev_info(fs_info, argp); case BTRFS_IOC_TREE_SEARCH: return btrfs_ioctl_tree_search(root, argp); case BTRFS_IOC_TREE_SEARCH_V2: return btrfs_ioctl_tree_search_v2(root, argp); case BTRFS_IOC_INO_LOOKUP: return btrfs_ioctl_ino_lookup(root, argp); case BTRFS_IOC_INO_PATHS: return btrfs_ioctl_ino_to_path(root, argp); case BTRFS_IOC_LOGICAL_INO: return btrfs_ioctl_logical_to_ino(fs_info, argp, 1); case BTRFS_IOC_LOGICAL_INO_V2: return btrfs_ioctl_logical_to_ino(fs_info, argp, 2); case BTRFS_IOC_SPACE_INFO: return btrfs_ioctl_space_info(fs_info, argp); case BTRFS_IOC_SYNC: { int ret; ret = btrfs_start_delalloc_roots(fs_info, LONG_MAX, false); if (ret) return ret; ret = btrfs_sync_fs(inode->i_sb, 1); /* * There may be work for the cleaner kthread to do (subvolume * deletion, delayed iputs, defrag inodes, etc), so wake it up. 
*/ wake_up_process(fs_info->cleaner_kthread); return ret; } case BTRFS_IOC_START_SYNC: return btrfs_ioctl_start_sync(root, argp); case BTRFS_IOC_WAIT_SYNC: return btrfs_ioctl_wait_sync(fs_info, argp); case BTRFS_IOC_SCRUB: return btrfs_ioctl_scrub(file, argp); case BTRFS_IOC_SCRUB_CANCEL: return btrfs_ioctl_scrub_cancel(fs_info); case BTRFS_IOC_SCRUB_PROGRESS: return btrfs_ioctl_scrub_progress(fs_info, argp); case BTRFS_IOC_BALANCE_V2: return btrfs_ioctl_balance(file, argp); case BTRFS_IOC_BALANCE_CTL: return btrfs_ioctl_balance_ctl(fs_info, arg); case BTRFS_IOC_BALANCE_PROGRESS: return btrfs_ioctl_balance_progress(fs_info, argp); case BTRFS_IOC_SET_RECEIVED_SUBVOL: return btrfs_ioctl_set_received_subvol(file, argp); #ifdef CONFIG_64BIT case BTRFS_IOC_SET_RECEIVED_SUBVOL_32: return btrfs_ioctl_set_received_subvol_32(file, argp); #endif case BTRFS_IOC_SEND: return _btrfs_ioctl_send(root, argp, false); #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT) case BTRFS_IOC_SEND_32: return _btrfs_ioctl_send(root, argp, true); #endif case BTRFS_IOC_GET_DEV_STATS: return btrfs_ioctl_get_dev_stats(fs_info, argp); case BTRFS_IOC_QUOTA_CTL: return btrfs_ioctl_quota_ctl(file, argp); case BTRFS_IOC_QGROUP_ASSIGN: return btrfs_ioctl_qgroup_assign(file, argp); case BTRFS_IOC_QGROUP_CREATE: return btrfs_ioctl_qgroup_create(file, argp); case BTRFS_IOC_QGROUP_LIMIT: return btrfs_ioctl_qgroup_limit(file, argp); case BTRFS_IOC_QUOTA_RESCAN: return btrfs_ioctl_quota_rescan(file, argp); case BTRFS_IOC_QUOTA_RESCAN_STATUS: return btrfs_ioctl_quota_rescan_status(fs_info, argp); case BTRFS_IOC_QUOTA_RESCAN_WAIT: return btrfs_ioctl_quota_rescan_wait(fs_info); case BTRFS_IOC_DEV_REPLACE: return btrfs_ioctl_dev_replace(fs_info, argp); case BTRFS_IOC_GET_SUPPORTED_FEATURES: return btrfs_ioctl_get_supported_features(argp); case BTRFS_IOC_GET_FEATURES: return btrfs_ioctl_get_features(fs_info, argp); case BTRFS_IOC_SET_FEATURES: return btrfs_ioctl_set_features(file, argp); case BTRFS_IOC_GET_SUBVOL_INFO: return btrfs_ioctl_get_subvol_info(inode, argp); case BTRFS_IOC_GET_SUBVOL_ROOTREF: return btrfs_ioctl_get_subvol_rootref(root, argp); case BTRFS_IOC_INO_LOOKUP_USER: return btrfs_ioctl_ino_lookup_user(file, argp); case FS_IOC_ENABLE_VERITY: return fsverity_ioctl_enable(file, (const void __user *)argp); case FS_IOC_MEASURE_VERITY: return fsverity_ioctl_measure(file, argp); case FS_IOC_READ_VERITY_METADATA: return fsverity_ioctl_read_metadata(file, argp); case BTRFS_IOC_ENCODED_READ: return btrfs_ioctl_encoded_read(file, argp, false); case BTRFS_IOC_ENCODED_WRITE: return btrfs_ioctl_encoded_write(file, argp, false); #if defined(CONFIG_64BIT) && defined(CONFIG_COMPAT) case BTRFS_IOC_ENCODED_READ_32: return btrfs_ioctl_encoded_read(file, argp, true); case BTRFS_IOC_ENCODED_WRITE_32: return btrfs_ioctl_encoded_write(file, argp, true); #endif case BTRFS_IOC_SUBVOL_SYNC_WAIT: return btrfs_ioctl_subvol_sync(fs_info, argp); } return -ENOTTY; } #ifdef CONFIG_COMPAT long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { /* * These all access 32-bit values anyway so no further * handling is necessary. */ switch (cmd) { case FS_IOC32_GETVERSION: cmd = FS_IOC_GETVERSION; break; } return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); } #endif
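The BTRFS_SUBVOL_SYNC_WAIT modes dispatched above lend themselves to a small userspace loop. The sketch below is illustrative only: it uses the struct btrfs_ioctl_subvol_wait fields and BTRFS_SUBVOL_SYNC_* constants exactly as the btrfs_ioctl_subvol_sync() handler interprets them (the uapi declarations are assumed to come from <linux/btrfs.h>), and it relies on the handler returning -ENOENT once no queued deletion remains.

/*
 * Illustrative userspace sketch (not part of this file): repeatedly wait
 * for the oldest queued subvolume deletion until the queue is drained.
 * Field and constant names follow the btrfs_ioctl_subvol_sync() handler
 * above; the uapi declarations live in <linux/btrfs.h>.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

static int wait_for_all_deleted_subvols(int fd)
{
	struct btrfs_ioctl_subvol_wait args;

	for (;;) {
		memset(&args, 0, sizeof(args));
		args.mode = BTRFS_SUBVOL_SYNC_WAIT_FOR_QUEUED;
		if (ioctl(fd, BTRFS_IOC_SUBVOL_SYNC_WAIT, &args) < 0) {
			if (errno == ENOENT)
				return 0;	/* nothing left to clean */
			return -errno;
		}
		/* The kernel reports the subvolume it waited for. */
		printf("subvolume %llu cleaned\n",
		       (unsigned long long)args.subvolid);
	}
}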
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __KVM_X86_MMU_H #define __KVM_X86_MMU_H #include <linux/kvm_host.h> #include "kvm_cache_regs.h" #include "x86.h" #include "cpuid.h" extern bool __read_mostly enable_mmio_caching; #define PT_WRITABLE_SHIFT 1 #define PT_USER_SHIFT 2 #define PT_PRESENT_MASK (1ULL << 0) #define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT) #define PT_USER_MASK (1ULL << PT_USER_SHIFT) #define PT_PWT_MASK (1ULL << 3) #define PT_PCD_MASK (1ULL << 4) #define PT_ACCESSED_SHIFT 5 #define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT) #define PT_DIRTY_SHIFT 6 #define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT) #define PT_PAGE_SIZE_SHIFT 7 #define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT) #define PT_PAT_MASK (1ULL << 7) #define PT_GLOBAL_MASK (1ULL << 8) #define PT64_NX_SHIFT 63 #define PT64_NX_MASK (1ULL << PT64_NX_SHIFT) #define PT_PAT_SHIFT 7 #define PT_DIR_PAT_SHIFT 12 #define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT) #define PT64_ROOT_5LEVEL 5 #define PT64_ROOT_4LEVEL 4 #define PT32_ROOT_LEVEL 2 #define PT32E_ROOT_LEVEL 3 #define KVM_MMU_CR4_ROLE_BITS (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_LA57 | \ X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE) #define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP) #define KVM_MMU_EFER_ROLE_BITS (EFER_LME | EFER_NX) static __always_inline u64 rsvd_bits(int s, int e) { BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s); if (__builtin_constant_p(e)) BUILD_BUG_ON(e > 63); else e &= 63; if (e < s) return 0; return ((2ULL << (e - s)) - 1) << s; } static inline gfn_t kvm_mmu_max_gfn(void) { /* * Note that this uses the host MAXPHYADDR, not the guest's. * EPT/NPT cannot support GPAs that would exceed host.MAXPHYADDR; * assuming KVM is running on bare metal, guest accesses beyond * host.MAXPHYADDR will hit a #PF(RSVD) and never cause a vmexit * (either EPT Violation/Misconfig or #NPF), and so KVM will never * install a SPTE for such addresses. If KVM is running as a VM * itself, on the other hand, it might see a MAXPHYADDR that is less * than hardware's real MAXPHYADDR. Using the host MAXPHYADDR * disallows such SPTEs entirely and simplifies the TDP MMU. */ int max_gpa_bits = likely(tdp_enabled) ?
kvm_host.maxphyaddr : 52; return (1ULL << (max_gpa_bits - PAGE_SHIFT)) - 1; } u8 kvm_mmu_get_max_tdp_level(void); void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask); void kvm_mmu_set_mmio_spte_value(struct kvm *kvm, u64 mmio_value); void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask); void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only); void kvm_init_mmu(struct kvm_vcpu *vcpu); void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0, unsigned long cr4, u64 efer, gpa_t nested_cr3); void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, int huge_page_level, bool accessed_dirty, gpa_t new_eptp); bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu); int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, u64 fault_address, char *insn, int insn_len); void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu); int kvm_mmu_load(struct kvm_vcpu *vcpu); void kvm_mmu_unload(struct kvm_vcpu *vcpu); void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu); void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu); void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu); void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new, int bytes); static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu) { if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu)) kvm_mmu_free_obsolete_roots(vcpu); /* * Checking root.hpa is sufficient even when KVM has mirror root. * We can have either: * (1) mirror_root_hpa = INVALID_PAGE, root.hpa = INVALID_PAGE * (2) mirror_root_hpa = root, root.hpa = INVALID_PAGE * (3) mirror_root_hpa = root1, root.hpa = root2 * We don't ever have: * mirror_root_hpa = INVALID_PAGE, root.hpa = root */ if (likely(vcpu->arch.mmu->root.hpa != INVALID_PAGE)) return 0; return kvm_mmu_load(vcpu); } static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3) { BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0); return kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE) ? cr3 & X86_CR3_PCID_MASK : 0; } static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu) { return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu)); } static inline unsigned long kvm_get_active_cr3_lam_bits(struct kvm_vcpu *vcpu) { if (!guest_cpu_cap_has(vcpu, X86_FEATURE_LAM)) return 0; return kvm_read_cr3(vcpu) & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57); } static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu) { u64 root_hpa = vcpu->arch.mmu->root.hpa; if (!VALID_PAGE(root_hpa)) return; kvm_x86_call(load_mmu_pgd)(vcpu, root_hpa, vcpu->arch.mmu->root_role.level); } static inline void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) { /* * When EPT is enabled, KVM may passthrough CR0.WP to the guest, i.e. * @mmu's snapshot of CR0.WP and thus all related paging metadata may * be stale. Refresh CR0.WP and the metadata on-demand when checking * for permission faults. Exempt nested MMUs, i.e. MMUs for shadowing * nEPT and nNPT, as CR0.WP is ignored in both cases. Note, KVM does * need to refresh nested_mmu, a.k.a. the walker used to translate L2 * GVAs to GPAs, as that "MMU" needs to honor L2's CR0.WP. */ if (!tdp_enabled || mmu == &vcpu->arch.guest_mmu) return; __kvm_mmu_refresh_passthrough_bits(vcpu, mmu); } /* * Check if a given access (described through the I/D, W/R and U/S bits of a * page fault error code pfec) causes a permission fault with the given PTE * access rights (in ACC_* format). * * Return zero if the access does not fault; return the page fault error code * if the access faults. 
*/ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned pte_access, unsigned pte_pkey, u64 access) { /* strip nested paging fault error codes */ unsigned int pfec = access; unsigned long rflags = kvm_x86_call(get_rflags)(vcpu); /* * For explicit supervisor accesses, SMAP is disabled if EFLAGS.AC = 1. * For implicit supervisor accesses, SMAP cannot be overridden. * * SMAP works on supervisor accesses only, and not_smap can * be set or not set when user access with neither has any bearing * on the result. * * We put the SMAP checking bit in place of the PFERR_RSVD_MASK bit; * this bit will always be zero in pfec, but it will be one in index * if SMAP checks are being disabled. */ u64 implicit_access = access & PFERR_IMPLICIT_ACCESS; bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC; int index = (pfec | (not_smap ? PFERR_RSVD_MASK : 0)) >> 1; u32 errcode = PFERR_PRESENT_MASK; bool fault; kvm_mmu_refresh_passthrough_bits(vcpu, mmu); fault = (mmu->permissions[index] >> pte_access) & 1; WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK)); if (unlikely(mmu->pkru_mask)) { u32 pkru_bits, offset; /* * PKRU defines 32 bits, there are 16 domains and 2 * attribute bits per domain in pkru. pte_pkey is the * index of the protection domain, so pte_pkey * 2 is * the index of the first bit for the domain. */ pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3; /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */ offset = (pfec & ~1) | ((pte_access & PT_USER_MASK) ? PFERR_RSVD_MASK : 0); pkru_bits &= mmu->pkru_mask >> offset; errcode |= -pkru_bits & PFERR_PK_MASK; fault |= (pkru_bits != 0); } return -(u32)fault & errcode; } bool kvm_mmu_may_ignore_guest_pat(struct kvm *kvm); int kvm_mmu_post_init_vm(struct kvm *kvm); void kvm_mmu_pre_destroy_vm(struct kvm *kvm); static inline bool kvm_shadow_root_allocated(struct kvm *kvm) { /* * Read shadow_root_allocated before related pointers. Hence, threads * reading shadow_root_allocated in any lock context are guaranteed to * see the pointers. Pairs with smp_store_release in * mmu_first_shadow_root_alloc. */ return smp_load_acquire(&kvm->arch.shadow_root_allocated); } #ifdef CONFIG_X86_64 extern bool tdp_mmu_enabled; #else #define tdp_mmu_enabled false #endif bool kvm_tdp_mmu_gpa_is_mapped(struct kvm_vcpu *vcpu, u64 gpa); int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code, u8 *level); static inline bool kvm_memslots_have_rmaps(struct kvm *kvm) { return !tdp_mmu_enabled || kvm_shadow_root_allocated(kvm); } static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) { /* KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K) must be 0.
*/ return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) - (base_gfn >> KVM_HPAGE_GFN_SHIFT(level)); } static inline unsigned long __kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, unsigned long npages, int level) { return gfn_to_index(slot->base_gfn + npages - 1, slot->base_gfn, level) + 1; } static inline unsigned long kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, int level) { return __kvm_mmu_slot_lpages(slot, slot->npages, level); } static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count) { atomic64_add(count, &kvm->stat.pages[level - 1]); } gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access, struct x86_exception *exception); static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, gpa_t gpa, u64 access, struct x86_exception *exception) { if (mmu != &vcpu->arch.nested_mmu) return gpa; return translate_nested_gpa(vcpu, gpa, access, exception); } static inline bool kvm_has_mirrored_tdp(const struct kvm *kvm) { return kvm->arch.vm_type == KVM_X86_TDX_VM; } static inline gfn_t kvm_gfn_direct_bits(const struct kvm *kvm) { return kvm->arch.gfn_direct_bits; } static inline bool kvm_is_addr_direct(struct kvm *kvm, gpa_t gpa) { gpa_t gpa_direct_bits = gfn_to_gpa(kvm_gfn_direct_bits(kvm)); return !gpa_direct_bits || (gpa & gpa_direct_bits); } static inline bool kvm_is_gfn_alias(struct kvm *kvm, gfn_t gfn) { return gfn & kvm_gfn_direct_bits(kvm); } #endif
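The closed-form mask in rsvd_bits() above, ((2ULL << (e - s)) - 1) << s, is easy to sanity-check outside the kernel. Below is a standalone sketch, plain userspace C rather than kernel API, that compares the closed form against a naive set-bits-s-through-e loop; the helper names are invented for the example.

/*
 * Standalone illustration (not part of the header): the closed form used
 * by rsvd_bits() equals the naive "set bits s..e" loop for all s, e < 64,
 * and returns 0 when e < s, matching the kernel helper's early return.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t rsvd_bits_closed(int s, int e)
{
	if (e < s)
		return 0;
	return ((2ULL << (e - s)) - 1) << s;
}

static uint64_t rsvd_bits_naive(int s, int e)
{
	uint64_t m = 0;

	for (int i = s; i <= e && i < 64; i++)
		m |= 1ULL << i;
	return m;
}

int main(void)
{
	for (int s = 0; s < 64; s++)
		for (int e = 0; e < 64; e++)
			assert(rsvd_bits_closed(s, e) == rsvd_bits_naive(s, e));
	/* e.g. a mask covering physical-address bits 52..62 of a PTE */
	assert(rsvd_bits_closed(52, 62) == 0x7ff0000000000000ULL);
	return 0;
}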
// SPDX-License-Identifier: GPL-2.0 /* * udc.c - Core UDC Framework * * Copyright (C) 2016 Intel Corporation * Author: Felipe Balbi <felipe.balbi@linux.intel.com> */ #undef TRACE_SYSTEM #define TRACE_SYSTEM gadget #if !defined(__UDC_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) #define __UDC_TRACE_H #include <linux/types.h> #include <linux/tracepoint.h> #include <asm/byteorder.h> #include <linux/usb/gadget.h> DECLARE_EVENT_CLASS(udc_log_gadget, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret), TP_STRUCT__entry( __field(enum usb_device_speed, speed) __field(enum usb_device_speed, max_speed) __field(enum usb_device_state, state) __field(unsigned, mA) __field(unsigned, sg_supported) __field(unsigned, is_otg) __field(unsigned, is_a_peripheral) __field(unsigned, b_hnp_enable) __field(unsigned, a_hnp_support) __field(unsigned, hnp_polling_support) __field(unsigned, host_request_flag) __field(unsigned, quirk_ep_out_aligned_size) __field(unsigned, quirk_altset_not_supp) __field(unsigned, quirk_stall_not_supp) __field(unsigned, quirk_zlp_not_supp) __field(unsigned, is_selfpowered) __field(unsigned, deactivated) __field(unsigned, connected) __field(int, ret) ), TP_fast_assign( __entry->speed = g->speed; __entry->max_speed = g->max_speed; __entry->state = g->state; __entry->mA = g->mA; __entry->sg_supported = g->sg_supported; __entry->is_otg = g->is_otg; __entry->is_a_peripheral = g->is_a_peripheral; __entry->b_hnp_enable = g->b_hnp_enable; __entry->a_hnp_support = g->a_hnp_support; __entry->hnp_polling_support = g->hnp_polling_support; __entry->host_request_flag = g->host_request_flag; __entry->quirk_ep_out_aligned_size = g->quirk_ep_out_aligned_size; __entry->quirk_altset_not_supp = g->quirk_altset_not_supp; __entry->quirk_stall_not_supp = g->quirk_stall_not_supp; __entry->quirk_zlp_not_supp = g->quirk_zlp_not_supp; __entry->is_selfpowered = g->is_selfpowered; __entry->deactivated = g->deactivated; __entry->connected = g->connected; __entry->ret = ret; ), TP_printk("speed %d/%d state %d %dmA [%s%s%s%s%s%s%s%s%s%s%s%s%s%s] --> %d", __entry->speed, __entry->max_speed, __entry->state, __entry->mA, __entry->sg_supported ? "sg:" : "", __entry->is_otg ? "OTG:" : "", __entry->is_a_peripheral ? "a_peripheral:" : "", __entry->b_hnp_enable ? "b_hnp:" : "", __entry->a_hnp_support ? "a_hnp:" : "", __entry->hnp_polling_support ? "hnp_poll:" : "", __entry->host_request_flag ?
"hostreq:" : "", __entry->quirk_ep_out_aligned_size ? "out_aligned:" : "", __entry->quirk_altset_not_supp ? "no_altset:" : "", __entry->quirk_stall_not_supp ? "no_stall:" : "", __entry->quirk_zlp_not_supp ? "no_zlp" : "", __entry->is_selfpowered ? "self-powered:" : "bus-powered:", __entry->deactivated ? "deactivated:" : "activated:", __entry->connected ? "connected" : "disconnected", __entry->ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_frame_number, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_wakeup, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_set_remote_wakeup, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_set_selfpowered, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_clear_selfpowered, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_vbus_connect, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_vbus_draw, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_vbus_disconnect, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_connect, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_disconnect, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_deactivate, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DEFINE_EVENT(udc_log_gadget, usb_gadget_activate, TP_PROTO(struct usb_gadget *g, int ret), TP_ARGS(g, ret) ); DECLARE_EVENT_CLASS(udc_log_ep, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret), TP_STRUCT__entry( __string(name, ep->name) __field(unsigned, maxpacket) __field(unsigned, maxpacket_limit) __field(unsigned, max_streams) __field(unsigned, mult) __field(unsigned, maxburst) __field(u8, address) __field(bool, claimed) __field(bool, enabled) __field(int, ret) ), TP_fast_assign( __assign_str(name); __entry->maxpacket = ep->maxpacket; __entry->maxpacket_limit = ep->maxpacket_limit; __entry->max_streams = ep->max_streams; __entry->mult = ep->mult; __entry->maxburst = ep->maxburst; __entry->address = ep->address, __entry->claimed = ep->claimed; __entry->enabled = ep->enabled; __entry->ret = ret; ), TP_printk("%s: mps %d/%d streams %d mult %d burst %d addr %02x %s%s --> %d", __get_str(name), __entry->maxpacket, __entry->maxpacket_limit, __entry->max_streams, __entry->mult, __entry->maxburst, __entry->address, __entry->claimed ? "claimed:" : "released:", __entry->enabled ? 
"enabled" : "disabled", ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_set_maxpacket_limit, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_enable, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_disable, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_set_halt, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_clear_halt, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_set_wedge, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_fifo_status, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DEFINE_EVENT(udc_log_ep, usb_ep_fifo_flush, TP_PROTO(struct usb_ep *ep, int ret), TP_ARGS(ep, ret) ); DECLARE_EVENT_CLASS(udc_log_req, TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret), TP_ARGS(ep, req, ret), TP_STRUCT__entry( __string(name, ep->name) __field(unsigned, length) __field(unsigned, actual) __field(unsigned, num_sgs) __field(unsigned, num_mapped_sgs) __field(unsigned, stream_id) __field(unsigned, no_interrupt) __field(unsigned, zero) __field(unsigned, short_not_ok) __field(int, status) __field(int, ret) __field(struct usb_request *, req) ), TP_fast_assign( __assign_str(name); __entry->length = req->length; __entry->actual = req->actual; __entry->num_sgs = req->num_sgs; __entry->num_mapped_sgs = req->num_mapped_sgs; __entry->stream_id = req->stream_id; __entry->no_interrupt = req->no_interrupt; __entry->zero = req->zero; __entry->short_not_ok = req->short_not_ok; __entry->status = req->status; __entry->ret = ret; __entry->req = req; ), TP_printk("%s: req %p length %d/%d sgs %d/%d stream %d %s%s%s status %d --> %d", __get_str(name),__entry->req, __entry->actual, __entry->length, __entry->num_mapped_sgs, __entry->num_sgs, __entry->stream_id, __entry->zero ? "Z" : "z", __entry->short_not_ok ? "S" : "s", __entry->no_interrupt ? "i" : "I", __entry->status, __entry->ret ) ); DEFINE_EVENT(udc_log_req, usb_ep_alloc_request, TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret), TP_ARGS(ep, req, ret) ); DEFINE_EVENT(udc_log_req, usb_ep_free_request, TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret), TP_ARGS(ep, req, ret) ); DEFINE_EVENT(udc_log_req, usb_ep_queue, TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret), TP_ARGS(ep, req, ret) ); DEFINE_EVENT(udc_log_req, usb_ep_dequeue, TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret), TP_ARGS(ep, req, ret) ); DEFINE_EVENT(udc_log_req, usb_gadget_giveback_request, TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret), TP_ARGS(ep, req, ret) ); #endif /* __UDC_TRACE_H */ /* this part has to be here */ #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE trace #include <trace/define_trace.h> |
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar * Copyright (C) 2005-2006, Thomas Gleixner, Russell King * * This file contains the interrupt descriptor management code. Detailed * information is available in Documentation/core-api/genericirq.rst * */ #include <linux/irq.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include <linux/maple_tree.h> #include <linux/irqdomain.h> #include <linux/sysfs.h> #include <linux/string_choices.h> #include "internals.h" /* * lockdep: we want to handle all irq_desc locks as a single lock-class: */ static struct lock_class_key irq_desc_lock_class; #if defined(CONFIG_SMP) static int __init irq_affinity_setup(char *str) { alloc_bootmem_cpumask_var(&irq_default_affinity); cpulist_parse(str, irq_default_affinity); /* * Set at least the boot cpu. We don't want to end up with * bugreports caused by random commandline masks */ cpumask_set_cpu(smp_processor_id(), irq_default_affinity); return 1; } __setup("irqaffinity=", irq_affinity_setup); static void __init init_irq_default_affinity(void) { if (!cpumask_available(irq_default_affinity)) zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); if (cpumask_empty(irq_default_affinity)) cpumask_setall(irq_default_affinity); } #else static void __init init_irq_default_affinity(void) { } #endif #ifdef CONFIG_SMP static int alloc_masks(struct irq_desc *desc, int node) { if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity, GFP_KERNEL, node)) return -ENOMEM; #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity, GFP_KERNEL, node)) { free_cpumask_var(desc->irq_common_data.affinity); return -ENOMEM; } #endif #ifdef CONFIG_GENERIC_PENDING_IRQ if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) { #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK free_cpumask_var(desc->irq_common_data.effective_affinity); #endif free_cpumask_var(desc->irq_common_data.affinity); return -ENOMEM; } #endif return 0; } static void desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { if (!affinity) affinity = irq_default_affinity; cpumask_copy(desc->irq_common_data.affinity, affinity); #ifdef CONFIG_GENERIC_PENDING_IRQ cpumask_clear(desc->pending_mask); #endif #ifdef CONFIG_NUMA desc->irq_common_data.node = node; #endif } static void free_masks(struct irq_desc *desc) { #ifdef CONFIG_GENERIC_PENDING_IRQ free_cpumask_var(desc->pending_mask); #endif free_cpumask_var(desc->irq_common_data.affinity); #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK free_cpumask_var(desc->irq_common_data.effective_affinity); #endif } #else static inline int alloc_masks(struct irq_desc *desc, int node) { return 0; } static inline void desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { } static inline void free_masks(struct irq_desc *desc) { } #endif static void
desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node, const struct cpumask *affinity, struct module *owner) { int cpu; desc->irq_common_data.handler_data = NULL; desc->irq_common_data.msi_desc = NULL; desc->irq_data.common = &desc->irq_common_data; desc->irq_data.irq = irq; desc->irq_data.chip = &no_irq_chip; desc->irq_data.chip_data = NULL; irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS); irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); irqd_set(&desc->irq_data, IRQD_IRQ_MASKED); desc->handle_irq = handle_bad_irq; desc->depth = 1; desc->irq_count = 0; desc->irqs_unhandled = 0; desc->tot_count = 0; desc->name = NULL; desc->owner = owner; for_each_possible_cpu(cpu) *per_cpu_ptr(desc->kstat_irqs, cpu) = (struct irqstat) { }; desc_smp_init(desc, node, affinity); } static unsigned int nr_irqs = NR_IRQS; /** * irq_get_nr_irqs() - Number of interrupts supported by the system. */ unsigned int irq_get_nr_irqs(void) { return nr_irqs; } EXPORT_SYMBOL_GPL(irq_get_nr_irqs); /** * irq_set_nr_irqs() - Set the number of interrupts supported by the system. * @nr: New number of interrupts. * * Return: @nr. */ unsigned int irq_set_nr_irqs(unsigned int nr) { nr_irqs = nr; return nr; } EXPORT_SYMBOL_GPL(irq_set_nr_irqs); static DEFINE_MUTEX(sparse_irq_lock); static struct maple_tree sparse_irqs = MTREE_INIT_EXT(sparse_irqs, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN | MT_FLAGS_USE_RCU, sparse_irq_lock); static int irq_find_free_area(unsigned int from, unsigned int cnt) { MA_STATE(mas, &sparse_irqs, 0, 0); if (mas_empty_area(&mas, from, MAX_SPARSE_IRQS, cnt)) return -ENOSPC; return mas.index; } static unsigned int irq_find_at_or_after(unsigned int offset) { unsigned long index = offset; struct irq_desc *desc; guard(rcu)(); desc = mt_find(&sparse_irqs, &index, nr_irqs); return desc ? 
irq_desc_get_irq(desc) : nr_irqs; } static void irq_insert_desc(unsigned int irq, struct irq_desc *desc) { MA_STATE(mas, &sparse_irqs, irq, irq); WARN_ON(mas_store_gfp(&mas, desc, GFP_KERNEL) != 0); } static void delete_irq_desc(unsigned int irq) { MA_STATE(mas, &sparse_irqs, irq, irq); mas_erase(&mas); } #ifdef CONFIG_SPARSE_IRQ static const struct kobj_type irq_kobj_type; #endif static int init_desc(struct irq_desc *desc, int irq, int node, unsigned int flags, const struct cpumask *affinity, struct module *owner) { desc->kstat_irqs = alloc_percpu(struct irqstat); if (!desc->kstat_irqs) return -ENOMEM; if (alloc_masks(desc, node)) { free_percpu(desc->kstat_irqs); return -ENOMEM; } raw_spin_lock_init(&desc->lock); lockdep_set_class(&desc->lock, &irq_desc_lock_class); mutex_init(&desc->request_mutex); init_waitqueue_head(&desc->wait_for_threads); desc_set_defaults(irq, desc, node, affinity, owner); irqd_set(&desc->irq_data, flags); irq_resend_init(desc); #ifdef CONFIG_SPARSE_IRQ kobject_init(&desc->kobj, &irq_kobj_type); init_rcu_head(&desc->rcu); #endif return 0; } #ifdef CONFIG_SPARSE_IRQ static void irq_kobj_release(struct kobject *kobj); #ifdef CONFIG_SYSFS static struct kobject *irq_kobj_base; #define IRQ_ATTR_RO(_name) \ static struct kobj_attribute _name##_attr = __ATTR_RO(_name) static ssize_t per_cpu_count_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); ssize_t ret = 0; char *p = ""; int cpu; for_each_possible_cpu(cpu) { unsigned int c = irq_desc_kstat_cpu(desc, cpu); ret += sysfs_emit_at(buf, ret, "%s%u", p, c); p = ","; } ret += sysfs_emit_at(buf, ret, "\n"); return ret; } IRQ_ATTR_RO(per_cpu_count); static ssize_t chip_name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); guard(raw_spinlock_irq)(&desc->lock); if (desc->irq_data.chip && desc->irq_data.chip->name) return sysfs_emit(buf, "%s\n", desc->irq_data.chip->name); return 0; } IRQ_ATTR_RO(chip_name); static ssize_t hwirq_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); guard(raw_spinlock_irq)(&desc->lock); if (desc->irq_data.domain) return sysfs_emit(buf, "%lu\n", desc->irq_data.hwirq); return 0; } IRQ_ATTR_RO(hwirq); static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); guard(raw_spinlock_irq)(&desc->lock); return sysfs_emit(buf, "%s\n", irqd_is_level_type(&desc->irq_data) ? 
"level" : "edge"); } IRQ_ATTR_RO(type); static ssize_t wakeup_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); guard(raw_spinlock_irq)(&desc->lock); return sysfs_emit(buf, "%s\n", str_enabled_disabled(irqd_is_wakeup_set(&desc->irq_data))); } IRQ_ATTR_RO(wakeup); static ssize_t name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); guard(raw_spinlock_irq)(&desc->lock); if (desc->name) return sysfs_emit(buf, "%s\n", desc->name); return 0; } IRQ_ATTR_RO(name); static ssize_t actions_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); struct irqaction *action; ssize_t ret = 0; char *p = ""; scoped_guard(raw_spinlock_irq, &desc->lock) { for_each_action_of_desc(desc, action) { ret += sysfs_emit_at(buf, ret, "%s%s", p, action->name); p = ","; } } if (ret) ret += sysfs_emit_at(buf, ret, "\n"); return ret; } IRQ_ATTR_RO(actions); static struct attribute *irq_attrs[] = { &per_cpu_count_attr.attr, &chip_name_attr.attr, &hwirq_attr.attr, &type_attr.attr, &wakeup_attr.attr, &name_attr.attr, &actions_attr.attr, NULL }; ATTRIBUTE_GROUPS(irq); static const struct kobj_type irq_kobj_type = { .release = irq_kobj_release, .sysfs_ops = &kobj_sysfs_ops, .default_groups = irq_groups, }; static void irq_sysfs_add(int irq, struct irq_desc *desc) { if (irq_kobj_base) { /* * Continue even in case of failure as this is nothing * crucial and failures in the late irq_sysfs_init() * cannot be rolled back. */ if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq)) pr_warn("Failed to add kobject for irq %d\n", irq); else desc->istate |= IRQS_SYSFS; } } static void irq_sysfs_del(struct irq_desc *desc) { /* * Only invoke kobject_del() when kobject_add() was successfully * invoked for the descriptor. This covers both early boot, where * sysfs is not initialized yet, and the case of a failed * kobject_add() invocation. 
*/ if (desc->istate & IRQS_SYSFS) kobject_del(&desc->kobj); } static int __init irq_sysfs_init(void) { struct irq_desc *desc; int irq; /* Prevent concurrent irq alloc/free */ guard(mutex)(&sparse_irq_lock); irq_kobj_base = kobject_create_and_add("irq", kernel_kobj); if (!irq_kobj_base) return -ENOMEM; /* Add the already allocated interrupts */ for_each_irq_desc(irq, desc) irq_sysfs_add(irq, desc); return 0; } postcore_initcall(irq_sysfs_init); #else /* !CONFIG_SYSFS */ static const struct kobj_type irq_kobj_type = { .release = irq_kobj_release, }; static void irq_sysfs_add(int irq, struct irq_desc *desc) {} static void irq_sysfs_del(struct irq_desc *desc) {} #endif /* CONFIG_SYSFS */ struct irq_desc *irq_to_desc(unsigned int irq) { return mtree_load(&sparse_irqs, irq); } #ifdef CONFIG_KVM_BOOK3S_64_HV_MODULE EXPORT_SYMBOL_GPL(irq_to_desc); #endif void irq_lock_sparse(void) { mutex_lock(&sparse_irq_lock); } void irq_unlock_sparse(void) { mutex_unlock(&sparse_irq_lock); } static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags, const struct cpumask *affinity, struct module *owner) { struct irq_desc *desc; int ret; desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node); if (!desc) return NULL; ret = init_desc(desc, irq, node, flags, affinity, owner); if (unlikely(ret)) { kfree(desc); return NULL; } return desc; } static void irq_kobj_release(struct kobject *kobj) { struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj); free_masks(desc); free_percpu(desc->kstat_irqs); kfree(desc); } static void delayed_free_desc(struct rcu_head *rhp) { struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu); kobject_put(&desc->kobj); } static void free_desc(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); irq_remove_debugfs_entry(desc); unregister_irq_proc(irq, desc); /* * sparse_irq_lock protects also show_interrupts() and * kstat_irq_usr(). Once we deleted the descriptor from the * sparse tree we can free it. Access in proc will fail to * lookup the descriptor. * * The sysfs entry must be serialized against a concurrent * irq_sysfs_init() as well. */ irq_sysfs_del(desc); delete_irq_desc(irq); /* * We free the descriptor, masks and stat fields via RCU. That * allows demultiplex interrupts to do rcu based management of * the child interrupts. * This also allows us to use rcu in kstat_irqs_usr(). 
*/ call_rcu(&desc->rcu, delayed_free_desc); } static int alloc_descs(unsigned int start, unsigned int cnt, int node, const struct irq_affinity_desc *affinity, struct module *owner) { struct irq_desc *desc; int i; /* Validate affinity mask(s) */ if (affinity) { for (i = 0; i < cnt; i++) { if (cpumask_empty(&affinity[i].mask)) return -EINVAL; } } for (i = 0; i < cnt; i++) { const struct cpumask *mask = NULL; unsigned int flags = 0; if (affinity) { if (affinity->is_managed) { flags = IRQD_AFFINITY_MANAGED | IRQD_MANAGED_SHUTDOWN; } flags |= IRQD_AFFINITY_SET; mask = &affinity->mask; node = cpu_to_node(cpumask_first(mask)); affinity++; } desc = alloc_desc(start + i, node, flags, mask, owner); if (!desc) goto err; irq_insert_desc(start + i, desc); irq_sysfs_add(start + i, desc); irq_add_debugfs_entry(start + i, desc); } return start; err: for (i--; i >= 0; i--) free_desc(start + i); return -ENOMEM; } static bool irq_expand_nr_irqs(unsigned int nr) { if (nr > MAX_SPARSE_IRQS) return false; nr_irqs = nr; return true; } int __init early_irq_init(void) { int i, initcnt, node = first_online_node; struct irq_desc *desc; init_irq_default_affinity(); /* Let arch update nr_irqs and return the nr of preallocated irqs */ initcnt = arch_probe_nr_irqs(); printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n", NR_IRQS, nr_irqs, initcnt); if (WARN_ON(nr_irqs > MAX_SPARSE_IRQS)) nr_irqs = MAX_SPARSE_IRQS; if (WARN_ON(initcnt > MAX_SPARSE_IRQS)) initcnt = MAX_SPARSE_IRQS; if (initcnt > nr_irqs) nr_irqs = initcnt; for (i = 0; i < initcnt; i++) { desc = alloc_desc(i, node, 0, NULL, NULL); irq_insert_desc(i, desc); } return arch_early_irq_init(); } #else /* !CONFIG_SPARSE_IRQ */ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { [0 ... NR_IRQS-1] = { .handle_irq = handle_bad_irq, .depth = 1, .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), } }; int __init early_irq_init(void) { int count, i, node = first_online_node; int ret; init_irq_default_affinity(); printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS); count = ARRAY_SIZE(irq_desc); for (i = 0; i < count; i++) { ret = init_desc(irq_desc + i, i, node, 0, NULL, NULL); if (unlikely(ret)) goto __free_desc_res; } return arch_early_irq_init(); __free_desc_res: while (--i >= 0) { free_masks(irq_desc + i); free_percpu(irq_desc[i].kstat_irqs); } return ret; } struct irq_desc *irq_to_desc(unsigned int irq) { return (irq < NR_IRQS) ? 
irq_desc + irq : NULL; } EXPORT_SYMBOL(irq_to_desc); static void free_desc(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); scoped_guard(raw_spinlock_irqsave, &desc->lock) desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL); delete_irq_desc(irq); } static inline int alloc_descs(unsigned int start, unsigned int cnt, int node, const struct irq_affinity_desc *affinity, struct module *owner) { u32 i; for (i = 0; i < cnt; i++) { struct irq_desc *desc = irq_to_desc(start + i); desc->owner = owner; irq_insert_desc(start + i, desc); } return start; } static inline bool irq_expand_nr_irqs(unsigned int nr) { return false; } void irq_mark_irq(unsigned int irq) { guard(mutex)(&sparse_irq_lock); irq_insert_desc(irq, irq_desc + irq); } #ifdef CONFIG_GENERIC_IRQ_LEGACY void irq_init_desc(unsigned int irq) { free_desc(irq); } #endif #endif /* !CONFIG_SPARSE_IRQ */ int handle_irq_desc(struct irq_desc *desc) { struct irq_data *data; if (!desc) return -EINVAL; data = irq_desc_get_irq_data(desc); if (WARN_ON_ONCE(!in_hardirq() && irqd_is_handle_enforce_irqctx(data))) return -EPERM; generic_handle_irq_desc(desc); return 0; } /** * generic_handle_irq - Invoke the handler for a particular irq * @irq: The irq number to handle * * Returns: 0 on success, or -EINVAL if conversion has failed * * This function must be called from an IRQ context with irq regs * initialized. */ int generic_handle_irq(unsigned int irq) { return handle_irq_desc(irq_to_desc(irq)); } EXPORT_SYMBOL_GPL(generic_handle_irq); /** * generic_handle_irq_safe - Invoke the handler for a particular irq from any * context. * @irq: The irq number to handle * * Returns: 0 on success, a negative value on error. * * This function can be called from any context (IRQ or process context). It * will report an error if not invoked from IRQ context and the irq has been * marked to enforce IRQ-context only. */ int generic_handle_irq_safe(unsigned int irq) { unsigned long flags; int ret; local_irq_save(flags); ret = handle_irq_desc(irq_to_desc(irq)); local_irq_restore(flags); return ret; } EXPORT_SYMBOL_GPL(generic_handle_irq_safe); #ifdef CONFIG_IRQ_DOMAIN /** * generic_handle_domain_irq - Invoke the handler for a HW irq belonging * to a domain. * @domain: The domain where to perform the lookup * @hwirq: The HW irq number to convert to a logical one * * Returns: 0 on success, or -EINVAL if conversion has failed * * This function must be called from an IRQ context with irq regs * initialized. */ int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq) { return handle_irq_desc(irq_resolve_mapping(domain, hwirq)); } EXPORT_SYMBOL_GPL(generic_handle_domain_irq); /** * generic_handle_domain_irq_safe - Invoke the handler for a HW irq belonging * to a domain from any context. * @domain: The domain where to perform the lookup * @hwirq: The HW irq number to convert to a logical one * * Returns: 0 on success, a negative value on error. * * This function can be called from any context (IRQ or process * context). If the interrupt is marked as 'enforce IRQ-context only' then * the function must be invoked from hard interrupt context. */ int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq) { unsigned long flags; int ret; local_irq_save(flags); ret = handle_irq_desc(irq_resolve_mapping(domain, hwirq)); local_irq_restore(flags); return ret; } EXPORT_SYMBOL_GPL(generic_handle_domain_irq_safe); /** * generic_handle_domain_nmi - Invoke the handler for a HW nmi belonging * to a domain.
* @domain: The domain where to perform the lookup * @hwirq: The HW irq number to convert to a logical one * * Returns: 0 on success, or -EINVAL if conversion has failed * * This function must be called from an NMI context with irq regs * initialized. **/ int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq) { WARN_ON_ONCE(!in_nmi()); return handle_irq_desc(irq_resolve_mapping(domain, hwirq)); } #endif /* Dynamic interrupt handling */ /** * irq_free_descs - free irq descriptors * @from: Start of descriptor range * @cnt: Number of consecutive irqs to free */ void irq_free_descs(unsigned int from, unsigned int cnt) { int i; if (from >= nr_irqs || (from + cnt) > nr_irqs) return; guard(mutex)(&sparse_irq_lock); for (i = 0; i < cnt; i++) free_desc(from + i); } EXPORT_SYMBOL_GPL(irq_free_descs); /** * __irq_alloc_descs - allocate and initialize a range of irq descriptors * @irq: Allocate for specific irq number if irq >= 0 * @from: Start the search from this irq number * @cnt: Number of consecutive irqs to allocate. * @node: Preferred node on which the irq descriptor should be allocated * @owner: Owning module (can be NULL) * @affinity: Optional pointer to an affinity mask array of size @cnt which * hints where the irq descriptors should be allocated and which * default affinities to use * * Returns the first irq number or error code */ int __ref __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, struct module *owner, const struct irq_affinity_desc *affinity) { int start; if (!cnt) return -EINVAL; if (irq >= 0) { if (from > irq) return -EINVAL; from = irq; } else { /* * For interrupts which are freely allocated the * architecture can force a lower bound to the @from * argument. x86 uses this to exclude the GSI space. */ from = arch_dynirq_lower_bound(from); } guard(mutex)(&sparse_irq_lock); start = irq_find_free_area(from, cnt); if (irq >=0 && start != irq) return -EEXIST; if (start + cnt > nr_irqs) { if (!irq_expand_nr_irqs(start + cnt)) return -ENOMEM; } return alloc_descs(start, cnt, node, affinity, owner); } EXPORT_SYMBOL_GPL(__irq_alloc_descs); /** * irq_get_next_irq - get next allocated irq number * @offset: where to start the search * * Returns next irq number after offset or nr_irqs if none is found. */ unsigned int irq_get_next_irq(unsigned int offset) { return irq_find_at_or_after(offset); } struct irq_desc *__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, unsigned int check) { struct irq_desc *desc; desc = irq_to_desc(irq); if (!desc) return NULL; if (check & _IRQ_DESC_CHECK) { if ((check & _IRQ_DESC_PERCPU) && !irq_settings_is_per_cpu_devid(desc)) return NULL; if (!(check & _IRQ_DESC_PERCPU) && irq_settings_is_per_cpu_devid(desc)) return NULL; } if (bus) chip_bus_lock(desc); raw_spin_lock_irqsave(&desc->lock, *flags); return desc; } void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus) __releases(&desc->lock) { raw_spin_unlock_irqrestore(&desc->lock, flags); if (bus) chip_bus_sync_unlock(desc); } int irq_set_percpu_devid_partition(unsigned int irq, const struct cpumask *affinity) { struct irq_desc *desc = irq_to_desc(irq); if (!desc || desc->percpu_enabled) return -EINVAL; desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL); if (!desc->percpu_enabled) return -ENOMEM; desc->percpu_affinity = affinity ? 
: cpu_possible_mask; irq_set_percpu_devid_flags(irq); return 0; } int irq_set_percpu_devid(unsigned int irq) { return irq_set_percpu_devid_partition(irq, NULL); } int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity) { struct irq_desc *desc = irq_to_desc(irq); if (!desc || !desc->percpu_enabled) return -EINVAL; if (affinity) cpumask_copy(affinity, desc->percpu_affinity); return 0; } EXPORT_SYMBOL_GPL(irq_get_percpu_devid_partition); void kstat_incr_irq_this_cpu(unsigned int irq) { kstat_incr_irqs_this_cpu(irq_to_desc(irq)); } /** * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu * @irq: The interrupt number * @cpu: The cpu number * * Returns the sum of interrupt counts on @cpu since boot for * @irq. The caller must ensure that the interrupt is not removed * concurrently. */ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) { struct irq_desc *desc = irq_to_desc(irq); return desc && desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, cpu) : 0; } static unsigned int kstat_irqs_desc(struct irq_desc *desc, const struct cpumask *cpumask) { unsigned int sum = 0; int cpu; if (!irq_settings_is_per_cpu_devid(desc) && !irq_settings_is_per_cpu(desc) && !irq_is_nmi(desc)) return data_race(desc->tot_count); for_each_cpu(cpu, cpumask) sum += data_race(per_cpu(desc->kstat_irqs->cnt, cpu)); return sum; } static unsigned int kstat_irqs(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); if (!desc || !desc->kstat_irqs) return 0; return kstat_irqs_desc(desc, cpu_possible_mask); } #ifdef CONFIG_GENERIC_IRQ_STAT_SNAPSHOT void kstat_snapshot_irqs(void) { struct irq_desc *desc; unsigned int irq; for_each_irq_desc(irq, desc) { if (!desc->kstat_irqs) continue; this_cpu_write(desc->kstat_irqs->ref, this_cpu_read(desc->kstat_irqs->cnt)); } } unsigned int kstat_get_irq_since_snapshot(unsigned int irq) { struct irq_desc *desc = irq_to_desc(irq); if (!desc || !desc->kstat_irqs) return 0; return this_cpu_read(desc->kstat_irqs->cnt) - this_cpu_read(desc->kstat_irqs->ref); } #endif /** * kstat_irqs_usr - Get the statistics for an interrupt from thread context * @irq: The interrupt number * * Returns the sum of interrupt counts on all cpus since boot for @irq. * * It uses rcu to protect the access since a concurrent removal of an * interrupt descriptor is observing an rcu grace period before * delayed_free_desc()/irq_kobj_release(). */ unsigned int kstat_irqs_usr(unsigned int irq) { unsigned int sum; rcu_read_lock(); sum = kstat_irqs(irq); rcu_read_unlock(); return sum; } #ifdef CONFIG_LOCKDEP void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class, struct lock_class_key *request_class) { struct irq_desc *desc = irq_to_desc(irq); if (desc) { lockdep_set_class(&desc->lock, lock_class); lockdep_set_class(&desc->request_mutex, request_class); } } EXPORT_SYMBOL_GPL(__irq_set_lockdep_class); #endif
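generic_handle_domain_irq() above is the usual entry point for chained irqchip drivers: the parent handler reads a hardware status register and resolves each pending hwirq through its irq_domain. A sketch of that pattern follows; foo_priv, FOO_STATUS, and the 32-bit status layout are hypothetical, while the generic_handle_domain_irq() and chained_irq_enter()/chained_irq_exit() calls are the real kernel API.

/*
 * Hypothetical chained demultiplexer built on generic_handle_domain_irq().
 * The device names and register layout are made up for illustration.
 */
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>

struct foo_priv {
	void __iomem *base;
	struct irq_domain *domain;
};

#define FOO_STATUS 0x10	/* one bit per child interrupt (hypothetical) */

static void foo_irq_handler(struct irq_desc *desc)
{
	struct foo_priv *priv = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending;
	unsigned int hwirq;

	chained_irq_enter(chip, desc);
	pending = readl(priv->base + FOO_STATUS);
	/* Resolve each pending hwirq via the domain and run its handler. */
	for_each_set_bit(hwirq, &pending, 32)
		generic_handle_domain_irq(priv->domain, hwirq);
	chained_irq_exit(chip, desc);
}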
/* * Copyright © 2008 Intel Corporation * Copyright © 2016 Collabora Ltd * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * * Based on code from the i915 driver. * Original author: Damien Lespiau <damien.lespiau@intel.com> * */ #include <linux/circ_buf.h> #include <linux/ctype.h> #include <linux/debugfs.h> #include <linux/poll.h> #include <linux/uaccess.h> #include <drm/drm_crtc.h> #include <drm/drm_debugfs_crc.h> #include <drm/drm_drv.h> #include <drm/drm_print.h> #include "drm_internal.h" /** * DOC: CRC ABI * * DRM device drivers can provide to userspace CRC information of each frame as * it reached a given hardware component (a CRC sampling "source"). * * Userspace can control generation of CRCs in a given CRTC by writing to the * file dri/0/crtc-N/crc/control in debugfs, with N being the :ref:`index of
 * Accepted values are source names (which are
 * driver-specific) and the "auto" keyword, which lets the driver select a
 * default source of frame CRCs for this CRTC.
 *
 * Once frame CRC generation is enabled, userspace can capture frame CRCs by
 * reading the dri/0/crtc-N/crc/data file. Each line in that file contains the
 * frame number in the first field, followed by one or more unsigned integer
 * fields containing the CRC data. Fields are separated by a single space and
 * the number of CRC fields is source-specific.
 *
 * Note that while in some cases the CRC is computed in a specified way on
 * frame contents as supplied by userspace (eDP 1.3), in general the CRC
 * computation is performed in an unspecified way on frame contents that have
 * already been processed in an equally unspecified way, so userspace cannot
 * rely on being able to generate matching CRC values for the frame contents
 * that it submits. In this general case, the best userspace can do is compare
 * the reported CRCs of frames that should have the same contents.
 *
 * On the driver side the implementation effort is minimal: drivers only need
 * to implement &drm_crtc_funcs.set_crc_source and
 * &drm_crtc_funcs.verify_crc_source. The debugfs files are automatically set
 * up if those vfuncs are set. CRC samples need to be captured in the driver
 * by calling drm_crtc_add_crc_entry(). Depending on the driver and hardware
 * requirements, &drm_crtc_funcs.set_crc_source may result in a commit (even a
 * full modeset).
 *
 * CRC results must be reliable across non-full-modeset atomic commits, so if a
 * commit via DRM_IOCTL_MODE_ATOMIC would disable or otherwise interfere with
 * CRC generation, then the driver must mark that commit as a full modeset
 * (drm_atomic_crtc_needs_modeset() should return true). As a result, to ensure
 * consistent results, generic userspace must re-set up CRC generation after a
 * legacy SETCRTC or an atomic commit with DRM_MODE_ATOMIC_ALLOW_MODESET.
 */
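/*
 * Illustrative sketch (not part of the ABI documentation above): a minimal
 * userspace capture loop following the ABI described in the DOC comment.
 * The debugfs mount point, the card/CRTC indices and the "auto" source are
 * assumptions for the example.
 *
 *	int ctl = open("/sys/kernel/debug/dri/0/crtc-0/crc/control", O_WRONLY);
 *	write(ctl, "auto", 4);		// let the driver pick a default source
 *	close(ctl);
 *
 *	// opening "data" starts CRC generation; each read() returns one line
 *	int fd = open("/sys/kernel/debug/dri/0/crtc-0/crc/data", O_RDONLY);
 *	char line[128];			// larger than the maximum line length
 *	ssize_t n;
 *
 *	while ((n = read(fd, line, sizeof(line) - 1)) > 0) {
 *		line[n] = '\0';		// e.g. "0x0000002a 0xdeadbeef\n"
 *		consume_line(line);	// hypothetical consumer
 *	}
 *	close(fd);			// stops CRC generation
 */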
static int crc_control_show(struct seq_file *m, void *data)
{
	struct drm_crtc *crtc = m->private;

	if (crtc->funcs->get_crc_sources) {
		size_t count;
		const char *const *sources = crtc->funcs->get_crc_sources(crtc,
									   &count);
		size_t values_cnt;
		int i;

		if (count == 0 || !sources)
			goto out;

		for (i = 0; i < count; i++)
			if (!crtc->funcs->verify_crc_source(crtc, sources[i],
							    &values_cnt)) {
				if (strcmp(sources[i], crtc->crc.source))
					seq_printf(m, "%s\n", sources[i]);
				else
					seq_printf(m, "%s*\n", sources[i]);
			}
	}
	return 0;

out:
	seq_printf(m, "%s*\n", crtc->crc.source);
	return 0;
}

static int crc_control_open(struct inode *inode, struct file *file)
{
	struct drm_crtc *crtc = inode->i_private;

	return single_open(file, crc_control_show, crtc);
}

static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
				 size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_crtc *crtc = m->private;
	struct drm_crtc_crc *crc = &crtc->crc;
	char *source;
	size_t values_cnt;
	int ret;

	if (len == 0)
		return 0;

	if (len > PAGE_SIZE - 1) {
		DRM_DEBUG_KMS("Expected < %lu bytes into crtc crc control\n",
			      PAGE_SIZE);
		return -E2BIG;
	}

	source = memdup_user_nul(ubuf, len);
	if (IS_ERR(source))
		return PTR_ERR(source);

	if (source[len - 1] == '\n')
		source[len - 1] = '\0';

	ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt);
	if (ret) {
		kfree(source);
		return ret;
	}

	spin_lock_irq(&crc->lock);

	if (crc->opened) {
		spin_unlock_irq(&crc->lock);
		kfree(source);
		return -EBUSY;
	}

	kfree(crc->source);
	crc->source = source;

	spin_unlock_irq(&crc->lock);

	*offp += len;
	return len;
}

static const struct file_operations drm_crtc_crc_control_fops = {
	.owner = THIS_MODULE,
	.open = crc_control_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = crc_control_write
};

static int crtc_crc_data_count(struct drm_crtc_crc *crc)
{
	assert_spin_locked(&crc->lock);
	return CIRC_CNT(crc->head, crc->tail, DRM_CRC_ENTRIES_NR);
}

static void crtc_crc_cleanup(struct drm_crtc_crc *crc)
{
	kfree(crc->entries);
	crc->overflow = false;
	crc->entries = NULL;
	crc->head = 0;
	crc->tail = 0;
	crc->values_cnt = 0;
	crc->opened = false;
}

static int crtc_crc_open(struct inode *inode, struct file *filep)
{
	struct drm_crtc *crtc = inode->i_private;
	struct drm_crtc_crc *crc = &crtc->crc;
	struct drm_crtc_crc_entry *entries = NULL;
	size_t values_cnt;
	int ret = 0;

	if (drm_drv_uses_atomic_modeset(crtc->dev)) {
		ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
		if (ret)
			return ret;

		if (!crtc->state->active)
			ret = -EIO;
		drm_modeset_unlock(&crtc->mutex);

		if (ret)
			return ret;
	}

	ret = crtc->funcs->verify_crc_source(crtc, crc->source, &values_cnt);
	if (ret)
		return ret;

	if (WARN_ON(values_cnt > DRM_MAX_CRC_NR))
		return -EINVAL;

	if (WARN_ON(values_cnt == 0))
		return -EINVAL;

	entries = kcalloc(DRM_CRC_ENTRIES_NR, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	spin_lock_irq(&crc->lock);
	if (!crc->opened) {
		crc->opened = true;
		crc->entries = entries;
		crc->values_cnt = values_cnt;
	} else {
		ret = -EBUSY;
	}
	spin_unlock_irq(&crc->lock);

	if (ret) {
		kfree(entries);
		return ret;
	}

	ret = crtc->funcs->set_crc_source(crtc, crc->source);
	if (ret)
		goto err;

	return 0;

err:
	spin_lock_irq(&crc->lock);
	crtc_crc_cleanup(crc);
	spin_unlock_irq(&crc->lock);
	return ret;
}

static int crtc_crc_release(struct inode *inode, struct file *filep)
{
	struct drm_crtc *crtc = filep->f_inode->i_private;
	struct drm_crtc_crc *crc = &crtc->crc;

	/* terminate the infinite while loop in 'drm_dp_aux_crc_work' if it is running */
	spin_lock_irq(&crc->lock);
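	/*
	 * Clear 'opened' under the lock, then drop it before the driver
	 * callback below (set_crc_source() may sleep); the entries ring is
	 * freed afterwards with the lock re-taken in crtc_crc_cleanup().
	 */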
	crc->opened = false;
	spin_unlock_irq(&crc->lock);

	crtc->funcs->set_crc_source(crtc, NULL);

	spin_lock_irq(&crc->lock);
	crtc_crc_cleanup(crc);
	spin_unlock_irq(&crc->lock);

	return 0;
}

/*
 * 1 frame field of 10 chars plus a number of CRC fields, each 10 chars wide
 * and preceded by a separating space, with a newline at the end and
 * null-terminated.
 */
#define LINE_LEN(values_cnt)	(10 + 11 * (values_cnt) + 1 + 1)
#define MAX_LINE_LEN		(LINE_LEN(DRM_MAX_CRC_NR))

static ssize_t crtc_crc_read(struct file *filep, char __user *user_buf,
			     size_t count, loff_t *pos)
{
	struct drm_crtc *crtc = filep->f_inode->i_private;
	struct drm_crtc_crc *crc = &crtc->crc;
	struct drm_crtc_crc_entry *entry;
	char buf[MAX_LINE_LEN];
	int ret, i;

	spin_lock_irq(&crc->lock);

	if (!crc->source) {
		spin_unlock_irq(&crc->lock);
		return 0;
	}

	/* Nothing to read? */
	while (crtc_crc_data_count(crc) == 0) {
		if (filep->f_flags & O_NONBLOCK) {
			spin_unlock_irq(&crc->lock);
			return -EAGAIN;
		}

		ret = wait_event_interruptible_lock_irq(crc->wq,
							crtc_crc_data_count(crc),
							crc->lock);
		if (ret) {
			spin_unlock_irq(&crc->lock);
			return ret;
		}
	}

	/* We know we have an entry to be read */
	entry = &crc->entries[crc->tail];

	if (count < LINE_LEN(crc->values_cnt)) {
		spin_unlock_irq(&crc->lock);
		return -EINVAL;
	}

	BUILD_BUG_ON_NOT_POWER_OF_2(DRM_CRC_ENTRIES_NR);
	crc->tail = (crc->tail + 1) & (DRM_CRC_ENTRIES_NR - 1);

	spin_unlock_irq(&crc->lock);

	if (entry->has_frame_counter)
		sprintf(buf, "0x%08x", entry->frame);
	else
		sprintf(buf, "XXXXXXXXXX");

	for (i = 0; i < crc->values_cnt; i++)
		sprintf(buf + 10 + i * 11, " 0x%08x", entry->crcs[i]);
	sprintf(buf + 10 + crc->values_cnt * 11, "\n");

	if (copy_to_user(user_buf, buf, LINE_LEN(crc->values_cnt)))
		return -EFAULT;

	return LINE_LEN(crc->values_cnt);
}

static __poll_t crtc_crc_poll(struct file *file, poll_table *wait)
{
	struct drm_crtc *crtc = file->f_inode->i_private;
	struct drm_crtc_crc *crc = &crtc->crc;
	__poll_t ret = 0;

	poll_wait(file, &crc->wq, wait);

	spin_lock_irq(&crc->lock);
	if (crc->source && crtc_crc_data_count(crc))
		ret |= EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&crc->lock);

	return ret;
}

static const struct file_operations drm_crtc_crc_data_fops = {
	.owner = THIS_MODULE,
	.open = crtc_crc_open,
	.read = crtc_crc_read,
	.poll = crtc_crc_poll,
	.release = crtc_crc_release,
};

void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
{
	struct dentry *crc_ent;

	if (!crtc->funcs->set_crc_source || !crtc->funcs->verify_crc_source)
		return;

	crc_ent = debugfs_create_dir("crc", crtc->debugfs_entry);

	debugfs_create_file("control", S_IRUGO | S_IWUSR, crc_ent, crtc,
			    &drm_crtc_crc_control_fops);
	debugfs_create_file("data", S_IRUGO, crc_ent, crtc,
			    &drm_crtc_crc_data_fops);
}

/**
 * drm_crtc_add_crc_entry - Add entry with CRC information for a frame
 * @crtc: CRTC to which the frame belongs
 * @has_frame: whether this entry has a frame number to go with
 * @frame: number of the frame these CRCs are about
 * @crcs: array of CRC values, with length matching #drm_crtc_crc.values_cnt
 *
 * For each frame, the driver polls the source of CRCs for new data and calls
 * this function to add them to the buffer from where userspace reads.
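 *
 * Returns 0 on success, -EINVAL when userspace is no longer reading (the
 * entry buffer has already been torn down) or -ENOBUFS when the entry ring
 * is full.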
 */
int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
			   uint32_t frame, uint32_t *crcs)
{
	struct drm_crtc_crc *crc = &crtc->crc;
	struct drm_crtc_crc_entry *entry;
	int head, tail;
	unsigned long flags;

	spin_lock_irqsave(&crc->lock, flags);

	/* Caller may not have noticed yet that userspace has stopped reading */
	if (!crc->entries) {
		spin_unlock_irqrestore(&crc->lock, flags);
		return -EINVAL;
	}

	head = crc->head;
	tail = crc->tail;

	if (CIRC_SPACE(head, tail, DRM_CRC_ENTRIES_NR) < 1) {
		bool was_overflow = crc->overflow;

		crc->overflow = true;
		spin_unlock_irqrestore(&crc->lock, flags);

		if (!was_overflow)
			DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");

		return -ENOBUFS;
	}

	entry = &crc->entries[head];
	entry->frame = frame;
	entry->has_frame_counter = has_frame;
	memcpy(&entry->crcs, crcs, sizeof(*crcs) * crc->values_cnt);

	head = (head + 1) & (DRM_CRC_ENTRIES_NR - 1);
	crc->head = head;

	spin_unlock_irqrestore(&crc->lock, flags);

	wake_up_interruptible(&crc->wq);

	return 0;
}
EXPORT_SYMBOL_GPL(drm_crtc_add_crc_entry);
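/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a frame-done handler feeding one CRC sample per frame into the ring.
 * 'struct foo_crtc', FOO_CRC_RESULT and the register layout are invented
 * for the example.
 *
 *	static void foo_crtc_frame_done(struct foo_crtc *fcrtc)
 *	{
 *		u32 crcs[1] = { readl(fcrtc->regs + FOO_CRC_RESULT) };
 *
 *		drm_crtc_add_crc_entry(&fcrtc->base, true,
 *				       drm_crtc_accurate_vblank_count(&fcrtc->base),
 *				       crcs);
 *	}
 */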
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright